linux/block/blk-core.c
   1/*
   2 * Copyright (C) 1991, 1992 Linus Torvalds
   3 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   4 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
   5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
   6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
   7 *      -  July 2000
   8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
   9 */
  10
  11/*
  12 * This handles all read/write requests to block devices
  13 */
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/backing-dev.h>
  17#include <linux/bio.h>
  18#include <linux/blkdev.h>
  19#include <linux/highmem.h>
  20#include <linux/mm.h>
  21#include <linux/kernel_stat.h>
  22#include <linux/string.h>
  23#include <linux/init.h>
  24#include <linux/completion.h>
  25#include <linux/slab.h>
  26#include <linux/swap.h>
  27#include <linux/writeback.h>
  28#include <linux/task_io_accounting_ops.h>
  29#include <linux/interrupt.h>
  30#include <linux/cpu.h>
  31#include <linux/blktrace_api.h>
  32#include <linux/fault-inject.h>
  33
  34#include "blk.h"
  35
  36static int __make_request(struct request_queue *q, struct bio *bio);
  37
  38/*
  39 * For the allocated request tables
  40 */
  41static struct kmem_cache *request_cachep;
  42
  43/*
  44 * For queue allocation
  45 */
  46struct kmem_cache *blk_requestq_cachep;
  47
  48/*
  49 * Controlling structure to kblockd
  50 */
  51static struct workqueue_struct *kblockd_workqueue;
  52
  53static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
  54
  55static void drive_stat_acct(struct request *rq, int new_io)
  56{
  57        struct hd_struct *part;
  58        int rw = rq_data_dir(rq);
  59
  60        if (!blk_fs_request(rq) || !rq->rq_disk)
  61                return;
  62
  63        part = get_part(rq->rq_disk, rq->sector);
  64        if (!new_io)
  65                __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
  66        else {
  67                disk_round_stats(rq->rq_disk);
  68                rq->rq_disk->in_flight++;
  69                if (part) {
  70                        part_round_stats(part);
  71                        part->in_flight++;
  72                }
  73        }
  74}
  75
  76void blk_queue_congestion_threshold(struct request_queue *q)
  77{
  78        int nr;
  79
  80        nr = q->nr_requests - (q->nr_requests / 8) + 1;
  81        if (nr > q->nr_requests)
  82                nr = q->nr_requests;
  83        q->nr_congestion_on = nr;
  84
  85        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
  86        if (nr < 1)
  87                nr = 1;
  88        q->nr_congestion_off = nr;
  89}
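    /*
     * Worked example (editorial note, not in the original file): with the
     * default q->nr_requests of 128 the code above yields
     * nr_congestion_on = 128 - 16 + 1 = 113 and
     * nr_congestion_off = 128 - 16 - 8 - 1 = 103, so the queue is flagged
     * congested as allocations reach 113 outstanding requests and the
     * flag is cleared again once the count falls below 103.
     */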
  90
  91/**
  92 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  93 * @bdev:       device
  94 *
  95 * Locates the passed device's request queue and returns the address of its
  96 * backing_dev_info
  97 *
  98 * Will return NULL if the request queue cannot be located.
  99 */
 100struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 101{
 102        struct backing_dev_info *ret = NULL;
 103        struct request_queue *q = bdev_get_queue(bdev);
 104
 105        if (q)
 106                ret = &q->backing_dev_info;
 107        return ret;
 108}
 109EXPORT_SYMBOL(blk_get_backing_dev_info);
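    /*
     * Example (illustrative sketch, not part of the original file): a
     * caller issuing optional I/O such as readahead can use the returned
     * backing_dev_info to back off while the device is congested.  "bdev"
     * stands for any struct block_device the caller already holds.
     *
     *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
     *
     *	if (bdi && bdi_read_congested(bdi))
     *		return;			// skip the optional read for now
     */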
 110
 111void blk_rq_init(struct request_queue *q, struct request *rq)
 112{
 113        memset(rq, 0, sizeof(*rq));
 114
 115        INIT_LIST_HEAD(&rq->queuelist);
 116        INIT_LIST_HEAD(&rq->donelist);
 117        rq->q = q;
 118        rq->sector = rq->hard_sector = (sector_t) -1;
 119        INIT_HLIST_NODE(&rq->hash);
 120        RB_CLEAR_NODE(&rq->rb_node);
 121        rq->cmd = rq->__cmd;
 122        rq->tag = -1;
 123        rq->ref_count = 1;
 124}
 125EXPORT_SYMBOL(blk_rq_init);
 126
 127static void req_bio_endio(struct request *rq, struct bio *bio,
 128                          unsigned int nbytes, int error)
 129{
 130        struct request_queue *q = rq->q;
 131
 132        if (&q->bar_rq != rq) {
 133                if (error)
 134                        clear_bit(BIO_UPTODATE, &bio->bi_flags);
 135                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 136                        error = -EIO;
 137
 138                if (unlikely(nbytes > bio->bi_size)) {
 139                        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 140                               __func__, nbytes, bio->bi_size);
 141                        nbytes = bio->bi_size;
 142                }
 143
 144                bio->bi_size -= nbytes;
 145                bio->bi_sector += (nbytes >> 9);
 146
 147                if (bio_integrity(bio))
 148                        bio_integrity_advance(bio, nbytes);
 149
 150                if (bio->bi_size == 0)
 151                        bio_endio(bio, error);
 152        } else {
 153
 154                /*
 155                 * Okay, this is the barrier request in progress, just
  156                 * record the error.
 157                 */
 158                if (error && !q->orderr)
 159                        q->orderr = error;
 160        }
 161}
 162
 163void blk_dump_rq_flags(struct request *rq, char *msg)
 164{
 165        int bit;
 166
 167        printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 168                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 169                rq->cmd_flags);
 170
 171        printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
 172                                                (unsigned long long)rq->sector,
 173                                                rq->nr_sectors,
 174                                                rq->current_nr_sectors);
 175        printk(KERN_INFO "  bio %p, biotail %p, buffer %p, data %p, len %u\n",
 176                                                rq->bio, rq->biotail,
 177                                                rq->buffer, rq->data,
 178                                                rq->data_len);
 179
 180        if (blk_pc_request(rq)) {
 181                printk(KERN_INFO "  cdb: ");
 182                for (bit = 0; bit < BLK_MAX_CDB; bit++)
 183                        printk("%02x ", rq->cmd[bit]);
 184                printk("\n");
 185        }
 186}
 187EXPORT_SYMBOL(blk_dump_rq_flags);
 188
 189/*
 190 * "plug" the device if there are no outstanding requests: this will
 191 * force the transfer to start only after we have put all the requests
 192 * on the list.
 193 *
 194 * This is called with interrupts off and no requests on the queue and
 195 * with the queue lock held.
 196 */
 197void blk_plug_device(struct request_queue *q)
 198{
 199        WARN_ON(!irqs_disabled());
 200
 201        /*
 202         * don't plug a stopped queue, it must be paired with blk_start_queue()
 203         * which will restart the queueing
 204         */
 205        if (blk_queue_stopped(q))
 206                return;
 207
 208        if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 209                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 210                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 211        }
 212}
 213EXPORT_SYMBOL(blk_plug_device);
 214
 215/**
 216 * blk_plug_device_unlocked - plug a device without queue lock held
 217 * @q:    The &struct request_queue to plug
 218 *
 219 * Description:
  220 *   Like blk_plug_device(), but grabs the queue lock and disables
 221 *   interrupts.
 222 **/
 223void blk_plug_device_unlocked(struct request_queue *q)
 224{
 225        unsigned long flags;
 226
 227        spin_lock_irqsave(q->queue_lock, flags);
 228        blk_plug_device(q);
 229        spin_unlock_irqrestore(q->queue_lock, flags);
 230}
 231EXPORT_SYMBOL(blk_plug_device_unlocked);
 232
 233/*
 234 * remove the queue from the plugged list, if present. called with
 235 * queue lock held and interrupts disabled.
 236 */
 237int blk_remove_plug(struct request_queue *q)
 238{
 239        WARN_ON(!irqs_disabled());
 240
 241        if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
 242                return 0;
 243
 244        del_timer(&q->unplug_timer);
 245        return 1;
 246}
 247EXPORT_SYMBOL(blk_remove_plug);
 248
 249/*
 250 * remove the plug and let it rip..
 251 */
 252void __generic_unplug_device(struct request_queue *q)
 253{
 254        if (unlikely(blk_queue_stopped(q)))
 255                return;
 256
 257        if (!blk_remove_plug(q))
 258                return;
 259
 260        q->request_fn(q);
 261}
 262EXPORT_SYMBOL(__generic_unplug_device);
 263
 264/**
 265 * generic_unplug_device - fire a request queue
 266 * @q:    The &struct request_queue in question
 267 *
 268 * Description:
  269 *   Linux uses plugging to build bigger request queues before letting
 270 *   the device have at them. If a queue is plugged, the I/O scheduler
 271 *   is still adding and merging requests on the queue. Once the queue
 272 *   gets unplugged, the request_fn defined for the queue is invoked and
 273 *   transfers started.
 274 **/
 275void generic_unplug_device(struct request_queue *q)
 276{
 277        if (blk_queue_plugged(q)) {
 278                spin_lock_irq(q->queue_lock);
 279                __generic_unplug_device(q);
 280                spin_unlock_irq(q->queue_lock);
 281        }
 282}
 283EXPORT_SYMBOL(generic_unplug_device);
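    /*
     * Example (illustrative sketch, not part of the original file): a
     * submitter that has just queued bios and now needs the data can kick
     * a plugged queue by hand instead of waiting for the unplug timer.
     * "bdev" is any struct block_device the caller already holds.
     *
     *	struct request_queue *q = bdev_get_queue(bdev);
     *
     *	if (q)
     *		generic_unplug_device(q);	// start dispatching now
     */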
 284
 285static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 286                                   struct page *page)
 287{
 288        struct request_queue *q = bdi->unplug_io_data;
 289
 290        blk_unplug(q);
 291}
 292
 293void blk_unplug_work(struct work_struct *work)
 294{
 295        struct request_queue *q =
 296                container_of(work, struct request_queue, unplug_work);
 297
 298        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
 299                                q->rq.count[READ] + q->rq.count[WRITE]);
 300
 301        q->unplug_fn(q);
 302}
 303
 304void blk_unplug_timeout(unsigned long data)
 305{
 306        struct request_queue *q = (struct request_queue *)data;
 307
 308        blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
 309                                q->rq.count[READ] + q->rq.count[WRITE]);
 310
 311        kblockd_schedule_work(&q->unplug_work);
 312}
 313
 314void blk_unplug(struct request_queue *q)
 315{
 316        /*
 317         * devices don't necessarily have an ->unplug_fn defined
 318         */
 319        if (q->unplug_fn) {
 320                blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
 321                                        q->rq.count[READ] + q->rq.count[WRITE]);
 322
 323                q->unplug_fn(q);
 324        }
 325}
 326EXPORT_SYMBOL(blk_unplug);
 327
 328/**
 329 * blk_start_queue - restart a previously stopped queue
 330 * @q:    The &struct request_queue in question
 331 *
 332 * Description:
 333 *   blk_start_queue() will clear the stop flag on the queue, and call
 334 *   the request_fn for the queue if it was in a stopped state when
 335 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 336 **/
 337void blk_start_queue(struct request_queue *q)
 338{
 339        WARN_ON(!irqs_disabled());
 340
 341        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 342
 343        /*
 344         * one level of recursion is ok and is much faster than kicking
 345         * the unplug handling
 346         */
 347        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 348                q->request_fn(q);
 349                queue_flag_clear(QUEUE_FLAG_REENTER, q);
 350        } else {
 351                blk_plug_device(q);
 352                kblockd_schedule_work(&q->unplug_work);
 353        }
 354}
 355EXPORT_SYMBOL(blk_start_queue);
 356
 357/**
 358 * blk_stop_queue - stop a queue
 359 * @q:    The &struct request_queue in question
 360 *
 361 * Description:
 362 *   The Linux block layer assumes that a block driver will consume all
 363 *   entries on the request queue when the request_fn strategy is called.
 364 *   Often this will not happen, because of hardware limitations (queue
 365 *   depth settings). If a device driver gets a 'queue full' response,
 366 *   or if it simply chooses not to queue more I/O at one point, it can
 367 *   call this function to prevent the request_fn from being called until
 368 *   the driver has signalled it's ready to go again. This happens by calling
 369 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 370 **/
 371void blk_stop_queue(struct request_queue *q)
 372{
 373        blk_remove_plug(q);
 374        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 375}
 376EXPORT_SYMBOL(blk_stop_queue);
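    /*
     * Example (illustrative sketch, not part of the original file): the
     * stop/start pair as typically used by a driver whose hardware queue
     * fills up.  mydev, my_hw_full() and mydev_request_fn() are
     * hypothetical driver pieces.
     *
     *	static void mydev_request_fn(struct request_queue *q)
     *	{
     *		struct request *rq;
     *
     *		while ((rq = elv_next_request(q)) != NULL) {
     *			if (my_hw_full(mydev)) {
     *				blk_stop_queue(q);	// queue lock already held
     *				return;
     *			}
     *			// ... hand rq to the hardware ...
     *		}
     *	}
     *
     *	// later, from the completion interrupt, with queue_lock held:
     *	blk_start_queue(q);
     */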
 377
 378/**
 379 * blk_sync_queue - cancel any pending callbacks on a queue
 380 * @q: the queue
 381 *
 382 * Description:
 383 *     The block layer may perform asynchronous callback activity
 384 *     on a queue, such as calling the unplug function after a timeout.
 385 *     A block device may call blk_sync_queue to ensure that any
 386 *     such activity is cancelled, thus allowing it to release resources
 387 *     that the callbacks might use. The caller must already have made sure
 388 *     that its ->make_request_fn will not re-add plugging prior to calling
 389 *     this function.
 390 *
 391 */
 392void blk_sync_queue(struct request_queue *q)
 393{
 394        del_timer_sync(&q->unplug_timer);
 395        kblockd_flush_work(&q->unplug_work);
 396}
 397EXPORT_SYMBOL(blk_sync_queue);
 398
 399/**
  400 * __blk_run_queue - run a single device queue
 401 * @q:  The queue to run
 402 */
 403void __blk_run_queue(struct request_queue *q)
 404{
 405        blk_remove_plug(q);
 406
 407        /*
 408         * Only recurse once to avoid overrunning the stack, let the unplug
 409         * handling reinvoke the handler shortly if we already got there.
 410         */
 411        if (!elv_queue_empty(q)) {
 412                if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 413                        q->request_fn(q);
 414                        queue_flag_clear(QUEUE_FLAG_REENTER, q);
 415                } else {
 416                        blk_plug_device(q);
 417                        kblockd_schedule_work(&q->unplug_work);
 418                }
 419        }
 420}
 421EXPORT_SYMBOL(__blk_run_queue);
 422
 423/**
 424 * blk_run_queue - run a single device queue
 425 * @q: The queue to run
 426 */
 427void blk_run_queue(struct request_queue *q)
 428{
 429        unsigned long flags;
 430
 431        spin_lock_irqsave(q->queue_lock, flags);
 432        __blk_run_queue(q);
 433        spin_unlock_irqrestore(q->queue_lock, flags);
 434}
 435EXPORT_SYMBOL(blk_run_queue);
 436
 437void blk_put_queue(struct request_queue *q)
 438{
 439        kobject_put(&q->kobj);
 440}
 441
 442void blk_cleanup_queue(struct request_queue *q)
 443{
 444        mutex_lock(&q->sysfs_lock);
 445        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 446        mutex_unlock(&q->sysfs_lock);
 447
 448        if (q->elevator)
 449                elevator_exit(q->elevator);
 450
 451        blk_put_queue(q);
 452}
 453EXPORT_SYMBOL(blk_cleanup_queue);
 454
 455static int blk_init_free_list(struct request_queue *q)
 456{
 457        struct request_list *rl = &q->rq;
 458
 459        rl->count[READ] = rl->count[WRITE] = 0;
 460        rl->starved[READ] = rl->starved[WRITE] = 0;
 461        rl->elvpriv = 0;
 462        init_waitqueue_head(&rl->wait[READ]);
 463        init_waitqueue_head(&rl->wait[WRITE]);
 464
 465        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 466                                mempool_free_slab, request_cachep, q->node);
 467
 468        if (!rl->rq_pool)
 469                return -ENOMEM;
 470
 471        return 0;
 472}
 473
 474struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 475{
 476        return blk_alloc_queue_node(gfp_mask, -1);
 477}
 478EXPORT_SYMBOL(blk_alloc_queue);
 479
 480struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 481{
 482        struct request_queue *q;
 483        int err;
 484
 485        q = kmem_cache_alloc_node(blk_requestq_cachep,
 486                                gfp_mask | __GFP_ZERO, node_id);
 487        if (!q)
 488                return NULL;
 489
 490        q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 491        q->backing_dev_info.unplug_io_data = q;
 492        err = bdi_init(&q->backing_dev_info);
 493        if (err) {
 494                kmem_cache_free(blk_requestq_cachep, q);
 495                return NULL;
 496        }
 497
 498        init_timer(&q->unplug_timer);
 499
 500        kobject_init(&q->kobj, &blk_queue_ktype);
 501
 502        mutex_init(&q->sysfs_lock);
 503        spin_lock_init(&q->__queue_lock);
 504
 505        return q;
 506}
 507EXPORT_SYMBOL(blk_alloc_queue_node);
 508
 509/**
 510 * blk_init_queue  - prepare a request queue for use with a block device
 511 * @rfn:  The function to be called to process requests that have been
 512 *        placed on the queue.
 513 * @lock: Request queue spin lock
 514 *
 515 * Description:
 516 *    If a block device wishes to use the standard request handling procedures,
 517 *    which sorts requests and coalesces adjacent requests, then it must
 518 *    call blk_init_queue().  The function @rfn will be called when there
 519 *    are requests on the queue that need to be processed.  If the device
 520 *    supports plugging, then @rfn may not be called immediately when requests
 521 *    are available on the queue, but may be called at some time later instead.
 522 *    Plugged queues are generally unplugged when a buffer belonging to one
 523 *    of the requests on the queue is needed, or due to memory pressure.
 524 *
 525 *    @rfn is not required, or even expected, to remove all requests off the
 526 *    queue, but only as many as it can handle at a time.  If it does leave
 527 *    requests on the queue, it is responsible for arranging that the requests
 528 *    get dealt with eventually.
 529 *
 530 *    The queue spin lock must be held while manipulating the requests on the
 531 *    request queue; this lock will be taken also from interrupt context, so irq
 532 *    disabling is needed for it.
 533 *
 534 *    Function returns a pointer to the initialized request queue, or NULL if
 535 *    it didn't succeed.
 536 *
 537 * Note:
 538 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 539 *    when the block device is deactivated (such as at module unload).
 540 **/
 541
 542struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 543{
 544        return blk_init_queue_node(rfn, lock, -1);
 545}
 546EXPORT_SYMBOL(blk_init_queue);
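    /*
     * Example (illustrative sketch, not part of the original file):
     * minimal queue setup for a request-based driver, paired with
     * blk_cleanup_queue() at teardown as required by the note above.
     * mydev_request_fn and mydev_lock are hypothetical.
     *
     *	static DEFINE_SPINLOCK(mydev_lock);
     *
     *	q = blk_init_queue(mydev_request_fn, &mydev_lock);
     *	if (!q)
     *		return -ENOMEM;
     *	blk_queue_max_sectors(q, 255);		// apply device limits
     *	disk->queue = q;			// attach to the gendisk
     *
     *	// on module unload:
     *	blk_cleanup_queue(q);
     */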
 547
 548struct request_queue *
 549blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 550{
 551        struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 552
 553        if (!q)
 554                return NULL;
 555
 556        q->node = node_id;
 557        if (blk_init_free_list(q)) {
 558                kmem_cache_free(blk_requestq_cachep, q);
 559                return NULL;
 560        }
 561
 562        /*
 563         * if caller didn't supply a lock, they get per-queue locking with
 564         * our embedded lock
 565         */
 566        if (!lock)
 567                lock = &q->__queue_lock;
 568
 569        q->request_fn           = rfn;
 570        q->prep_rq_fn           = NULL;
 571        q->unplug_fn            = generic_unplug_device;
 572        q->queue_flags          = (1 << QUEUE_FLAG_CLUSTER);
 573        q->queue_lock           = lock;
 574
 575        blk_queue_segment_boundary(q, 0xffffffff);
 576
 577        blk_queue_make_request(q, __make_request);
 578        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 579
 580        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 581        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 582
 583        q->sg_reserved_size = INT_MAX;
 584
 585        blk_set_cmd_filter_defaults(&q->cmd_filter);
 586
 587        /*
 588         * all done
 589         */
 590        if (!elevator_init(q, NULL)) {
 591                blk_queue_congestion_threshold(q);
 592                return q;
 593        }
 594
 595        blk_put_queue(q);
 596        return NULL;
 597}
 598EXPORT_SYMBOL(blk_init_queue_node);
 599
 600int blk_get_queue(struct request_queue *q)
 601{
 602        if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
 603                kobject_get(&q->kobj);
 604                return 0;
 605        }
 606
 607        return 1;
 608}
 609
 610static inline void blk_free_request(struct request_queue *q, struct request *rq)
 611{
 612        if (rq->cmd_flags & REQ_ELVPRIV)
 613                elv_put_request(q, rq);
 614        mempool_free(rq, q->rq.rq_pool);
 615}
 616
 617static struct request *
 618blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 619{
 620        struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 621
 622        if (!rq)
 623                return NULL;
 624
 625        blk_rq_init(q, rq);
 626
 627        /*
 628         * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 629         * see bio.h and blkdev.h
 630         */
 631        rq->cmd_flags = rw | REQ_ALLOCED;
 632
 633        if (priv) {
 634                if (unlikely(elv_set_request(q, rq, gfp_mask))) {
 635                        mempool_free(rq, q->rq.rq_pool);
 636                        return NULL;
 637                }
 638                rq->cmd_flags |= REQ_ELVPRIV;
 639        }
 640
 641        return rq;
 642}
 643
 644/*
  645 * ioc_batching returns true if the ioc is a valid batching io_context and
 646 * should be given priority access to a request.
 647 */
 648static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
 649{
 650        if (!ioc)
 651                return 0;
 652
 653        /*
 654         * Make sure the process is able to allocate at least 1 request
 655         * even if the batch times out, otherwise we could theoretically
 656         * lose wakeups.
 657         */
 658        return ioc->nr_batch_requests == q->nr_batching ||
 659                (ioc->nr_batch_requests > 0
 660                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
 661}
 662
 663/*
 664 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 665 * will cause the process to be a "batcher" on all queues in the system. This
 666 * is the behaviour we want though - once it gets a wakeup it should be given
 667 * a nice run.
 668 */
 669static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 670{
 671        if (!ioc || ioc_batching(q, ioc))
 672                return;
 673
 674        ioc->nr_batch_requests = q->nr_batching;
 675        ioc->last_waited = jiffies;
 676}
 677
 678static void __freed_request(struct request_queue *q, int rw)
 679{
 680        struct request_list *rl = &q->rq;
 681
 682        if (rl->count[rw] < queue_congestion_off_threshold(q))
 683                blk_clear_queue_congested(q, rw);
 684
 685        if (rl->count[rw] + 1 <= q->nr_requests) {
 686                if (waitqueue_active(&rl->wait[rw]))
 687                        wake_up(&rl->wait[rw]);
 688
 689                blk_clear_queue_full(q, rw);
 690        }
 691}
 692
 693/*
 694 * A request has just been released.  Account for it, update the full and
 695 * congestion status, wake up any waiters.   Called under q->queue_lock.
 696 */
 697static void freed_request(struct request_queue *q, int rw, int priv)
 698{
 699        struct request_list *rl = &q->rq;
 700
 701        rl->count[rw]--;
 702        if (priv)
 703                rl->elvpriv--;
 704
 705        __freed_request(q, rw);
 706
 707        if (unlikely(rl->starved[rw ^ 1]))
 708                __freed_request(q, rw ^ 1);
 709}
 710
 711#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 712/*
 713 * Get a free request, queue_lock must be held.
 714 * Returns NULL on failure, with queue_lock held.
 715 * Returns !NULL on success, with queue_lock *not held*.
 716 */
 717static struct request *get_request(struct request_queue *q, int rw_flags,
 718                                   struct bio *bio, gfp_t gfp_mask)
 719{
 720        struct request *rq = NULL;
 721        struct request_list *rl = &q->rq;
 722        struct io_context *ioc = NULL;
 723        const int rw = rw_flags & 0x01;
 724        int may_queue, priv;
 725
 726        may_queue = elv_may_queue(q, rw_flags);
 727        if (may_queue == ELV_MQUEUE_NO)
 728                goto rq_starved;
 729
 730        if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
 731                if (rl->count[rw]+1 >= q->nr_requests) {
 732                        ioc = current_io_context(GFP_ATOMIC, q->node);
 733                        /*
 734                         * The queue will fill after this allocation, so set
 735                         * it as full, and mark this process as "batching".
 736                         * This process will be allowed to complete a batch of
 737                         * requests, others will be blocked.
 738                         */
 739                        if (!blk_queue_full(q, rw)) {
 740                                ioc_set_batching(q, ioc);
 741                                blk_set_queue_full(q, rw);
 742                        } else {
 743                                if (may_queue != ELV_MQUEUE_MUST
 744                                                && !ioc_batching(q, ioc)) {
 745                                        /*
 746                                         * The queue is full and the allocating
 747                                         * process is not a "batcher", and not
 748                                         * exempted by the IO scheduler
 749                                         */
 750                                        goto out;
 751                                }
 752                        }
 753                }
 754                blk_set_queue_congested(q, rw);
 755        }
 756
 757        /*
 758         * Only allow batching queuers to allocate up to 50% over the defined
 759         * limit of requests, otherwise we could have thousands of requests
 760         * allocated with any setting of ->nr_requests
 761         */
 762        if (rl->count[rw] >= (3 * q->nr_requests / 2))
 763                goto out;
 764
 765        rl->count[rw]++;
 766        rl->starved[rw] = 0;
 767
 768        priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 769        if (priv)
 770                rl->elvpriv++;
 771
 772        spin_unlock_irq(q->queue_lock);
 773
 774        rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 775        if (unlikely(!rq)) {
 776                /*
 777                 * Allocation failed presumably due to memory. Undo anything
 778                 * we might have messed up.
 779                 *
 780                 * Allocating task should really be put onto the front of the
 781                 * wait queue, but this is pretty rare.
 782                 */
 783                spin_lock_irq(q->queue_lock);
 784                freed_request(q, rw, priv);
 785
 786                /*
 787                 * in the very unlikely event that allocation failed and no
  788 * requests for this direction were pending, mark us starved
 789                 * so that freeing of a request in the other direction will
 790                 * notice us. another possible fix would be to split the
 791                 * rq mempool into READ and WRITE
 792                 */
 793rq_starved:
 794                if (unlikely(rl->count[rw] == 0))
 795                        rl->starved[rw] = 1;
 796
 797                goto out;
 798        }
 799
 800        /*
 801         * ioc may be NULL here, and ioc_batching will be false. That's
 802         * OK, if the queue is under the request limit then requests need
 803         * not count toward the nr_batch_requests limit. There will always
 804         * be some limit enforced by BLK_BATCH_TIME.
 805         */
 806        if (ioc_batching(q, ioc))
 807                ioc->nr_batch_requests--;
 808
 809        blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 810out:
 811        return rq;
 812}
 813
 814/*
 815 * No available requests for this queue, unplug the device and wait for some
 816 * requests to become available.
 817 *
 818 * Called with q->queue_lock held, and returns with it unlocked.
 819 */
 820static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 821                                        struct bio *bio)
 822{
 823        const int rw = rw_flags & 0x01;
 824        struct request *rq;
 825
 826        rq = get_request(q, rw_flags, bio, GFP_NOIO);
 827        while (!rq) {
 828                DEFINE_WAIT(wait);
 829                struct io_context *ioc;
 830                struct request_list *rl = &q->rq;
 831
 832                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 833                                TASK_UNINTERRUPTIBLE);
 834
 835                blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
 836
 837                __generic_unplug_device(q);
 838                spin_unlock_irq(q->queue_lock);
 839                io_schedule();
 840
 841                /*
 842                 * After sleeping, we become a "batching" process and
 843                 * will be able to allocate at least one request, and
  844                 * up to a big batch of them for a small period of time.
 845                 * See ioc_batching, ioc_set_batching
 846                 */
 847                ioc = current_io_context(GFP_NOIO, q->node);
 848                ioc_set_batching(q, ioc);
 849
 850                spin_lock_irq(q->queue_lock);
 851                finish_wait(&rl->wait[rw], &wait);
 852
 853                rq = get_request(q, rw_flags, bio, GFP_NOIO);
  854        }
 855
 856        return rq;
 857}
 858
 859struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 860{
 861        struct request *rq;
 862
 863        BUG_ON(rw != READ && rw != WRITE);
 864
 865        spin_lock_irq(q->queue_lock);
 866        if (gfp_mask & __GFP_WAIT) {
 867                rq = get_request_wait(q, rw, NULL);
 868        } else {
 869                rq = get_request(q, rw, NULL, gfp_mask);
 870                if (!rq)
 871                        spin_unlock_irq(q->queue_lock);
 872        }
 873        /* q->queue_lock is unlocked at this point */
 874
 875        return rq;
 876}
 877EXPORT_SYMBOL(blk_get_request);
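    /*
     * Example (illustrative sketch, not part of the original file):
     * allocating a request for a driver-private command.  With __GFP_WAIT
     * set (as in GFP_KERNEL) the call may sleep but will not return NULL;
     * without it the caller must check for failure.  my_private_data is
     * hypothetical.
     *
     *	rq = blk_get_request(q, READ, GFP_KERNEL);
     *	rq->cmd_type = REQ_TYPE_SPECIAL;
     *	rq->special = my_private_data;
     *	// ... hand rq to the device, then drop the reference:
     *	blk_put_request(rq);
     */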
 878
 879/**
 880 * blk_start_queueing - initiate dispatch of requests to device
 881 * @q:          request queue to kick into gear
 882 *
 883 * This is basically a helper to remove the need to know whether a queue
 884 * is plugged or not if someone just wants to initiate dispatch of requests
 885 * for this queue.
 886 *
 887 * The queue lock must be held with interrupts disabled.
 888 */
 889void blk_start_queueing(struct request_queue *q)
 890{
 891        if (!blk_queue_plugged(q))
 892                q->request_fn(q);
 893        else
 894                __generic_unplug_device(q);
 895}
 896EXPORT_SYMBOL(blk_start_queueing);
 897
 898/**
 899 * blk_requeue_request - put a request back on queue
 900 * @q:          request queue where request should be inserted
 901 * @rq:         request to be inserted
 902 *
 903 * Description:
 904 *    Drivers often keep queueing requests until the hardware cannot accept
 905 *    more, when that condition happens we need to put the request back
 906 *    on the queue. Must be called with queue lock held.
 907 */
 908void blk_requeue_request(struct request_queue *q, struct request *rq)
 909{
 910        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
 911
 912        if (blk_rq_tagged(rq))
 913                blk_queue_end_tag(q, rq);
 914
 915        elv_requeue_request(q, rq);
 916}
 917EXPORT_SYMBOL(blk_requeue_request);
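    /*
     * Example (illustrative sketch, not part of the original file): a
     * request_fn that has already dequeued a request but then finds the
     * hardware busy can put it back and stop the queue until a completion
     * restarts it.  my_hw_busy() and mydev are hypothetical.
     *
     *	if (my_hw_busy(mydev)) {
     *		blk_requeue_request(q, rq);	// queue lock held
     *		blk_stop_queue(q);
     *		return;
     *	}
     */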
 918
 919/**
 920 * blk_insert_request - insert a special request in to a request queue
 921 * @q:          request queue where request should be inserted
 922 * @rq:         request to be inserted
 923 * @at_head:    insert request at head or tail of queue
 924 * @data:       private data
 925 *
 926 * Description:
 927 *    Many block devices need to execute commands asynchronously, so they don't
 928 *    block the whole kernel from preemption during request execution.  This is
  929 *    accomplished normally by inserting artificial requests tagged as
 930 *    REQ_SPECIAL in to the corresponding request queue, and letting them be
 931 *    scheduled for actual execution by the request queue.
 932 *
  933 *    We have the option of inserting at the head or the tail of the queue.
 934 *    Typically we use the tail for new ioctls and so forth.  We use the head
 935 *    of the queue for things like a QUEUE_FULL message from a device, or a
 936 *    host that is unable to accept a particular command.
 937 */
 938void blk_insert_request(struct request_queue *q, struct request *rq,
 939                        int at_head, void *data)
 940{
 941        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 942        unsigned long flags;
 943
 944        /*
 945         * tell I/O scheduler that this isn't a regular read/write (ie it
 946         * must not attempt merges on this) and that it acts as a soft
 947         * barrier
 948         */
 949        rq->cmd_type = REQ_TYPE_SPECIAL;
 950        rq->cmd_flags |= REQ_SOFTBARRIER;
 951
 952        rq->special = data;
 953
 954        spin_lock_irqsave(q->queue_lock, flags);
 955
 956        /*
 957         * If command is tagged, release the tag
 958         */
 959        if (blk_rq_tagged(rq))
 960                blk_queue_end_tag(q, rq);
 961
 962        drive_stat_acct(rq, 1);
 963        __elv_add_request(q, rq, where, 0);
 964        blk_start_queueing(q);
 965        spin_unlock_irqrestore(q->queue_lock, flags);
 966}
 967EXPORT_SYMBOL(blk_insert_request);
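    /*
     * Example (illustrative sketch, not part of the original file):
     * queueing a driver-private command at the head of the queue, e.g.
     * after a QUEUE_FULL condition as described above.  my_cmd is
     * hypothetical driver data; blk_insert_request() marks the request
     * REQ_TYPE_SPECIAL itself.
     *
     *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
     *	blk_insert_request(q, rq, 1, my_cmd);	// 1 == insert at head
     */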
 968
 969/*
 970 * add-request adds a request to the linked list.
 971 * queue lock is held and interrupts disabled, as we muck with the
 972 * request queue list.
 973 */
 974static inline void add_request(struct request_queue *q, struct request *req)
 975{
 976        drive_stat_acct(req, 1);
 977
 978        /*
 979         * elevator indicated where it wants this request to be
 980         * inserted at elevator_merge time
 981         */
 982        __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 983}
 984
 985/*
 986 * disk_round_stats()   - Round off the performance stats on a struct
 987 * disk_stats.
 988 *
 989 * The average IO queue length and utilisation statistics are maintained
 990 * by observing the current state of the queue length and the amount of
 991 * time it has been in this state for.
 992 *
 993 * Normally, that accounting is done on IO completion, but that can result
 994 * in more than a second's worth of IO being accounted for within any one
 995 * second, leading to >100% utilisation.  To deal with that, we call this
 996 * function to do a round-off before returning the results when reading
 997 * /proc/diskstats.  This accounts immediately for all queue usage up to
 998 * the current jiffies and restarts the counters again.
 999 */
1000void disk_round_stats(struct gendisk *disk)
1001{
1002        unsigned long now = jiffies;
1003
1004        if (now == disk->stamp)
1005                return;
1006
1007        if (disk->in_flight) {
1008                __disk_stat_add(disk, time_in_queue,
1009                                disk->in_flight * (now - disk->stamp));
1010                __disk_stat_add(disk, io_ticks, (now - disk->stamp));
1011        }
1012        disk->stamp = now;
1013}
1014EXPORT_SYMBOL_GPL(disk_round_stats);
1015
1016void part_round_stats(struct hd_struct *part)
1017{
1018        unsigned long now = jiffies;
1019
1020        if (now == part->stamp)
1021                return;
1022
1023        if (part->in_flight) {
1024                __part_stat_add(part, time_in_queue,
1025                                part->in_flight * (now - part->stamp));
1026                __part_stat_add(part, io_ticks, (now - part->stamp));
1027        }
1028        part->stamp = now;
1029}
1030
1031/*
1032 * queue lock must be held
1033 */
1034void __blk_put_request(struct request_queue *q, struct request *req)
1035{
1036        if (unlikely(!q))
1037                return;
1038        if (unlikely(--req->ref_count))
1039                return;
1040
1041        elv_completed_request(q, req);
1042
1043        /*
1044         * Request may not have originated from ll_rw_blk. if not,
1045         * it didn't come out of our reserved rq pools
1046         */
1047        if (req->cmd_flags & REQ_ALLOCED) {
1048                int rw = rq_data_dir(req);
1049                int priv = req->cmd_flags & REQ_ELVPRIV;
1050
1051                BUG_ON(!list_empty(&req->queuelist));
1052                BUG_ON(!hlist_unhashed(&req->hash));
1053
1054                blk_free_request(q, req);
1055                freed_request(q, rw, priv);
1056        }
1057}
1058EXPORT_SYMBOL_GPL(__blk_put_request);
1059
1060void blk_put_request(struct request *req)
1061{
1062        unsigned long flags;
1063        struct request_queue *q = req->q;
1064
1065        spin_lock_irqsave(q->queue_lock, flags);
1066        __blk_put_request(q, req);
1067        spin_unlock_irqrestore(q->queue_lock, flags);
1068}
1069EXPORT_SYMBOL(blk_put_request);
1070
1071void init_request_from_bio(struct request *req, struct bio *bio)
1072{
1073        req->cmd_type = REQ_TYPE_FS;
1074
1075        /*
1076         * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
1077         */
1078        if (bio_rw_ahead(bio) || bio_failfast(bio))
1079                req->cmd_flags |= REQ_FAILFAST;
1080
1081        /*
 1082         * REQ_BARRIER implies no merging, but let's make it explicit
1083         */
1084        if (unlikely(bio_barrier(bio)))
1085                req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
1086
1087        if (bio_sync(bio))
1088                req->cmd_flags |= REQ_RW_SYNC;
1089        if (bio_rw_meta(bio))
1090                req->cmd_flags |= REQ_RW_META;
1091
1092        req->errors = 0;
1093        req->hard_sector = req->sector = bio->bi_sector;
1094        req->ioprio = bio_prio(bio);
1095        req->start_time = jiffies;
1096        blk_rq_bio_prep(req->q, req, bio);
1097}
1098
1099static int __make_request(struct request_queue *q, struct bio *bio)
1100{
1101        struct request *req;
1102        int el_ret, nr_sectors, barrier, err;
1103        const unsigned short prio = bio_prio(bio);
1104        const int sync = bio_sync(bio);
1105        int rw_flags;
1106
1107        nr_sectors = bio_sectors(bio);
1108
1109        /*
1110         * low level driver can indicate that it wants pages above a
1111         * certain limit bounced to low memory (ie for highmem, or even
1112         * ISA dma in theory)
1113         */
1114        blk_queue_bounce(q, &bio);
1115
1116        barrier = bio_barrier(bio);
1117        if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) {
1118                err = -EOPNOTSUPP;
1119                goto end_io;
1120        }
1121
1122        spin_lock_irq(q->queue_lock);
1123
1124        if (unlikely(barrier) || elv_queue_empty(q))
1125                goto get_rq;
1126
1127        el_ret = elv_merge(q, &req, bio);
1128        switch (el_ret) {
1129        case ELEVATOR_BACK_MERGE:
1130                BUG_ON(!rq_mergeable(req));
1131
1132                if (!ll_back_merge_fn(q, req, bio))
1133                        break;
1134
1135                blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
1136
1137                req->biotail->bi_next = bio;
1138                req->biotail = bio;
1139                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1140                req->ioprio = ioprio_best(req->ioprio, prio);
1141                drive_stat_acct(req, 0);
1142                if (!attempt_back_merge(q, req))
1143                        elv_merged_request(q, req, el_ret);
1144                goto out;
1145
1146        case ELEVATOR_FRONT_MERGE:
1147                BUG_ON(!rq_mergeable(req));
1148
1149                if (!ll_front_merge_fn(q, req, bio))
1150                        break;
1151
1152                blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
1153
1154                bio->bi_next = req->bio;
1155                req->bio = bio;
1156
1157                /*
1158                 * may not be valid. if the low level driver said
1159                 * it didn't need a bounce buffer then it better
1160                 * not touch req->buffer either...
1161                 */
1162                req->buffer = bio_data(bio);
1163                req->current_nr_sectors = bio_cur_sectors(bio);
1164                req->hard_cur_sectors = req->current_nr_sectors;
1165                req->sector = req->hard_sector = bio->bi_sector;
1166                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
1167                req->ioprio = ioprio_best(req->ioprio, prio);
1168                drive_stat_acct(req, 0);
1169                if (!attempt_front_merge(q, req))
1170                        elv_merged_request(q, req, el_ret);
1171                goto out;
1172
 1173        /* ELEVATOR_NO_MERGE: elevator says don't/can't merge. */
1174        default:
1175                ;
1176        }
1177
1178get_rq:
1179        /*
1180         * This sync check and mask will be re-done in init_request_from_bio(),
1181         * but we need to set it earlier to expose the sync flag to the
1182         * rq allocator and io schedulers.
1183         */
1184        rw_flags = bio_data_dir(bio);
1185        if (sync)
1186                rw_flags |= REQ_RW_SYNC;
1187
1188        /*
 1189         * Grab a free request. This might sleep but cannot fail.
1190         * Returns with the queue unlocked.
1191         */
1192        req = get_request_wait(q, rw_flags, bio);
1193
1194        /*
1195         * After dropping the lock and possibly sleeping here, our request
1196         * may now be mergeable after it had proven unmergeable (above).
1197         * We don't worry about that case for efficiency. It won't happen
1198         * often, and the elevators are able to handle it.
1199         */
1200        init_request_from_bio(req, bio);
1201
1202        spin_lock_irq(q->queue_lock);
1203        if (elv_queue_empty(q))
1204                blk_plug_device(q);
1205        add_request(q, req);
1206out:
1207        if (sync)
1208                __generic_unplug_device(q);
1209
1210        spin_unlock_irq(q->queue_lock);
1211        return 0;
1212
1213end_io:
1214        bio_endio(bio, err);
1215        return 0;
1216}
1217
1218/*
 1219 * If bio->bi_bdev is a partition, remap the location
1220 */
1221static inline void blk_partition_remap(struct bio *bio)
1222{
1223        struct block_device *bdev = bio->bi_bdev;
1224
1225        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1226                struct hd_struct *p = bdev->bd_part;
1227
1228                bio->bi_sector += p->start_sect;
1229                bio->bi_bdev = bdev->bd_contains;
1230
1231                blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
1232                                    bdev->bd_dev, bio->bi_sector,
1233                                    bio->bi_sector - p->start_sect);
1234        }
1235}
1236
1237static void handle_bad_sector(struct bio *bio)
1238{
1239        char b[BDEVNAME_SIZE];
1240
1241        printk(KERN_INFO "attempt to access beyond end of device\n");
1242        printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1243                        bdevname(bio->bi_bdev, b),
1244                        bio->bi_rw,
1245                        (unsigned long long)bio->bi_sector + bio_sectors(bio),
1246                        (long long)(bio->bi_bdev->bd_inode->i_size >> 9));
1247
1248        set_bit(BIO_EOF, &bio->bi_flags);
1249}
1250
1251#ifdef CONFIG_FAIL_MAKE_REQUEST
1252
1253static DECLARE_FAULT_ATTR(fail_make_request);
1254
1255static int __init setup_fail_make_request(char *str)
1256{
1257        return setup_fault_attr(&fail_make_request, str);
1258}
1259__setup("fail_make_request=", setup_fail_make_request);
1260
1261static int should_fail_request(struct bio *bio)
1262{
1263        if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) ||
1264            (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail))
1265                return should_fail(&fail_make_request, bio->bi_size);
1266
1267        return 0;
1268}
1269
1270static int __init fail_make_request_debugfs(void)
1271{
1272        return init_fault_attr_dentries(&fail_make_request,
1273                                        "fail_make_request");
1274}
1275
1276late_initcall(fail_make_request_debugfs);
1277
1278#else /* CONFIG_FAIL_MAKE_REQUEST */
1279
1280static inline int should_fail_request(struct bio *bio)
1281{
1282        return 0;
1283}
1284
1285#endif /* CONFIG_FAIL_MAKE_REQUEST */
1286
1287/*
1288 * Check whether this bio extends beyond the end of the device.
1289 */
1290static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1291{
1292        sector_t maxsector;
1293
1294        if (!nr_sectors)
1295                return 0;
1296
1297        /* Test device or partition size, when known. */
1298        maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1299        if (maxsector) {
1300                sector_t sector = bio->bi_sector;
1301
1302                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1303                        /*
1304                         * This may well happen - the kernel calls bread()
1305                         * without checking the size of the device, e.g., when
1306                         * mounting a device.
1307                         */
1308                        handle_bad_sector(bio);
1309                        return 1;
1310                }
1311        }
1312
1313        return 0;
1314}
1315
1316/**
 1317 * generic_make_request - hand a buffer to its device driver for I/O
1318 * @bio:  The bio describing the location in memory and on the device.
1319 *
1320 * generic_make_request() is used to make I/O requests of block
1321 * devices. It is passed a &struct bio, which describes the I/O that needs
1322 * to be done.
1323 *
1324 * generic_make_request() does not return any status.  The
1325 * success/failure status of the request, along with notification of
1326 * completion, is delivered asynchronously through the bio->bi_end_io
 1327 * function described (one day) elsewhere.
1328 *
1329 * The caller of generic_make_request must make sure that bi_io_vec
 1330 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
1331 * set to describe the device address, and the
1332 * bi_end_io and optionally bi_private are set to describe how
1333 * completion notification should be signaled.
1334 *
1335 * generic_make_request and the drivers it calls may use bi_next if this
 1336 * bio happens to be merged with someone else, and may change bi_bdev and
1337 * bi_sector for remaps as it sees fit.  So the values of these fields
1338 * should NOT be depended on after the call to generic_make_request.
1339 */
1340static inline void __generic_make_request(struct bio *bio)
1341{
1342        struct request_queue *q;
1343        sector_t old_sector;
1344        int ret, nr_sectors = bio_sectors(bio);
1345        dev_t old_dev;
1346        int err = -EIO;
1347
1348        might_sleep();
1349
1350        if (bio_check_eod(bio, nr_sectors))
1351                goto end_io;
1352
1353        /*
1354         * Resolve the mapping until finished. (drivers are
1355         * still free to implement/resolve their own stacking
1356         * by explicitly returning 0)
1357         *
1358         * NOTE: we don't repeat the blk_size check for each new device.
1359         * Stacking drivers are expected to know what they are doing.
1360         */
1361        old_sector = -1;
1362        old_dev = 0;
1363        do {
1364                char b[BDEVNAME_SIZE];
1365
1366                q = bdev_get_queue(bio->bi_bdev);
1367                if (!q) {
1368                        printk(KERN_ERR
1369                               "generic_make_request: Trying to access "
1370                                "nonexistent block-device %s (%Lu)\n",
1371                                bdevname(bio->bi_bdev, b),
1372                                (long long) bio->bi_sector);
1373end_io:
1374                        bio_endio(bio, err);
1375                        break;
1376                }
1377
1378                if (unlikely(nr_sectors > q->max_hw_sectors)) {
1379                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1380                                bdevname(bio->bi_bdev, b),
1381                                bio_sectors(bio),
1382                                q->max_hw_sectors);
1383                        goto end_io;
1384                }
1385
1386                if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
1387                        goto end_io;
1388
1389                if (should_fail_request(bio))
1390                        goto end_io;
1391
1392                /*
1393                 * If this device has partitions, remap block n
1394                 * of partition p to block n+start(p) of the disk.
1395                 */
1396                blk_partition_remap(bio);
1397
1398                if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1399                        goto end_io;
1400
1401                if (old_sector != -1)
1402                        blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
1403                                            old_sector);
1404
1405                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
1406
1407                old_sector = bio->bi_sector;
1408                old_dev = bio->bi_bdev->bd_dev;
1409
1410                if (bio_check_eod(bio, nr_sectors))
1411                        goto end_io;
1412                if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
1413                        err = -EOPNOTSUPP;
1414                        goto end_io;
1415                }
1416
1417                ret = q->make_request_fn(q, bio);
1418        } while (ret);
1419}
1420
1421/*
1422 * We only want one ->make_request_fn to be active at a time,
1423 * else stack usage with stacked devices could be a problem.
1424 * So use current->bio_{list,tail} to keep a list of requests
 1425 * submitted by a make_request_fn function.
1426 * current->bio_tail is also used as a flag to say if
1427 * generic_make_request is currently active in this task or not.
1428 * If it is NULL, then no make_request is active.  If it is non-NULL,
1429 * then a make_request is active, and new requests should be added
1430 * at the tail
1431 */
1432void generic_make_request(struct bio *bio)
1433{
1434        if (current->bio_tail) {
1435                /* make_request is active */
1436                *(current->bio_tail) = bio;
1437                bio->bi_next = NULL;
1438                current->bio_tail = &bio->bi_next;
1439                return;
1440        }
1441        /* following loop may be a bit non-obvious, and so deserves some
1442         * explanation.
1443         * Before entering the loop, bio->bi_next is NULL (as all callers
1444         * ensure that) so we have a list with a single bio.
1445         * We pretend that we have just taken it off a longer list, so
1446         * we assign bio_list to the next (which is NULL) and bio_tail
1447         * to &bio_list, thus initialising the bio_list of new bios to be
1448         * added.  __generic_make_request may indeed add some more bios
1449         * through a recursive call to generic_make_request.  If it
1450         * did, we find a non-NULL value in bio_list and re-enter the loop
1451         * from the top.  In this case we really did just take the bio
 1452 * off the top of the list (no pretending) and so fixup bio_list and
1453         * bio_tail or bi_next, and call into __generic_make_request again.
1454         *
1455         * The loop was structured like this to make only one call to
1456         * __generic_make_request (which is important as it is large and
1457         * inlined) and to keep the structure simple.
1458         */
1459        BUG_ON(bio->bi_next);
1460        do {
1461                current->bio_list = bio->bi_next;
1462                if (bio->bi_next == NULL)
1463                        current->bio_tail = &current->bio_list;
1464                else
1465                        bio->bi_next = NULL;
1466                __generic_make_request(bio);
1467                bio = current->bio_list;
1468        } while (bio);
1469        current->bio_tail = NULL; /* deactivate */
1470}
1471EXPORT_SYMBOL(generic_make_request);
1472
1473/**
 1474 * submit_bio - submit a bio to the block device layer for I/O
1475 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1476 * @bio: The &struct bio which describes the I/O
1477 *
1478 * submit_bio() is very similar in purpose to generic_make_request(), and
1479 * uses that function to do most of the work. Both are fairly rough
1480 * interfaces, @bio must be presetup and ready for I/O.
1481 *
1482 */
1483void submit_bio(int rw, struct bio *bio)
1484{
1485        int count = bio_sectors(bio);
1486
1487        bio->bi_rw |= rw;
1488
1489        /*
1490         * If it's a regular read/write or a barrier with data attached,
1491         * go through the normal accounting stuff before submission.
1492         */
1493        if (!bio_empty_barrier(bio)) {
1494
1495                BIO_BUG_ON(!bio->bi_size);
1496                BIO_BUG_ON(!bio->bi_io_vec);
1497
1498                if (rw & WRITE) {
1499                        count_vm_events(PGPGOUT, count);
1500                } else {
1501                        task_io_account_read(bio->bi_size);
1502                        count_vm_events(PGPGIN, count);
1503                }
1504
1505                if (unlikely(block_dump)) {
1506                        char b[BDEVNAME_SIZE];
1507                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
1508                        current->comm, task_pid_nr(current),
1509                                (rw & WRITE) ? "WRITE" : "READ",
1510                                (unsigned long long)bio->bi_sector,
1511                                bdevname(bio->bi_bdev, b));
1512                }
1513        }
1514
1515        generic_make_request(bio);
1516}
1517EXPORT_SYMBOL(submit_bio);
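    /*
     * Example (illustrative sketch, not part of the original file):
     * building a single-page bio and handing it to the block layer.
     * my_page, my_sector and my_end_io are hypothetical; completion is
     * reported asynchronously through the bi_end_io callback.
     *
     *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
     *
     *	bio->bi_bdev = bdev;
     *	bio->bi_sector = my_sector;
     *	bio->bi_end_io = my_end_io;
     *	bio_add_page(bio, my_page, PAGE_SIZE, 0);
     *	submit_bio(READ, bio);
     */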
1518
1519/**
1520 * __end_that_request_first - end I/O on a request
1521 * @req:      the request being processed
1522 * @error:    0 for success, < 0 for error
1523 * @nr_bytes: number of bytes to complete
1524 *
1525 * Description:
1526 *     Ends I/O on a number of bytes attached to @req, and sets it up
1527 *     for the next range of segments (if any) in the cluster.
1528 *
1529 * Return:
1530 *     0 - we are done with this request, call end_that_request_last()
1531 *     1 - still buffers pending for this request
1532 **/
1533static int __end_that_request_first(struct request *req, int error,
1534                                    int nr_bytes)
1535{
1536        int total_bytes, bio_nbytes, next_idx = 0;
1537        struct bio *bio;
1538
1539        blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
1540
1541        /*
1542         * for a REQ_BLOCK_PC request, we want to carry any eventual
1543         * sense key with us all the way through
1544         */
1545        if (!blk_pc_request(req))
1546                req->errors = 0;
1547
1548        if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
1549                printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
1550                                req->rq_disk ? req->rq_disk->disk_name : "?",
1551                                (unsigned long long)req->sector);
1552        }
1553
1554        if (blk_fs_request(req) && req->rq_disk) {
1555                struct hd_struct *part = get_part(req->rq_disk, req->sector);
1556                const int rw = rq_data_dir(req);
1557
1558                all_stat_add(req->rq_disk, part, sectors[rw],
1559                                nr_bytes >> 9, req->sector);
1560        }
1561
1562        total_bytes = bio_nbytes = 0;
1563        while ((bio = req->bio) != NULL) {
1564                int nbytes;
1565
1566                /*
1567                 * For an empty barrier request, the low level driver must
1568                 * store a potential error location in ->sector. We pass
1569                 * that back up in ->bi_sector.
1570                 */
1571                if (blk_empty_barrier(req))
1572                        bio->bi_sector = req->sector;
1573
1574                if (nr_bytes >= bio->bi_size) {
1575                        req->bio = bio->bi_next;
1576                        nbytes = bio->bi_size;
1577                        req_bio_endio(req, bio, nbytes, error);
1578                        next_idx = 0;
1579                        bio_nbytes = 0;
1580                } else {
1581                        int idx = bio->bi_idx + next_idx;
1582
1583                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
1584                                blk_dump_rq_flags(req, "__end_that");
1585                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
1586                                       __func__, bio->bi_idx, bio->bi_vcnt);
1587                                break;
1588                        }
1589
1590                        nbytes = bio_iovec_idx(bio, idx)->bv_len;
1591                        BIO_BUG_ON(nbytes > bio->bi_size);
1592
1593                        /*
1594                         * this bvec was not completed in full
1595                         */
1596                        if (unlikely(nbytes > nr_bytes)) {
1597                                bio_nbytes += nr_bytes;
1598                                total_bytes += nr_bytes;
1599                                break;
1600                        }
1601
1602                        /*
1603                         * advance to the next vector
1604                         */
1605                        next_idx++;
1606                        bio_nbytes += nbytes;
1607                }
1608
1609                total_bytes += nbytes;
1610                nr_bytes -= nbytes;
1611
1612                bio = req->bio;
1613                if (bio) {
1614                        /*
1615                         * end more of the request in this run, or stop and report 'not done'
1616                         */
1617                        if (unlikely(nr_bytes <= 0))
1618                                break;
1619                }
1620        }
1621
1622        /*
1623         * completely done
1624         */
1625        if (!req->bio)
1626                return 0;
1627
1628        /*
1629         * if the request wasn't completed, update state
1630         */
1631        if (bio_nbytes) {
1632                req_bio_endio(req, bio, bio_nbytes, error);
1633                bio->bi_idx += next_idx;
1634                bio_iovec(bio)->bv_offset += nr_bytes;
1635                bio_iovec(bio)->bv_len -= nr_bytes;
1636        }
1637
1638        blk_recalc_rq_sectors(req, total_bytes >> 9);
1639        blk_recalc_rq_segments(req);
1640        return 1;
1641}
1642
1643/*
1644 * splice the completion data to a local structure and hand off to
1645 * process_completion_queue() to complete the requests
1646 */
1647static void blk_done_softirq(struct softirq_action *h)
1648{
1649        struct list_head *cpu_list, local_list;
1650
1651        local_irq_disable();
1652        cpu_list = &__get_cpu_var(blk_cpu_done);
1653        list_replace_init(cpu_list, &local_list);
1654        local_irq_enable();
1655
1656        while (!list_empty(&local_list)) {
1657                struct request *rq;
1658
1659                rq = list_entry(local_list.next, struct request, donelist);
1660                list_del_init(&rq->donelist);
1661                rq->q->softirq_done_fn(rq);
1662        }
1663}
1664
1665static int __cpuinit blk_cpu_notify(struct notifier_block *self,
1666                                    unsigned long action, void *hcpu)
1667{
1668        /*
1669         * If a CPU goes away, splice its entries to the current CPU
1670         * and trigger a run of the softirq
1671         */
1672        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
1673                int cpu = (unsigned long) hcpu;
1674
1675                local_irq_disable();
1676                list_splice_init(&per_cpu(blk_cpu_done, cpu),
1677                                 &__get_cpu_var(blk_cpu_done));
1678                raise_softirq_irqoff(BLOCK_SOFTIRQ);
1679                local_irq_enable();
1680        }
1681
1682        return NOTIFY_OK;
1683}
1684
1685
1686static struct notifier_block blk_cpu_notifier __cpuinitdata = {
1687        .notifier_call  = blk_cpu_notify,
1688};
1689
1690/**
1691 * blk_complete_request - end I/O on a request
1692 * @req:      the request being processed
1693 *
1694 * Description:
1695 *     Ends all I/O on a request. It does not handle partial completions,
1696 *     unless the driver actually implements this in its completion callback
1697 *     through requeueing. The actual completion happens out-of-order,
1698 *     through a softirq handler. The user must have registered a completion
1699 *     callback through blk_queue_softirq_done().
1700 **/
1701
1702void blk_complete_request(struct request *req)
1703{
1704        struct list_head *cpu_list;
1705        unsigned long flags;
1706
1707        BUG_ON(!req->q->softirq_done_fn);
1708
1709        local_irq_save(flags);
1710
1711        cpu_list = &__get_cpu_var(blk_cpu_done);
1712        list_add_tail(&req->donelist, cpu_list);
1713        raise_softirq_irqoff(BLOCK_SOFTIRQ);
1714
1715        local_irq_restore(flags);
1716}
1717EXPORT_SYMBOL(blk_complete_request);
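
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * A hypothetical driver deferring completion work to the block softirq.
 * The handler below is registered once with blk_queue_softirq_done();
 * the hard-irq path then only has to call blk_complete_request().
 */
static void example_softirq_done(struct request *rq)
{
        /* softirq context: do the real completion work here */
        blk_end_request(rq, rq->errors ? -EIO : 0, blk_rq_bytes(rq));
}

/*
 * During setup:               blk_queue_softirq_done(q, example_softirq_done);
 * From the interrupt handler: blk_complete_request(rq);
 */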
1718
1719/*
1720 * queue lock must be held
1721 */
1722static void end_that_request_last(struct request *req, int error)
1723{
1724        struct gendisk *disk = req->rq_disk;
1725
1726        if (blk_rq_tagged(req))
1727                blk_queue_end_tag(req->q, req);
1728
1729        if (blk_queued_rq(req))
1730                blkdev_dequeue_request(req);
1731
1732        if (unlikely(laptop_mode) && blk_fs_request(req))
1733                laptop_io_completion();
1734
1735        /*
1736         * Account IO completion.  bar_rq isn't accounted as a normal
1737         * IO on queueing nor completion.  Accounting the containing
1738         * request is enough.
1739         */
1740        if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
1741                unsigned long duration = jiffies - req->start_time;
1742                const int rw = rq_data_dir(req);
1743                struct hd_struct *part = get_part(disk, req->sector);
1744
1745                __all_stat_inc(disk, part, ios[rw], req->sector);
1746                __all_stat_add(disk, part, ticks[rw], duration, req->sector);
1747                disk_round_stats(disk);
1748                disk->in_flight--;
1749                if (part) {
1750                        part_round_stats(part);
1751                        part->in_flight--;
1752                }
1753        }
1754
1755        if (req->end_io)
1756                req->end_io(req, error);
1757        else {
1758                if (blk_bidi_rq(req))
1759                        __blk_put_request(req->next_rq->q, req->next_rq);
1760
1761                __blk_put_request(req->q, req);
1762        }
1763}
1764
1765static inline void __end_request(struct request *rq, int uptodate,
1766                                 unsigned int nr_bytes)
1767{
1768        int error = 0;
1769
1770        if (uptodate <= 0)
1771                error = uptodate ? uptodate : -EIO;
1772
1773        __blk_end_request(rq, error, nr_bytes);
1774}
1775
1776/**
1777 * blk_rq_bytes - Returns bytes left to complete in the entire request
1778 * @rq: the request being processed
1779 **/
1780unsigned int blk_rq_bytes(struct request *rq)
1781{
1782        if (blk_fs_request(rq))
1783                return rq->hard_nr_sectors << 9;
1784
1785        return rq->data_len;
1786}
1787EXPORT_SYMBOL_GPL(blk_rq_bytes);
1788
1789/**
1790 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
1791 * @rq: the request being processed
1792 **/
1793unsigned int blk_rq_cur_bytes(struct request *rq)
1794{
1795        if (blk_fs_request(rq))
1796                return rq->current_nr_sectors << 9;
1797
1798        if (rq->bio)
1799                return rq->bio->bi_size;
1800
1801        return rq->data_len;
1802}
1803EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
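
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * The difference between the two helpers above, as a hypothetical driver
 * might use them: blk_rq_cur_bytes() covers only the current segment,
 * while blk_rq_bytes() covers everything still outstanding on the request.
 */
static int example_finish(struct request *rq, int error, int whole_request)
{
        unsigned int bytes = whole_request ? blk_rq_bytes(rq)
                                           : blk_rq_cur_bytes(rq);

        /* returns non-zero while the request still has pending buffers */
        return blk_end_request(rq, error, bytes);
}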
1804
1805/**
1806 * end_queued_request - end all I/O on a queued request
1807 * @rq:         the request being processed
1808 * @uptodate:   error value or 0/1 uptodate flag
1809 *
1810 * Description:
1811 *     Ends all I/O on a request, and removes it from the block layer queues.
1812 *     Not suitable for normal IO completion, unless the driver still has
1813 *     the request attached to the block layer.
1814 *
1815 **/
1816void end_queued_request(struct request *rq, int uptodate)
1817{
1818        __end_request(rq, uptodate, blk_rq_bytes(rq));
1819}
1820EXPORT_SYMBOL(end_queued_request);
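
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * A hypothetical teardown path that fails every request still sitting on
 * the queue, e.g. when the underlying device has gone away.  The queue
 * lock must be held; elv_next_request() and the completion expect it.
 */
static void example_fail_queued_requests(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL)
                end_queued_request(rq, -EIO);
}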
1821
1822/**
1823 * end_dequeued_request - end all I/O on a dequeued request
1824 * @rq:         the request being processed
1825 * @uptodate:   error value or 0/1 uptodate flag
1826 *
1827 * Description:
1828 *     Ends all I/O on a request. The request must already have been
1829 *     dequeued using blkdev_dequeue_request(), as is normally the case
1830 *     for most drivers.
1831 *
1832 **/
1833void end_dequeued_request(struct request *rq, int uptodate)
1834{
1835        __end_request(rq, uptodate, blk_rq_bytes(rq));
1836}
1837EXPORT_SYMBOL(end_dequeued_request);
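
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * The usual shape of a ->request_fn for a driver that takes requests off
 * the queue before completing them.  ->request_fn runs with the queue
 * lock held, which the completion below relies on.  The example_* name
 * and the "complete immediately" behaviour are hypothetical.
 */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                blkdev_dequeue_request(rq);
                /* ... transfer the data for @rq here ... */
                end_dequeued_request(rq, 1);    /* completed successfully */
        }
}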
1838
1839
1840/**
1841 * end_request - end I/O on the current segment of the request
1842 * @req:        the request being processed
1843 * @uptodate:   error value or 0/1 uptodate flag
1844 *
1845 * Description:
1846 *     Ends I/O on the current segment of a request. If that is the only
1847 *     remaining segment, the request is also completed and freed.
1848 *
1849 *     This is a remnant of how older block drivers handled IO completions.
1850 *     Modern drivers typically end IO on the full request in one go, unless
1851 *     they have a residual value to account for. For that case this function
1852 *     isn't really useful, unless the residual just happens to be the
1853 *     full current segment. In other words, don't use this function in new
1854 *     code. Use blk_end_request() or __blk_end_request() instead; pass a
1855 *     byte count smaller than blk_rq_bytes() when only part of the
1856 *     request should be completed.
1857 *
1858 **/
1859void end_request(struct request *req, int uptodate)
1860{
1861        __end_request(req, uptodate, req->hard_cur_sectors << 9);
1862}
1863EXPORT_SYMBOL(end_request);
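
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * The legacy style of completion that end_request() was written for:
 * one segment at a time, typically from the interrupt handler of an old
 * PIO-style driver, with the queue lock held.
 */
static void example_segment_done(struct request *rq, int success)
{
        /* finishes only the current segment; the request is freed
         * automatically once its last segment has been completed */
        end_request(rq, success);
}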
1864
1865/**
1866 * blk_end_io - Generic end_io function to complete a request.
1867 * @rq:           the request being processed
1868 * @error:        0 for success, < 0 for error
1869 * @nr_bytes:     number of bytes to complete @rq
1870 * @bidi_bytes:   number of bytes to complete @rq->next_rq
1871 * @drv_callback: function called between completion of bios in the request
1872 *                and completion of the request.
1873 *                If the callback returns non-zero, this helper returns
1874 *                without completing the request.
1875 *
1876 * Description:
1877 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1878 *     If @rq has bytes left over, sets it up for the next range of segments.
1879 *
1880 * Return:
1881 *     0 - we are done with this request
1882 *     1 - this request is not freed yet; it still has pending buffers.
1883 **/
1884static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
1885                      unsigned int bidi_bytes,
1886                      int (drv_callback)(struct request *))
1887{
1888        struct request_queue *q = rq->q;
1889        unsigned long flags = 0UL;
1890
1891        if (blk_fs_request(rq) || blk_pc_request(rq)) {
1892                if (__end_that_request_first(rq, error, nr_bytes))
1893                        return 1;
1894
1895                /* Bidi request must be completed as a whole */
1896                if (blk_bidi_rq(rq) &&
1897                    __end_that_request_first(rq->next_rq, error, bidi_bytes))
1898                        return 1;
1899        }
1900
1901        /* Special feature for tricky drivers */
1902        if (drv_callback && drv_callback(rq))
1903                return 1;
1904
1905        add_disk_randomness(rq->rq_disk);
1906
1907        spin_lock_irqsave(q->queue_lock, flags);
1908        end_that_request_last(rq, error);
1909        spin_unlock_irqrestore(q->queue_lock, flags);
1910
1911        return 0;
1912}
1913
1914/**
1915 * blk_end_request - Helper function for drivers to complete the request.
1916 * @rq:       the request being processed
1917 * @error:    0 for success, < 0 for error
1918 * @nr_bytes: number of bytes to complete
1919 *
1920 * Description:
1921 *     Ends I/O on a number of bytes attached to @rq.
1922 *     If @rq has bytes left over, sets it up for the next range of segments.
1923 *
1924 * Return:
1925 *     0 - we are done with this request
1926 *     1 - still buffers pending for this request
1927 **/
1928int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1929{
1930        return blk_end_io(rq, error, nr_bytes, 0, NULL);
1931}
1932EXPORT_SYMBOL_GPL(blk_end_request);
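
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * Completing a request from a hypothetical completion handler that does
 * not hold the queue lock; blk_end_request() takes it internally.
 */
static void example_complete_whole(struct request *rq, int error)
{
        if (blk_end_request(rq, error, blk_rq_bytes(rq)))
                printk(KERN_WARNING "example: request not fully completed\n");
}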
1933
1934/**
1935 * __blk_end_request - Helper function for drivers to complete the request.
1936 * @rq:       the request being processed
1937 * @error:    0 for success, < 0 for error
1938 * @nr_bytes: number of bytes to complete
1939 *
1940 * Description:
1941 *     Must be called with the queue lock held, unlike blk_end_request().
1942 *
1943 * Return:
1944 *     0 - we are done with this request
1945 *     1 - still buffers pending for this request
1946 **/
1947int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
1948{
1949        if (blk_fs_request(rq) || blk_pc_request(rq)) {
1950                if (__end_that_request_first(rq, error, nr_bytes))
1951                        return 1;
1952        }
1953
1954        add_disk_randomness(rq->rq_disk);
1955
1956        end_that_request_last(rq, error);
1957
1958        return 0;
1959}
1960EXPORT_SYMBOL_GPL(__blk_end_request);
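
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * Partial completion with the locked variant.  The caller is assumed to
 * hold rq->q->queue_lock already, e.g. because it runs from ->request_fn.
 */
static int example_complete_partial_locked(struct request *rq,
                                           unsigned int done_bytes)
{
        /* non-zero return: @rq still has pending buffers and stays set up
         * for the remaining range of segments */
        return __blk_end_request(rq, 0, done_bytes);
}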
1961
1962/**
1963 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
1964 * @rq:         the bidi request being processed
1965 * @error:      0 for success, < 0 for error
1966 * @nr_bytes:   number of bytes to complete @rq
1967 * @bidi_bytes: number of bytes to complete @rq->next_rq
1968 *
1969 * Description:
1970 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
1971 *
1972 * Return:
1973 *     0 - we are done with this request
1974 *     1 - still buffers pending for this request
1975 **/
1976int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
1977                         unsigned int bidi_bytes)
1978{
1979        return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
1980}
1981EXPORT_SYMBOL_GPL(blk_end_bidi_request);
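
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * Completing a bidirectional request, where @rq->next_rq carries the
 * second data direction (as used by some SCSI bidi commands).
 */
static void example_complete_bidi(struct request *rq, int error)
{
        blk_end_bidi_request(rq, error, blk_rq_bytes(rq),
                             blk_rq_bytes(rq->next_rq));
}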
1982
1983/**
1984 * blk_end_request_callback - Special helper function for tricky drivers
1985 * @rq:           the request being processed
1986 * @error:        0 for success, < 0 for error
1987 * @nr_bytes:     number of bytes to complete
1988 * @drv_callback: function called between completion of bios in the request
1989 *                and completion of the request.
1990 *                If the callback returns non-zero, this helper returns
1991 *                without completing the request.
1992 *
1993 * Description:
1994 *     Ends I/O on a number of bytes attached to @rq.
1995 *     If @rq has bytes left over, sets it up for the next range of segments.
1996 *
1997 *     This special helper function is used only for existing tricky drivers.
1998 *     (e.g. cdrom_newpc_intr() of ide-cd)
1999 *     This interface will be removed when such drivers are rewritten.
2000 *     Don't use this interface in other places anymore.
2001 *
2002 * Return:
2003 *     0 - we are done with this request
2004 *     1 - this request is not freed yet.
2005 *         Either it still has pending buffers, or
2006 *         the driver doesn't want to finish this request yet.
2007 **/
2008int blk_end_request_callback(struct request *rq, int error,
2009                             unsigned int nr_bytes,
2010                             int (drv_callback)(struct request *))
2011{
2012        return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
2013}
2014EXPORT_SYMBOL_GPL(blk_end_request_callback);
2015
2016void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2017                     struct bio *bio)
2018{
2019        /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
2020        rq->cmd_flags |= (bio->bi_rw & 3);
2021
2022        rq->nr_phys_segments = bio_phys_segments(q, bio);
2023        rq->nr_hw_segments = bio_hw_segments(q, bio);
2024        rq->current_nr_sectors = bio_cur_sectors(bio);
2025        rq->hard_cur_sectors = rq->current_nr_sectors;
2026        rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
2027        rq->buffer = bio_data(bio);
2028        rq->data_len = bio->bi_size;
2029
2030        rq->bio = rq->biotail = bio;
2031
2032        if (bio->bi_bdev)
2033                rq->rq_disk = bio->bi_bdev->bd_disk;
2034}
2035
2036int kblockd_schedule_work(struct work_struct *work)
2037{
2038        return queue_work(kblockd_workqueue, work);
2039}
2040EXPORT_SYMBOL(kblockd_schedule_work);
2041
2042void kblockd_flush_work(struct work_struct *work)
2043{
2044        cancel_work_sync(work);
2045}
2046EXPORT_SYMBOL(kblockd_flush_work);
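
/*
 * [Editor's note: illustrative sketch, not part of the original file.]
 * A hypothetical driver deferring non-atomic work to the kblockd
 * workqueue and flushing it before its state goes away.
 */
struct example_dev {
        struct work_struct work;
};

static void example_work_fn(struct work_struct *work)
{
        struct example_dev *dev = container_of(work, struct example_dev, work);

        /* ... deferred, sleepable work for @dev goes here ... */
        (void)dev;
}

/*
 * Setup:            INIT_WORK(&dev->work, example_work_fn);
 * Defer work:       kblockd_schedule_work(&dev->work);
 * Before freeing:   kblockd_flush_work(&dev->work);
 */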
2047
2048int __init blk_dev_init(void)
2049{
2050        int i;
2051
2052        kblockd_workqueue = create_workqueue("kblockd");
2053        if (!kblockd_workqueue)
2054                panic("Failed to create kblockd\n");
2055
2056        request_cachep = kmem_cache_create("blkdev_requests",
2057                        sizeof(struct request), 0, SLAB_PANIC, NULL);
2058
2059        blk_requestq_cachep = kmem_cache_create("blkdev_queue",
2060                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
2061
2062        for_each_possible_cpu(i)
2063                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
2064
2065        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
2066        register_hotcpu_notifier(&blk_cpu_notifier);
2067
2068        return 0;
2069}
2070
2071