linux/block/blk-core.c
   1/*
   2 * Copyright (C) 1991, 1992 Linus Torvalds
   3 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
   4 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
   5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
   6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
   7 *      - July 2000
   8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - May 2001
   9 */
  10
  11/*
  12 * This handles all read/write requests to block devices
  13 */
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/backing-dev.h>
  17#include <linux/bio.h>
  18#include <linux/blkdev.h>
  19#include <linux/highmem.h>
  20#include <linux/mm.h>
  21#include <linux/kernel_stat.h>
  22#include <linux/string.h>
  23#include <linux/init.h>
  24#include <linux/completion.h>
  25#include <linux/slab.h>
  26#include <linux/swap.h>
  27#include <linux/writeback.h>
  28#include <linux/task_io_accounting_ops.h>
  29#include <linux/fault-inject.h>
  30#include <linux/list_sort.h>
  31#include <linux/delay.h>
  32#include <linux/ratelimit.h>
  33
  34#define CREATE_TRACE_POINTS
  35#include <trace/events/block.h>
  36
  37#include "blk.h"
  38#include "blk-cgroup.h"
  39
  40EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
  41EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
  42EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
  43
  44DEFINE_IDA(blk_queue_ida);
  45
  46/*
  47 * For the allocated request tables
  48 */
  49static struct kmem_cache *request_cachep;
  50
  51/*
  52 * For queue allocation
  53 */
  54struct kmem_cache *blk_requestq_cachep;
  55
  56/*
  57 * Controlling structure to kblockd
  58 */
  59static struct workqueue_struct *kblockd_workqueue;
  60
  61static void drive_stat_acct(struct request *rq, int new_io)
  62{
  63        struct hd_struct *part;
  64        int rw = rq_data_dir(rq);
  65        int cpu;
  66
  67        if (!blk_do_io_stat(rq))
  68                return;
  69
  70        cpu = part_stat_lock();
  71
  72        if (!new_io) {
  73                part = rq->part;
  74                part_stat_inc(cpu, part, merges[rw]);
  75        } else {
  76                part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
  77                if (!hd_struct_try_get(part)) {
  78                        /*
  79                         * The partition is already being removed,
  80                         * the request will be accounted on the disk only
  81                         *
  82                         * We take a reference on disk->part0 although that
  83                         * partition will never be deleted, so we can treat
  84                         * it as any other partition.
  85                         */
  86                        part = &rq->rq_disk->part0;
  87                        hd_struct_get(part);
  88                }
  89                part_round_stats(cpu, part);
  90                part_inc_in_flight(part, rw);
  91                rq->part = part;
  92        }
  93
  94        part_stat_unlock();
  95}
  96
  97void blk_queue_congestion_threshold(struct request_queue *q)
  98{
  99        int nr;
 100
 101        nr = q->nr_requests - (q->nr_requests / 8) + 1;
 102        if (nr > q->nr_requests)
 103                nr = q->nr_requests;
 104        q->nr_congestion_on = nr;
 105
 106        nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
 107        if (nr < 1)
 108                nr = 1;
 109        q->nr_congestion_off = nr;
 110}
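
/*
 * Worked example (illustrative, assuming the default q->nr_requests of
 * 128): the queue is flagged congested once 128 - 128/8 + 1 = 113
 * requests are allocated, and the flag is only cleared again when the
 * count drops below 128 - 128/8 - 128/16 - 1 = 103.  The gap between the
 * two thresholds provides hysteresis, so the backing_dev congestion
 * state does not flap on every allocation and completion.
 */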
 111
 112/**
 113 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 114 * @bdev:       device
 115 *
 116 * Locates the passed device's request queue and returns the address of its
 117 * backing_dev_info
 118 *
 119 * Will return NULL if the request queue cannot be located.
 120 */
 121struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 122{
 123        struct backing_dev_info *ret = NULL;
 124        struct request_queue *q = bdev_get_queue(bdev);
 125
 126        if (q)
 127                ret = &q->backing_dev_info;
 128        return ret;
 129}
 130EXPORT_SYMBOL(blk_get_backing_dev_info);
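
/*
 * Example (illustrative sketch only): a caller that already holds a
 * struct block_device can use the helper above to inspect the device's
 * read-ahead window:
 *
 *	struct backing_dev_info *bdi = blk_get_backing_dev_info(bdev);
 *
 *	if (bdi)
 *		pr_debug("ra_pages=%lu\n", bdi->ra_pages);
 */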
 131
 132void blk_rq_init(struct request_queue *q, struct request *rq)
 133{
 134        memset(rq, 0, sizeof(*rq));
 135
 136        INIT_LIST_HEAD(&rq->queuelist);
 137        INIT_LIST_HEAD(&rq->timeout_list);
 138        rq->cpu = -1;
 139        rq->q = q;
 140        rq->__sector = (sector_t) -1;
 141        INIT_HLIST_NODE(&rq->hash);
 142        RB_CLEAR_NODE(&rq->rb_node);
 143        rq->cmd = rq->__cmd;
 144        rq->cmd_len = BLK_MAX_CDB;
 145        rq->tag = -1;
 146        rq->ref_count = 1;
 147        rq->start_time = jiffies;
 148        set_start_time_ns(rq);
 149        rq->part = NULL;
 150}
 151EXPORT_SYMBOL(blk_rq_init);
 152
 153static void req_bio_endio(struct request *rq, struct bio *bio,
 154                          unsigned int nbytes, int error)
 155{
 156        if (error)
 157                clear_bit(BIO_UPTODATE, &bio->bi_flags);
 158        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 159                error = -EIO;
 160
 161        if (unlikely(nbytes > bio->bi_size)) {
 162                printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 163                       __func__, nbytes, bio->bi_size);
 164                nbytes = bio->bi_size;
 165        }
 166
 167        if (unlikely(rq->cmd_flags & REQ_QUIET))
 168                set_bit(BIO_QUIET, &bio->bi_flags);
 169
 170        bio->bi_size -= nbytes;
 171        bio->bi_sector += (nbytes >> 9);
 172
 173        if (bio_integrity(bio))
 174                bio_integrity_advance(bio, nbytes);
 175
 176        /* don't actually finish bio if it's part of flush sequence */
 177        if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
 178                bio_endio(bio, error);
 179}
 180
 181void blk_dump_rq_flags(struct request *rq, char *msg)
 182{
 183        int bit;
 184
 185        printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 186                rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 187                rq->cmd_flags);
 188
 189        printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
 190               (unsigned long long)blk_rq_pos(rq),
 191               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 192        printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
 193               rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
 194
 195        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 196                printk(KERN_INFO "  cdb: ");
 197                for (bit = 0; bit < BLK_MAX_CDB; bit++)
 198                        printk("%02x ", rq->cmd[bit]);
 199                printk("\n");
 200        }
 201}
 202EXPORT_SYMBOL(blk_dump_rq_flags);
 203
 204static void blk_delay_work(struct work_struct *work)
 205{
 206        struct request_queue *q;
 207
 208        q = container_of(work, struct request_queue, delay_work.work);
 209        spin_lock_irq(q->queue_lock);
 210        __blk_run_queue(q);
 211        spin_unlock_irq(q->queue_lock);
 212}
 213
 214/**
 215 * blk_delay_queue - restart queueing after defined interval
 216 * @q:          The &struct request_queue in question
 217 * @msecs:      Delay in msecs
 218 *
 219 * Description:
 220 *   Sometimes queueing needs to be postponed for a little while, to allow
 221 *   resources to come back. This function will make sure that queueing is
 222 *   restarted around the specified time.
 223 */
 224void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 225{
 226        queue_delayed_work(kblockd_workqueue, &q->delay_work,
 227                                msecs_to_jiffies(msecs));
 228}
 229EXPORT_SYMBOL(blk_delay_queue);
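
/*
 * Example (hypothetical driver, illustrative only; mydrv_queue_rq is an
 * assumed driver-private helper): a request_fn that runs out of device
 * resources can requeue the request and back off for a few milliseconds
 * instead of busy-looping:
 *
 *	static void mydrv_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			if (!mydrv_queue_rq(rq)) {
 *				blk_requeue_request(q, rq);
 *				blk_delay_queue(q, 3);
 *				break;
 *			}
 *		}
 *	}
 */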
 230
 231/**
 232 * blk_start_queue - restart a previously stopped queue
 233 * @q:    The &struct request_queue in question
 234 *
 235 * Description:
 236 *   blk_start_queue() will clear the stop flag on the queue, and call
 237 *   the request_fn for the queue if it was in a stopped state when
 238 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 239 **/
 240void blk_start_queue(struct request_queue *q)
 241{
 242        WARN_ON(!irqs_disabled());
 243
 244        queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 245        __blk_run_queue(q);
 246}
 247EXPORT_SYMBOL(blk_start_queue);
 248
 249/**
 250 * blk_stop_queue - stop a queue
 251 * @q:    The &struct request_queue in question
 252 *
 253 * Description:
 254 *   The Linux block layer assumes that a block driver will consume all
 255 *   entries on the request queue when the request_fn strategy is called.
 256 *   Often this will not happen, because of hardware limitations (queue
 257 *   depth settings). If a device driver gets a 'queue full' response,
 258 *   or if it simply chooses not to queue more I/O at one point, it can
 259 *   call this function to prevent the request_fn from being called until
 260 *   the driver has signalled it's ready to go again. This happens by calling
 261 *   blk_start_queue() to restart queue operations. Queue lock must be held.
 262 **/
 263void blk_stop_queue(struct request_queue *q)
 264{
 265        cancel_delayed_work(&q->delay_work);
 266        queue_flag_set(QUEUE_FLAG_STOPPED, q);
 267}
 268EXPORT_SYMBOL(blk_stop_queue);
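
/*
 * Example (hypothetical driver, illustrative only; hw_queue_full() is an
 * assumed helper): the usual pairing is blk_stop_queue() from the
 * request_fn on a 'queue full' condition and blk_start_queue() from the
 * completion handler once the hardware has room again, both under
 * q->queue_lock as required above:
 *
 *	in the request_fn (queue lock already held):
 *		if (hw_queue_full(dev))
 *			blk_stop_queue(q);
 *
 *	in the completion handler:
 *		spin_lock_irqsave(q->queue_lock, flags);
 *		blk_start_queue(q);
 *		spin_unlock_irqrestore(q->queue_lock, flags);
 */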
 269
 270/**
 271 * blk_sync_queue - cancel any pending callbacks on a queue
 272 * @q: the queue
 273 *
 274 * Description:
 275 *     The block layer may perform asynchronous callback activity
 276 *     on a queue, such as calling the unplug function after a timeout.
 277 *     A block device may call blk_sync_queue to ensure that any
 278 *     such activity is cancelled, thus allowing it to release resources
 279 *     that the callbacks might use. The caller must already have made sure
 280 *     that its ->make_request_fn will not re-add plugging prior to calling
 281 *     this function.
 282 *
 283 *     This function does not cancel any asynchronous activity arising
 284 *     out of elevator or throttling code. That would require elevator_exit()
 285 *     and blkcg_exit_queue() to be called with queue lock initialized.
 286 *
 287 */
 288void blk_sync_queue(struct request_queue *q)
 289{
 290        del_timer_sync(&q->timeout);
 291        cancel_delayed_work_sync(&q->delay_work);
 292}
 293EXPORT_SYMBOL(blk_sync_queue);
 294
 295/**
 296 * __blk_run_queue - run a single device queue
 297 * @q:  The queue to run
 298 *
 299 * Description:
 300 *    See @blk_run_queue. This variant must be called with the queue lock
 301 *    held and interrupts disabled.
 302 */
 303void __blk_run_queue(struct request_queue *q)
 304{
 305        if (unlikely(blk_queue_stopped(q)))
 306                return;
 307
 308        q->request_fn(q);
 309}
 310EXPORT_SYMBOL(__blk_run_queue);
 311
 312/**
 313 * blk_run_queue_async - run a single device queue in workqueue context
 314 * @q:  The queue to run
 315 *
 316 * Description:
 317 *    Tells kblockd to perform the equivalent of @blk_run_queue on our
 318 *    behalf.
 319 */
 320void blk_run_queue_async(struct request_queue *q)
 321{
 322        if (likely(!blk_queue_stopped(q)))
 323                mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 324}
 325EXPORT_SYMBOL(blk_run_queue_async);
 326
 327/**
 328 * blk_run_queue - run a single device queue
 329 * @q: The queue to run
 330 *
 331 * Description:
 332 *    Invoke request handling on this queue, if it has pending work to do.
 333 *    May be used to restart queueing when a request has completed.
 334 */
 335void blk_run_queue(struct request_queue *q)
 336{
 337        unsigned long flags;
 338
 339        spin_lock_irqsave(q->queue_lock, flags);
 340        __blk_run_queue(q);
 341        spin_unlock_irqrestore(q->queue_lock, flags);
 342}
 343EXPORT_SYMBOL(blk_run_queue);
 344
 345void blk_put_queue(struct request_queue *q)
 346{
 347        kobject_put(&q->kobj);
 348}
 349EXPORT_SYMBOL(blk_put_queue);
 350
 351/**
 352 * blk_drain_queue - drain requests from request_queue
 353 * @q: queue to drain
 354 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
 355 *
 356 * Drain requests from @q.  If @drain_all is set, all requests are drained.
 357 * If not, only ELVPRIV requests are drained.  The caller is responsible
 358 * for ensuring that no new requests which need to be drained are queued.
 359 */
 360void blk_drain_queue(struct request_queue *q, bool drain_all)
 361{
 362        int i;
 363
 364        while (true) {
 365                bool drain = false;
 366
 367                spin_lock_irq(q->queue_lock);
 368
 369                /*
 370                 * The caller might be trying to drain @q before its
 371                 * elevator is initialized.
 372                 */
 373                if (q->elevator)
 374                        elv_drain_elevator(q);
 375
 376                blkcg_drain_queue(q);
 377
 378                /*
 379                 * This function might be called on a queue which failed
 380                 * driver init after queue creation or is not fully
 381                 * active yet.  Some drivers (e.g. fd and loop) get unhappy
 382                 * in such cases.  Kick queue iff dispatch queue has
 383                 * something on it and @q has request_fn set.
 384                 */
 385                if (!list_empty(&q->queue_head) && q->request_fn)
 386                        __blk_run_queue(q);
 387
 388                drain |= q->nr_rqs_elvpriv;
 389
 390                /*
 391                 * Unfortunately, requests are queued at and tracked from
 392                 * multiple places and there's no single counter which can
 393                 * be drained.  Check all the queues and counters.
 394                 */
 395                if (drain_all) {
 396                        drain |= !list_empty(&q->queue_head);
 397                        for (i = 0; i < 2; i++) {
 398                                drain |= q->nr_rqs[i];
 399                                drain |= q->in_flight[i];
 400                                drain |= !list_empty(&q->flush_queue[i]);
 401                        }
 402                }
 403
 404                spin_unlock_irq(q->queue_lock);
 405
 406                if (!drain)
 407                        break;
 408                msleep(10);
 409        }
 410
 411        /*
 412         * With queue marked dead, any woken up waiter will fail the
 413         * allocation path, so the wakeup chaining is lost and we're
 414         * left with hung waiters. We need to wake up those waiters.
 415         */
 416        if (q->request_fn) {
 417                struct request_list *rl;
 418
 419                spin_lock_irq(q->queue_lock);
 420
 421                blk_queue_for_each_rl(rl, q)
 422                        for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 423                                wake_up_all(&rl->wait[i]);
 424
 425                spin_unlock_irq(q->queue_lock);
 426        }
 427}
 428
 429/**
 430 * blk_queue_bypass_start - enter queue bypass mode
 431 * @q: queue of interest
 432 *
 433 * In bypass mode, only the dispatch FIFO queue of @q is used.  This
 434 * function makes @q enter bypass mode and drains all requests which were
 435 * throttled or issued before.  On return, it's guaranteed that no request
 436 * is being throttled or has ELVPRIV set, and that blk_queue_bypass()
 437 * returns %true inside queue or RCU read lock.
 438 */
 439void blk_queue_bypass_start(struct request_queue *q)
 440{
 441        bool drain;
 442
 443        spin_lock_irq(q->queue_lock);
 444        drain = !q->bypass_depth++;
 445        queue_flag_set(QUEUE_FLAG_BYPASS, q);
 446        spin_unlock_irq(q->queue_lock);
 447
 448        if (drain) {
 449                blk_drain_queue(q, false);
 450                /* ensure blk_queue_bypass() is %true inside RCU read lock */
 451                synchronize_rcu();
 452        }
 453}
 454EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
 455
 456/**
 457 * blk_queue_bypass_end - leave queue bypass mode
 458 * @q: queue of interest
 459 *
 460 * Leave bypass mode and restore the normal queueing behavior.
 461 */
 462void blk_queue_bypass_end(struct request_queue *q)
 463{
 464        spin_lock_irq(q->queue_lock);
 465        if (!--q->bypass_depth)
 466                queue_flag_clear(QUEUE_FLAG_BYPASS, q);
 467        WARN_ON_ONCE(q->bypass_depth < 0);
 468        spin_unlock_irq(q->queue_lock);
 469}
 470EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
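
/*
 * Example (illustrative only): code that must not race with in-flight
 * ELVPRIV or throttled requests while it changes per-queue policy data,
 * such as an elevator switch, brackets the update with the pair above:
 *
 *	blk_queue_bypass_start(q);
 *	... swap elevator or blkcg policy data ...
 *	blk_queue_bypass_end(q);
 */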
 471
 472/**
 473 * blk_cleanup_queue - shutdown a request queue
 474 * @q: request queue to shutdown
 475 *
 476 * Mark @q DEAD, drain all pending requests, destroy and put it.  All
 477 * future requests will be failed immediately with -ENODEV.
 478 */
 479void blk_cleanup_queue(struct request_queue *q)
 480{
 481        spinlock_t *lock = q->queue_lock;
 482
 483        /* mark @q DEAD, no new request or merges will be allowed afterwards */
 484        mutex_lock(&q->sysfs_lock);
 485        queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 486        spin_lock_irq(lock);
 487
 488        /*
 489         * Dead queue is permanently in bypass mode till released.  Note
 490         * that, unlike blk_queue_bypass_start(), we aren't performing
 491         * synchronize_rcu() after entering bypass mode to avoid the delay
 492         * as some drivers create and destroy a lot of queues while
 493         * probing.  This is still safe because blk_release_queue() will be
 494         * called only after the queue refcnt drops to zero and nothing,
 495         * RCU or not, would be traversing the queue by then.
 496         */
 497        q->bypass_depth++;
 498        queue_flag_set(QUEUE_FLAG_BYPASS, q);
 499
 500        queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 501        queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 502        queue_flag_set(QUEUE_FLAG_DEAD, q);
 503        spin_unlock_irq(lock);
 504        mutex_unlock(&q->sysfs_lock);
 505
 506        /* drain all requests queued before DEAD marking */
 507        blk_drain_queue(q, true);
 508
 509        /* @q won't process any more requests, flush async actions */
 510        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 511        blk_sync_queue(q);
 512
 513        spin_lock_irq(lock);
 514        if (q->queue_lock != &q->__queue_lock)
 515                q->queue_lock = &q->__queue_lock;
 516        spin_unlock_irq(lock);
 517
 518        /* @q is and will stay empty, shutdown and put */
 519        blk_put_queue(q);
 520}
 521EXPORT_SYMBOL(blk_cleanup_queue);
 522
 523int blk_init_rl(struct request_list *rl, struct request_queue *q,
 524                gfp_t gfp_mask)
 525{
 526        if (unlikely(rl->rq_pool))
 527                return 0;
 528
 529        rl->q = q;
 530        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
 531        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
 532        init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 533        init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 534
 535        rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 536                                          mempool_free_slab, request_cachep,
 537                                          gfp_mask, q->node);
 538        if (!rl->rq_pool)
 539                return -ENOMEM;
 540
 541        return 0;
 542}
 543
 544void blk_exit_rl(struct request_list *rl)
 545{
 546        if (rl->rq_pool)
 547                mempool_destroy(rl->rq_pool);
 548}
 549
 550struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 551{
 552        return blk_alloc_queue_node(gfp_mask, -1);
 553}
 554EXPORT_SYMBOL(blk_alloc_queue);
 555
 556struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 557{
 558        struct request_queue *q;
 559        int err;
 560
 561        q = kmem_cache_alloc_node(blk_requestq_cachep,
 562                                gfp_mask | __GFP_ZERO, node_id);
 563        if (!q)
 564                return NULL;
 565
 566        q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 567        if (q->id < 0)
 568                goto fail_q;
 569
 570        q->backing_dev_info.ra_pages =
 571                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 572        q->backing_dev_info.state = 0;
 573        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 574        q->backing_dev_info.name = "block";
 575        q->node = node_id;
 576
 577        err = bdi_init(&q->backing_dev_info);
 578        if (err)
 579                goto fail_id;
 580
 581        setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
 582                    laptop_mode_timer_fn, (unsigned long) q);
 583        setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 584        INIT_LIST_HEAD(&q->queue_head);
 585        INIT_LIST_HEAD(&q->timeout_list);
 586        INIT_LIST_HEAD(&q->icq_list);
 587#ifdef CONFIG_BLK_CGROUP
 588        INIT_LIST_HEAD(&q->blkg_list);
 589#endif
 590        INIT_LIST_HEAD(&q->flush_queue[0]);
 591        INIT_LIST_HEAD(&q->flush_queue[1]);
 592        INIT_LIST_HEAD(&q->flush_data_in_flight);
 593        INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
 594
 595        kobject_init(&q->kobj, &blk_queue_ktype);
 596
 597        mutex_init(&q->sysfs_lock);
 598        spin_lock_init(&q->__queue_lock);
 599
 600        /*
 601         * By default, initialize queue_lock to the internal lock; the driver
 602         * can override it later if need be.
 603         */
 604        q->queue_lock = &q->__queue_lock;
 605
 606        /*
 607         * A queue starts its life with bypass turned on to avoid
 608         * unnecessary bypass on/off overhead and nasty surprises during
 609         * init.  The initial bypass will be finished when the queue is
 610         * registered by blk_register_queue().
 611         */
 612        q->bypass_depth = 1;
 613        __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
 614
 615        if (blkcg_init_queue(q))
 616                goto fail_id;
 617
 618        return q;
 619
 620fail_id:
 621        ida_simple_remove(&blk_queue_ida, q->id);
 622fail_q:
 623        kmem_cache_free(blk_requestq_cachep, q);
 624        return NULL;
 625}
 626EXPORT_SYMBOL(blk_alloc_queue_node);
 627
 628/**
 629 * blk_init_queue  - prepare a request queue for use with a block device
 630 * @rfn:  The function to be called to process requests that have been
 631 *        placed on the queue.
 632 * @lock: Request queue spin lock
 633 *
 634 * Description:
 635 *    If a block device wishes to use the standard request handling procedures,
 636 *    which sorts requests and coalesces adjacent requests, then it must
 637 *    call blk_init_queue().  The function @rfn will be called when there
 638 *    are requests on the queue that need to be processed.  If the device
 639 *    supports plugging, then @rfn may not be called immediately when requests
 640 *    are available on the queue, but may be called at some time later instead.
 641 *    Plugged queues are generally unplugged when a buffer belonging to one
 642 *    of the requests on the queue is needed, or due to memory pressure.
 643 *
 644 *    @rfn is not required, or even expected, to remove all requests off the
 645 *    queue, but only as many as it can handle at a time.  If it does leave
 646 *    requests on the queue, it is responsible for arranging that the requests
 647 *    get dealt with eventually.
 648 *
 649 *    The queue spin lock must be held while manipulating the requests on the
 650 *    request queue; this lock will be taken also from interrupt context, so irq
 651 *    disabling is needed for it.
 652 *
 653 *    Function returns a pointer to the initialized request queue, or %NULL if
 654 *    it didn't succeed.
 655 *
 656 * Note:
 657 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 658 *    when the block device is deactivated (such as at module unload).
 659 **/
 660
 661struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
 662{
 663        return blk_init_queue_node(rfn, lock, -1);
 664}
 665EXPORT_SYMBOL(blk_init_queue);
 666
 667struct request_queue *
 668blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 669{
 670        struct request_queue *uninit_q, *q;
 671
 672        uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
 673        if (!uninit_q)
 674                return NULL;
 675
 676        q = blk_init_allocated_queue(uninit_q, rfn, lock);
 677        if (!q)
 678                blk_cleanup_queue(uninit_q);
 679
 680        return q;
 681}
 682EXPORT_SYMBOL(blk_init_queue_node);
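
/*
 * Example (hypothetical driver, illustrative only; mydrv_request_fn and
 * mydrv_lock are assumed driver-private names): the probe/remove pairing
 * the note above asks for:
 *
 *	static DEFINE_SPINLOCK(mydrv_lock);
 *
 *	q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	blk_queue_logical_block_size(q, 512);
 *	...
 *	and on teardown:
 *		blk_cleanup_queue(q);
 */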
 683
 684struct request_queue *
 685blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 686                         spinlock_t *lock)
 687{
 688        if (!q)
 689                return NULL;
 690
 691        if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
 692                return NULL;
 693
 694        q->request_fn           = rfn;
 695        q->prep_rq_fn           = NULL;
 696        q->unprep_rq_fn         = NULL;
 697        q->queue_flags          |= QUEUE_FLAG_DEFAULT;
 698
 699        /* Override internal queue lock with supplied lock pointer */
 700        if (lock)
 701                q->queue_lock           = lock;
 702
 703        /*
 704         * This also sets hw/phys segments, boundary and size
 705         */
 706        blk_queue_make_request(q, blk_queue_bio);
 707
 708        q->sg_reserved_size = INT_MAX;
 709
 710        /* init elevator */
 711        if (elevator_init(q, NULL))
 712                return NULL;
 713        return q;
 714}
 715EXPORT_SYMBOL(blk_init_allocated_queue);
 716
 717bool blk_get_queue(struct request_queue *q)
 718{
 719        if (likely(!blk_queue_dead(q))) {
 720                __blk_get_queue(q);
 721                return true;
 722        }
 723
 724        return false;
 725}
 726EXPORT_SYMBOL(blk_get_queue);
 727
 728static inline void blk_free_request(struct request_list *rl, struct request *rq)
 729{
 730        if (rq->cmd_flags & REQ_ELVPRIV) {
 731                elv_put_request(rl->q, rq);
 732                if (rq->elv.icq)
 733                        put_io_context(rq->elv.icq->ioc);
 734        }
 735
 736        mempool_free(rq, rl->rq_pool);
 737}
 738
 739/*
 740 * ioc_batching returns true if the ioc is a valid batcher and should be
 741 * given priority access to a request.
 742 */
 743static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
 744{
 745        if (!ioc)
 746                return 0;
 747
 748        /*
 749         * Make sure the process is able to allocate at least 1 request
 750         * even if the batch times out, otherwise we could theoretically
 751         * lose wakeups.
 752         */
 753        return ioc->nr_batch_requests == q->nr_batching ||
 754                (ioc->nr_batch_requests > 0
 755                && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
 756}
 757
 758/*
 759 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 760 * will cause the process to be a "batcher" on all queues in the system. This
 761 * is the behaviour we want though - once it gets a wakeup it should be given
 762 * a nice run.
 763 */
 764static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 765{
 766        if (!ioc || ioc_batching(q, ioc))
 767                return;
 768
 769        ioc->nr_batch_requests = q->nr_batching;
 770        ioc->last_waited = jiffies;
 771}
 772
 773static void __freed_request(struct request_list *rl, int sync)
 774{
 775        struct request_queue *q = rl->q;
 776
 777        /*
 778         * bdi isn't aware of blkcg yet.  As all async IOs end up root
 779         * blkcg anyway, just use root blkcg state.
 780         */
 781        if (rl == &q->root_rl &&
 782            rl->count[sync] < queue_congestion_off_threshold(q))
 783                blk_clear_queue_congested(q, sync);
 784
 785        if (rl->count[sync] + 1 <= q->nr_requests) {
 786                if (waitqueue_active(&rl->wait[sync]))
 787                        wake_up(&rl->wait[sync]);
 788
 789                blk_clear_rl_full(rl, sync);
 790        }
 791}
 792
 793/*
 794 * A request has just been released.  Account for it, update the full and
 795 * congestion status, wake up any waiters.   Called under q->queue_lock.
 796 */
 797static void freed_request(struct request_list *rl, unsigned int flags)
 798{
 799        struct request_queue *q = rl->q;
 800        int sync = rw_is_sync(flags);
 801
 802        q->nr_rqs[sync]--;
 803        rl->count[sync]--;
 804        if (flags & REQ_ELVPRIV)
 805                q->nr_rqs_elvpriv--;
 806
 807        __freed_request(rl, sync);
 808
 809        if (unlikely(rl->starved[sync ^ 1]))
 810                __freed_request(rl, sync ^ 1);
 811}
 812
 813/*
 814 * Determine if elevator data should be initialized when allocating the
 815 * request associated with @bio.
 816 */
 817static bool blk_rq_should_init_elevator(struct bio *bio)
 818{
 819        if (!bio)
 820                return true;
 821
 822        /*
 823         * Flush requests do not use the elevator so skip initialization.
 824         * This allows a request to share the flush and elevator data.
 825         */
 826        if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
 827                return false;
 828
 829        return true;
 830}
 831
 832/**
 833 * rq_ioc - determine io_context for request allocation
 834 * @bio: request being allocated is for this bio (can be %NULL)
 835 *
 836 * Determine io_context to use for request allocation for @bio.  May return
 837 * %NULL if %current->io_context doesn't exist.
 838 */
 839static struct io_context *rq_ioc(struct bio *bio)
 840{
 841#ifdef CONFIG_BLK_CGROUP
 842        if (bio && bio->bi_ioc)
 843                return bio->bi_ioc;
 844#endif
 845        return current->io_context;
 846}
 847
 848/**
 849 * __get_request - get a free request
 850 * @rl: request list to allocate from
 851 * @rw_flags: RW and SYNC flags
 852 * @bio: bio to allocate request for (can be %NULL)
 853 * @gfp_mask: allocation mask
 854 *
 855 * Get a free request from @q.  This function may fail under memory
 856 * pressure or if @q is dead.
 857 *
 858 * Must be called with @q->queue_lock held.
 859 * Returns %NULL on failure, with @q->queue_lock held.
 860 * Returns !%NULL on success, with @q->queue_lock *not held*.
 861 */
 862static struct request *__get_request(struct request_list *rl, int rw_flags,
 863                                     struct bio *bio, gfp_t gfp_mask)
 864{
 865        struct request_queue *q = rl->q;
 866        struct request *rq;
 867        struct elevator_type *et = q->elevator->type;
 868        struct io_context *ioc = rq_ioc(bio);
 869        struct io_cq *icq = NULL;
 870        const bool is_sync = rw_is_sync(rw_flags) != 0;
 871        int may_queue;
 872
 873        if (unlikely(blk_queue_dead(q)))
 874                return NULL;
 875
 876        may_queue = elv_may_queue(q, rw_flags);
 877        if (may_queue == ELV_MQUEUE_NO)
 878                goto rq_starved;
 879
 880        if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
 881                if (rl->count[is_sync]+1 >= q->nr_requests) {
 882                        /*
 883                         * The queue will fill after this allocation, so set
 884                         * it as full, and mark this process as "batching".
 885                         * This process will be allowed to complete a batch of
 886                         * requests, others will be blocked.
 887                         */
 888                        if (!blk_rl_full(rl, is_sync)) {
 889                                ioc_set_batching(q, ioc);
 890                                blk_set_rl_full(rl, is_sync);
 891                        } else {
 892                                if (may_queue != ELV_MQUEUE_MUST
 893                                                && !ioc_batching(q, ioc)) {
 894                                        /*
 895                                         * The queue is full and the allocating
 896                                         * process is not a "batcher", and not
 897                                         * exempted by the IO scheduler
 898                                         */
 899                                        return NULL;
 900                                }
 901                        }
 902                }
 903                /*
 904                 * bdi isn't aware of blkcg yet.  As all async IOs end up
 905                 * root blkcg anyway, just use root blkcg state.
 906                 */
 907                if (rl == &q->root_rl)
 908                        blk_set_queue_congested(q, is_sync);
 909        }
 910
 911        /*
 912         * Only allow batching queuers to allocate up to 50% over the defined
 913         * limit of requests, otherwise we could have thousands of requests
 914         * allocated with any setting of ->nr_requests
 915         */
 916        if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
 917                return NULL;
 918
 919        q->nr_rqs[is_sync]++;
 920        rl->count[is_sync]++;
 921        rl->starved[is_sync] = 0;
 922
 923        /*
 924         * Decide whether the new request will be managed by elevator.  If
 925         * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
 926         * prevent the current elevator from being destroyed until the new
 927         * request is freed.  This guarantees icq's won't be destroyed and
 928         * makes creating new ones safe.
 929         *
 930         * Also, lookup icq while holding queue_lock.  If it doesn't exist,
 931         * it will be created after releasing queue_lock.
 932         */
 933        if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 934                rw_flags |= REQ_ELVPRIV;
 935                q->nr_rqs_elvpriv++;
 936                if (et->icq_cache && ioc)
 937                        icq = ioc_lookup_icq(ioc, q);
 938        }
 939
 940        if (blk_queue_io_stat(q))
 941                rw_flags |= REQ_IO_STAT;
 942        spin_unlock_irq(q->queue_lock);
 943
 944        /* allocate and init request */
 945        rq = mempool_alloc(rl->rq_pool, gfp_mask);
 946        if (!rq)
 947                goto fail_alloc;
 948
 949        blk_rq_init(q, rq);
 950        blk_rq_set_rl(rq, rl);
 951        rq->cmd_flags = rw_flags | REQ_ALLOCED;
 952
 953        /* init elvpriv */
 954        if (rw_flags & REQ_ELVPRIV) {
 955                if (unlikely(et->icq_cache && !icq)) {
 956                        if (ioc)
 957                                icq = ioc_create_icq(ioc, q, gfp_mask);
 958                        if (!icq)
 959                                goto fail_elvpriv;
 960                }
 961
 962                rq->elv.icq = icq;
 963                if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
 964                        goto fail_elvpriv;
 965
 966                /* @rq->elv.icq holds io_context until @rq is freed */
 967                if (icq)
 968                        get_io_context(icq->ioc);
 969        }
 970out:
 971        /*
 972         * ioc may be NULL here, and ioc_batching will be false. That's
 973         * OK, if the queue is under the request limit then requests need
 974         * not count toward the nr_batch_requests limit. There will always
 975         * be some limit enforced by BLK_BATCH_TIME.
 976         */
 977        if (ioc_batching(q, ioc))
 978                ioc->nr_batch_requests--;
 979
 980        trace_block_getrq(q, bio, rw_flags & 1);
 981        return rq;
 982
 983fail_elvpriv:
 984        /*
 985         * elvpriv init failed.  ioc, icq and elvpriv aren't mempool backed
 986         * and may fail indefinitely under memory pressure and thus
 987         * shouldn't stall IO.  Treat this request as !elvpriv.  This will
 988         * disturb iosched and blkcg but weird is better than dead.
 989         */
 990        printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
 991                           dev_name(q->backing_dev_info.dev));
 992
 993        rq->cmd_flags &= ~REQ_ELVPRIV;
 994        rq->elv.icq = NULL;
 995
 996        spin_lock_irq(q->queue_lock);
 997        q->nr_rqs_elvpriv--;
 998        spin_unlock_irq(q->queue_lock);
 999        goto out;
1000
1001fail_alloc:
1002        /*
1003         * Allocation failed presumably due to memory. Undo anything we
1004         * might have messed up.
1005         *
1006         * Allocating task should really be put onto the front of the wait
1007         * queue, but this is pretty rare.
1008         */
1009        spin_lock_irq(q->queue_lock);
1010        freed_request(rl, rw_flags);
1011
1012        /*
1013         * in the very unlikely event that allocation failed and no
1014         * requests for this direction were pending, mark us starved so that
1015         * freeing of a request in the other direction will notice
1016         * us. another possible fix would be to split the rq mempool into
1017         * READ and WRITE
1018         */
1019rq_starved:
1020        if (unlikely(rl->count[is_sync] == 0))
1021                rl->starved[is_sync] = 1;
1022        return NULL;
1023}
1024
1025/**
1026 * get_request - get a free request
1027 * @q: request_queue to allocate request from
1028 * @rw_flags: RW and SYNC flags
1029 * @bio: bio to allocate request for (can be %NULL)
1030 * @gfp_mask: allocation mask
1031 *
1032 * Get a free request from @q.  If %__GFP_WAIT is set in @gfp_mask, this
1033 * function keeps retrying under memory pressure and fails iff @q is dead.
1034 *
1035 * Must be called with @q->queue_lock held.
1036 * Returns %NULL on failure, with @q->queue_lock held.
1037 * Returns !%NULL on success, with @q->queue_lock *not held*.
1038 */
1039static struct request *get_request(struct request_queue *q, int rw_flags,
1040                                   struct bio *bio, gfp_t gfp_mask)
1041{
1042        const bool is_sync = rw_is_sync(rw_flags) != 0;
1043        DEFINE_WAIT(wait);
1044        struct request_list *rl;
1045        struct request *rq;
1046
1047        rl = blk_get_rl(q, bio);        /* transferred to @rq on success */
1048retry:
1049        rq = __get_request(rl, rw_flags, bio, gfp_mask);
1050        if (rq)
1051                return rq;
1052
1053        if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dead(q))) {
1054                blk_put_rl(rl);
1055                return NULL;
1056        }
1057
1058        /* wait on @rl and retry */
1059        prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1060                                  TASK_UNINTERRUPTIBLE);
1061
1062        trace_block_sleeprq(q, bio, rw_flags & 1);
1063
1064        spin_unlock_irq(q->queue_lock);
1065        io_schedule();
1066
1067        /*
1068         * After sleeping, we become a "batching" process and will be able
1069         * to allocate at least one request, and up to a big batch of them
1070         * for a small period of time.  See ioc_batching, ioc_set_batching
1071         */
1072        ioc_set_batching(q, current->io_context);
1073
1074        spin_lock_irq(q->queue_lock);
1075        finish_wait(&rl->wait[is_sync], &wait);
1076
1077        goto retry;
1078}
1079
1080struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1081{
1082        struct request *rq;
1083
1084        BUG_ON(rw != READ && rw != WRITE);
1085
1086        /* create ioc upfront */
1087        create_io_context(gfp_mask, q->node);
1088
1089        spin_lock_irq(q->queue_lock);
1090        rq = get_request(q, rw, NULL, gfp_mask);
1091        if (!rq)
1092                spin_unlock_irq(q->queue_lock);
1093        /* q->queue_lock is unlocked at this point */
1094
1095        return rq;
1096}
1097EXPORT_SYMBOL(blk_get_request);
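
/*
 * Example (illustrative only; the SCSI constants come from
 * <scsi/scsi.h>): allocating a passthrough request, filling in a CDB and
 * executing it synchronously:
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (!rq)
 *		return -ENODEV;
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	rq->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
 *	rq->cmd[0] = TEST_UNIT_READY;
 *	rq->timeout = 30 * HZ;
 *	blk_execute_rq(q, NULL, rq, 0);
 *	blk_put_request(rq);
 */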
1098
1099/**
1100 * blk_make_request - given a bio, allocate a corresponding struct request.
1101 * @q: target request queue
1102 * @bio:  The bio describing the memory mappings that will be submitted for IO.
1103 *        It may be a chained-bio properly constructed by block/bio layer.
1104 * @gfp_mask: gfp flags to be used for memory allocation
1105 *
1106 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
1107 * type commands, where the struct request needs to be further initialized by
1108 * the caller. It is passed a &struct bio, which describes the memory info of
1109 * the I/O transfer.
1110 *
1111 * The caller of blk_make_request must make sure that bi_io_vec are set to
1112 * describe the memory buffers, that bio_data_dir() will return the needed
1113 * direction of the request, and that all bios in the passed bio-chain are
1114 * properly set accordingly.
1115 *
1116 * If called under non-sleepable conditions, mapped bio buffers must not
1117 * need bouncing, by calling the appropriate masked or flagged allocator,
1118 * suitable for the target device. Otherwise the call to blk_queue_bounce will
1119 * BUG.
1120 *
1121 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
1122 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
1123 * anything but the first bio in the chain. Otherwise you risk waiting for IO
1124 * completion of a bio that hasn't been submitted yet, thus resulting in a
1125 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
1126 * of bio_alloc(), as that avoids the mempool deadlock.
1127 * If possible a big IO should be split into smaller parts when allocation
1128 * fails. Partial allocation should not be an error, or you risk a live-lock.
1129 */
1130struct request *blk_make_request(struct request_queue *q, struct bio *bio,
1131                                 gfp_t gfp_mask)
1132{
1133        struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
1134
1135        if (unlikely(!rq))
1136                return ERR_PTR(-ENOMEM);
1137
1138        for_each_bio(bio) {
1139                struct bio *bounce_bio = bio;
1140                int ret;
1141
1142                blk_queue_bounce(q, &bounce_bio);
1143                ret = blk_rq_append_bio(q, rq, bounce_bio);
1144                if (unlikely(ret)) {
1145                        blk_put_request(rq);
1146                        return ERR_PTR(ret);
1147                }
1148        }
1149
1150        return rq;
1151}
1152EXPORT_SYMBOL(blk_make_request);
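
/*
 * Example (illustrative only; my_done is an assumed rq_end_io_fn): a
 * driver that already has a mapped bio chain can turn it into a BLOCK_PC
 * request and fire it asynchronously:
 *
 *	rq = blk_make_request(q, bio, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	... fill in rq->cmd / rq->cmd_len ...
 *	blk_execute_rq_nowait(q, NULL, rq, 0, my_done);
 */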
1153
1154/**
1155 * blk_requeue_request - put a request back on queue
1156 * @q:          request queue where request should be inserted
1157 * @rq:         request to be inserted
1158 *
1159 * Description:
1160 *    Drivers often keep queueing requests until the hardware cannot accept
1161 *    more. When that condition happens we need to put the request back
1162 *    on the queue. Must be called with queue lock held.
1163 */
1164void blk_requeue_request(struct request_queue *q, struct request *rq)
1165{
1166        blk_delete_timer(rq);
1167        blk_clear_rq_complete(rq);
1168        trace_block_rq_requeue(q, rq);
1169
1170        if (blk_rq_tagged(rq))
1171                blk_queue_end_tag(q, rq);
1172
1173        BUG_ON(blk_queued_rq(rq));
1174
1175        elv_requeue_request(q, rq);
1176}
1177EXPORT_SYMBOL(blk_requeue_request);
1178
1179static void add_acct_request(struct request_queue *q, struct request *rq,
1180                             int where)
1181{
1182        drive_stat_acct(rq, 1);
1183        __elv_add_request(q, rq, where);
1184}
1185
1186static void part_round_stats_single(int cpu, struct hd_struct *part,
1187                                    unsigned long now)
1188{
1189        if (now == part->stamp)
1190                return;
1191
1192        if (part_in_flight(part)) {
1193                __part_stat_add(cpu, part, time_in_queue,
1194                                part_in_flight(part) * (now - part->stamp));
1195                __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1196        }
1197        part->stamp = now;
1198}
1199
1200/**
1201 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1202 * @cpu: cpu number for stats access
1203 * @part: target partition
1204 *
1205 * The average IO queue length and utilisation statistics are maintained
1206 * by observing the current state of the queue length and the amount of
1207 * time it has been in this state for.
1208 *
1209 * Normally, that accounting is done on IO completion, but that can result
1210 * in more than a second's worth of IO being accounted for within any one
1211 * second, leading to >100% utilisation.  To deal with that, we call this
1212 * function to do a round-off before returning the results when reading
1213 * /proc/diskstats.  This accounts immediately for all queue usage up to
1214 * the current jiffies and restarts the counters again.
1215 */
1216void part_round_stats(int cpu, struct hd_struct *part)
1217{
1218        unsigned long now = jiffies;
1219
1220        if (part->partno)
1221                part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1222        part_round_stats_single(cpu, part, now);
1223}
1224EXPORT_SYMBOL_GPL(part_round_stats);
1225
1226/*
1227 * queue lock must be held
1228 */
1229void __blk_put_request(struct request_queue *q, struct request *req)
1230{
1231        if (unlikely(!q))
1232                return;
1233        if (unlikely(--req->ref_count))
1234                return;
1235
1236        elv_completed_request(q, req);
1237
1238        /* this is a bio leak */
1239        WARN_ON(req->bio != NULL);
1240
1241        /*
1242         * Request may not have originated from ll_rw_blk. If not,
1243         * it didn't come out of our reserved rq pools
1244         */
1245        if (req->cmd_flags & REQ_ALLOCED) {
1246                unsigned int flags = req->cmd_flags;
1247                struct request_list *rl = blk_rq_rl(req);
1248
1249                BUG_ON(!list_empty(&req->queuelist));
1250                BUG_ON(!hlist_unhashed(&req->hash));
1251
1252                blk_free_request(rl, req);
1253                freed_request(rl, flags);
1254                blk_put_rl(rl);
1255        }
1256}
1257EXPORT_SYMBOL_GPL(__blk_put_request);
1258
1259void blk_put_request(struct request *req)
1260{
1261        unsigned long flags;
1262        struct request_queue *q = req->q;
1263
1264        spin_lock_irqsave(q->queue_lock, flags);
1265        __blk_put_request(q, req);
1266        spin_unlock_irqrestore(q->queue_lock, flags);
1267}
1268EXPORT_SYMBOL(blk_put_request);
1269
1270/**
1271 * blk_add_request_payload - add a payload to a request
1272 * @rq: request to update
1273 * @page: page backing the payload
1274 * @len: length of the payload.
1275 *
1276 * This allows a block driver to later add a payload to an already
1277 * submitted request.  The driver needs to take care of freeing the payload
1278 * itself.
1279 *
1280 * Note that this is a quite horrible hack and nothing but handling of
1281 * discard requests should ever use it.
1282 */
1283void blk_add_request_payload(struct request *rq, struct page *page,
1284                unsigned int len)
1285{
1286        struct bio *bio = rq->bio;
1287
1288        bio->bi_io_vec->bv_page = page;
1289        bio->bi_io_vec->bv_offset = 0;
1290        bio->bi_io_vec->bv_len = len;
1291
1292        bio->bi_size = len;
1293        bio->bi_vcnt = 1;
1294        bio->bi_phys_segments = 1;
1295
1296        rq->__data_len = rq->resid_len = len;
1297        rq->nr_phys_segments = 1;
1298        rq->buffer = bio_data(bio);
1299}
1300EXPORT_SYMBOL_GPL(blk_add_request_payload);
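
/*
 * Example (illustrative only, loosely modelled on SCSI disk discard
 * preparation; payload_len is an assumed length): during ->prep_rq_fn a
 * driver attaches a page carrying the on-the-wire parameter list to a
 * discard request:
 *
 *	struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
 *
 *	if (!page)
 *		return BLKPREP_DEFER;
 *	... fill the page with the device-specific payload ...
 *	blk_add_request_payload(rq, page, payload_len);
 */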
1301
1302static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1303                                   struct bio *bio)
1304{
1305        const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1306
1307        if (!ll_back_merge_fn(q, req, bio))
1308                return false;
1309
1310        trace_block_bio_backmerge(q, bio);
1311
1312        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1313                blk_rq_set_mixed_merge(req);
1314
1315        req->biotail->bi_next = bio;
1316        req->biotail = bio;
1317        req->__data_len += bio->bi_size;
1318        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1319
1320        drive_stat_acct(req, 0);
1321        return true;
1322}
1323
1324static bool bio_attempt_front_merge(struct request_queue *q,
1325                                    struct request *req, struct bio *bio)
1326{
1327        const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1328
1329        if (!ll_front_merge_fn(q, req, bio))
1330                return false;
1331
1332        trace_block_bio_frontmerge(q, bio);
1333
1334        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1335                blk_rq_set_mixed_merge(req);
1336
1337        bio->bi_next = req->bio;
1338        req->bio = bio;
1339
1340        /*
1341         * req->buffer may not be valid. If the low level driver said
1342         * it didn't need a bounce buffer then it had better
1343         * not touch req->buffer either...
1344         */
1345        req->buffer = bio_data(bio);
1346        req->__sector = bio->bi_sector;
1347        req->__data_len += bio->bi_size;
1348        req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1349
1350        drive_stat_acct(req, 0);
1351        return true;
1352}
1353
1354/**
1355 * attempt_plug_merge - try to merge with %current's plugged list
1356 * @q: request_queue new bio is being queued at
1357 * @bio: new bio being queued
1358 * @request_count: out parameter for number of traversed plugged requests
1359 *
1360 * Determine whether @bio being queued on @q can be merged with a request
1361 * on %current's plugged list.  Returns %true if merge was successful,
1362 * otherwise %false.
1363 *
1364 * Plugging coalesces IOs from the same issuer for the same purpose without
1365 * going through @q->queue_lock.  As such it's more of an issuing mechanism
1366 * than scheduling, and the request, while it may have elvpriv data, is not
1367 * added on the elevator at this point.  In addition, we don't have
1368 * reliable access to the elevator outside queue lock.  Only check basic
1369 * merging parameters without querying the elevator.
1370 */
1371static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1372                               unsigned int *request_count)
1373{
1374        struct blk_plug *plug;
1375        struct request *rq;
1376        bool ret = false;
1377
1378        plug = current->plug;
1379        if (!plug)
1380                goto out;
1381        *request_count = 0;
1382
1383        list_for_each_entry_reverse(rq, &plug->list, queuelist) {
1384                int el_ret;
1385
1386                if (rq->q == q)
1387                        (*request_count)++;
1388
1389                if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1390                        continue;
1391
1392                el_ret = blk_try_merge(rq, bio);
1393                if (el_ret == ELEVATOR_BACK_MERGE) {
1394                        ret = bio_attempt_back_merge(q, rq, bio);
1395                        if (ret)
1396                                break;
1397                } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1398                        ret = bio_attempt_front_merge(q, rq, bio);
1399                        if (ret)
1400                                break;
1401                }
1402        }
1403out:
1404        return ret;
1405}
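
/*
 * Example (illustrative only): the plugged list walked above is built up
 * by the submitter itself, typically like this:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(WRITE, bios[i]);
 *	blk_finish_plug(&plug);
 *
 * Bios submitted inside the plugged section become requests on
 * current->plug (merging with existing ones where possible, as above) and
 * are only sent on to the device queue when the plug is flushed.
 */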
1406
1407void init_request_from_bio(struct request *req, struct bio *bio)
1408{
1409        req->cmd_type = REQ_TYPE_FS;
1410
1411        req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1412        if (bio->bi_rw & REQ_RAHEAD)
1413                req->cmd_flags |= REQ_FAILFAST_MASK;
1414
1415        req->errors = 0;
1416        req->__sector = bio->bi_sector;
1417        req->ioprio = bio_prio(bio);
1418        blk_rq_bio_prep(req->q, req, bio);
1419}
1420
1421void blk_queue_bio(struct request_queue *q, struct bio *bio)
1422{
1423        const bool sync = !!(bio->bi_rw & REQ_SYNC);
1424        struct blk_plug *plug;
1425        int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1426        struct request *req;
1427        unsigned int request_count = 0;
1428
1429        /*
1430         * low level driver can indicate that it wants pages above a
1431         * certain limit bounced to low memory (i.e. for highmem, or even
1432         * ISA DMA in theory)
1433         */
1434        blk_queue_bounce(q, &bio);
1435
1436        if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1437                spin_lock_irq(q->queue_lock);
1438                where = ELEVATOR_INSERT_FLUSH;
1439                goto get_rq;
1440        }
1441
1442        /*
1443         * Check if we can merge with the plugged list before grabbing
1444         * any locks.
1445         */
1446        if (attempt_plug_merge(q, bio, &request_count))
1447                return;
1448
1449        spin_lock_irq(q->queue_lock);
1450
1451        el_ret = elv_merge(q, &req, bio);
1452        if (el_ret == ELEVATOR_BACK_MERGE) {
1453                if (bio_attempt_back_merge(q, req, bio)) {
1454                        elv_bio_merged(q, req, bio);
1455                        if (!attempt_back_merge(q, req))
1456                                elv_merged_request(q, req, el_ret);
1457                        goto out_unlock;
1458                }
1459        } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1460                if (bio_attempt_front_merge(q, req, bio)) {
1461                        elv_bio_merged(q, req, bio);
1462                        if (!attempt_front_merge(q, req))
1463                                elv_merged_request(q, req, el_ret);
1464                        goto out_unlock;
1465                }
1466        }
1467
1468get_rq:
1469        /*
1470         * This sync check and mask will be re-done in init_request_from_bio(),
1471         * but we need to set it earlier to expose the sync flag to the
1472         * rq allocator and io schedulers.
1473         */
1474        rw_flags = bio_data_dir(bio);
1475        if (sync)
1476                rw_flags |= REQ_SYNC;
1477
1478        /*
1479         * Grab a free request. This might sleep but cannot fail.
1480         * Returns with the queue unlocked.
1481         */
1482        req = get_request(q, rw_flags, bio, GFP_NOIO);
1483        if (unlikely(!req)) {
1484                bio_endio(bio, -ENODEV);        /* @q is dead */
1485                goto out_unlock;
1486        }
1487
1488        /*
1489         * After dropping the lock and possibly sleeping here, our request
1490         * may now be mergeable after it had proven unmergeable (above).
1491         * We don't worry about that case for efficiency. It won't happen
1492         * often, and the elevators are able to handle it.
1493         */
1494        init_request_from_bio(req, bio);
1495
1496        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1497                req->cpu = raw_smp_processor_id();
1498
1499        plug = current->plug;
1500        if (plug) {
1501                /*
1502                 * If this is the first request added after a plug, fire
1503                 * off a plug trace. If others have been added before, check
1504                 * if we have multiple devices in this plug. If so, make a
1505                 * note to sort the list before dispatch.
1506                 */
1507                if (list_empty(&plug->list))
1508                        trace_block_plug(q);
1509                else {
1510                        if (!plug->should_sort) {
1511                                struct request *__rq;
1512
1513                                __rq = list_entry_rq(plug->list.prev);
1514                                if (__rq->q != q)
1515                                        plug->should_sort = 1;
1516                        }
1517                        if (request_count >= BLK_MAX_REQUEST_COUNT) {
1518                                blk_flush_plug_list(plug, false);
1519                                trace_block_plug(q);
1520                        }
1521                }
1522                list_add_tail(&req->queuelist, &plug->list);
1523                drive_stat_acct(req, 1);
1524        } else {
1525                spin_lock_irq(q->queue_lock);
1526                add_acct_request(q, req, where);
1527                __blk_run_queue(q);
1528out_unlock:
1529                spin_unlock_irq(q->queue_lock);
1530        }
1531}
1532EXPORT_SYMBOL_GPL(blk_queue_bio);       /* for device mapper only */
1533
1534/*
1535 * If bio->bi_bdev is a partition, remap the location
1536 */
1537static inline void blk_partition_remap(struct bio *bio)
1538{
1539        struct block_device *bdev = bio->bi_bdev;
1540
1541        if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1542                struct hd_struct *p = bdev->bd_part;
1543
1544                bio->bi_sector += p->start_sect;
1545                bio->bi_bdev = bdev->bd_contains;
1546
1547                trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1548                                      bdev->bd_dev,
1549                                      bio->bi_sector - p->start_sect);
1550        }
1551}
1552
1553static void handle_bad_sector(struct bio *bio)
1554{
1555        char b[BDEVNAME_SIZE];
1556
1557        printk(KERN_INFO "attempt to access beyond end of device\n");
1558        printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1559                        bdevname(bio->bi_bdev, b),
1560                        bio->bi_rw,
1561                        (unsigned long long)bio->bi_sector + bio_sectors(bio),
1562                        (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1563
1564        set_bit(BIO_EOF, &bio->bi_flags);
1565}
1566
1567#ifdef CONFIG_FAIL_MAKE_REQUEST
1568
1569static DECLARE_FAULT_ATTR(fail_make_request);
1570
1571static int __init setup_fail_make_request(char *str)
1572{
1573        return setup_fault_attr(&fail_make_request, str);
1574}
1575__setup("fail_make_request=", setup_fail_make_request);
1576
1577static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1578{
1579        return part->make_it_fail && should_fail(&fail_make_request, bytes);
1580}
1581
1582static int __init fail_make_request_debugfs(void)
1583{
1584        struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1585                                                NULL, &fail_make_request);
1586
1587        return IS_ERR(dir) ? PTR_ERR(dir) : 0;
1588}
1589
1590late_initcall(fail_make_request_debugfs);
1591
1592#else /* CONFIG_FAIL_MAKE_REQUEST */
1593
1594static inline bool should_fail_request(struct hd_struct *part,
1595                                        unsigned int bytes)
1596{
1597        return false;
1598}
1599
1600#endif /* CONFIG_FAIL_MAKE_REQUEST */
1601
1602/*
1603 * Check whether this bio extends beyond the end of the device.
1604 */
1605static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1606{
1607        sector_t maxsector;
1608
1609        if (!nr_sectors)
1610                return 0;
1611
1612        /* Test device or partition size, when known. */
1613        maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1614        if (maxsector) {
1615                sector_t sector = bio->bi_sector;
1616
1617                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1618                        /*
1619                         * This may well happen - the kernel calls bread()
1620                         * without checking the size of the device, e.g., when
1621                         * mounting a device.
1622                         */
1623                        handle_bad_sector(bio);
1624                        return 1;
1625                }
1626        }
1627
1628        return 0;
1629}
1630
1631static noinline_for_stack bool
1632generic_make_request_checks(struct bio *bio)
1633{
1634        struct request_queue *q;
1635        int nr_sectors = bio_sectors(bio);
1636        int err = -EIO;
1637        char b[BDEVNAME_SIZE];
1638        struct hd_struct *part;
1639
1640        might_sleep();
1641
1642        if (bio_check_eod(bio, nr_sectors))
1643                goto end_io;
1644
1645        q = bdev_get_queue(bio->bi_bdev);
1646        if (unlikely(!q)) {
1647                printk(KERN_ERR
1648                       "generic_make_request: Trying to access "
1649                        "nonexistent block-device %s (%Lu)\n",
1650                        bdevname(bio->bi_bdev, b),
1651                        (long long) bio->bi_sector);
1652                goto end_io;
1653        }
1654
1655        if (likely(bio_is_rw(bio) &&
1656                   nr_sectors > queue_max_hw_sectors(q))) {
1657                printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1658                       bdevname(bio->bi_bdev, b),
1659                       bio_sectors(bio),
1660                       queue_max_hw_sectors(q));
1661                goto end_io;
1662        }
1663
1664        part = bio->bi_bdev->bd_part;
1665        if (should_fail_request(part, bio->bi_size) ||
1666            should_fail_request(&part_to_disk(part)->part0,
1667                                bio->bi_size))
1668                goto end_io;
1669
1670        /*
1671         * If this device has partitions, remap block n
1672         * of partition p to block n+start(p) of the disk.
1673         */
1674        blk_partition_remap(bio);
1675
1676        if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1677                goto end_io;
1678
1679        if (bio_check_eod(bio, nr_sectors))
1680                goto end_io;
1681
1682        /*
1683         * Filter flush bio's early so that make_request based
1684         * drivers without flush support don't have to worry
1685         * about them.
1686         */
1687        if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1688                bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1689                if (!nr_sectors) {
1690                        err = 0;
1691                        goto end_io;
1692                }
1693        }
1694
1695        if ((bio->bi_rw & REQ_DISCARD) &&
1696            (!blk_queue_discard(q) ||
1697             ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
1698                err = -EOPNOTSUPP;
1699                goto end_io;
1700        }
1701
1702        if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
1703                err = -EOPNOTSUPP;
1704                goto end_io;
1705        }
1706
1707        /*
1708         * Various block parts want %current->io_context and lazy ioc
1709         * allocation ends up trading a lot of pain for a small amount of
1710         * memory.  Just allocate it upfront.  This may fail and the block
1711         * layer knows how to live with it.
1712         */
1713        create_io_context(GFP_ATOMIC, q->node);
1714
1715        if (blk_throtl_bio(q, bio))
1716                return false;   /* throttled, will be resubmitted later */
1717
1718        trace_block_bio_queue(q, bio);
1719        return true;
1720
1721end_io:
1722        bio_endio(bio, err);
1723        return false;
1724}
1725
1726/**
1727 * generic_make_request - hand a buffer to its device driver for I/O
1728 * @bio:  The bio describing the location in memory and on the device.
1729 *
1730 * generic_make_request() is used to make I/O requests of block
1731 * devices. It is passed a &struct bio, which describes the I/O that needs
1732 * to be done.
1733 *
1734 * generic_make_request() does not return any status.  The
1735 * success/failure status of the request, along with notification of
1736 * completion, is delivered asynchronously through the bio->bi_end_io
1737 * function described (one day) elsewhere.
1738 *
1739 * The caller of generic_make_request must make sure that bi_io_vec
1740 * are set to describe the memory buffer, and that bi_bdev and bi_sector are
1741 * set to describe the device address, and the
1742 * bi_end_io and optionally bi_private are set to describe how
1743 * completion notification should be signaled.
1744 *
1745 * generic_make_request and the drivers it calls may use bi_next if this
1746 * bio happens to be merged with someone else, and may resubmit the bio to
1747 * a lower device by calling into generic_make_request recursively, which
1748 * means the bio should NOT be touched after the call to ->make_request_fn.
1749 */
1750void generic_make_request(struct bio *bio)
1751{
1752        struct bio_list bio_list_on_stack;
1753
1754        if (!generic_make_request_checks(bio))
1755                return;
1756
1757        /*
1758         * We only want one ->make_request_fn to be active at a time, else
1759         * stack usage with stacked devices could be a problem.  So use
1760         * current->bio_list to keep a list of requests submitted by a
1761         * make_request_fn function.  current->bio_list is also used as a
1762         * flag to say if generic_make_request is currently active in this
1763         * task or not.  If it is NULL, then no make_request is active.  If
1764         * it is non-NULL, then a make_request is active, and new requests
1765         * should be added at the tail
1766         */
1767        if (current->bio_list) {
1768                bio_list_add(current->bio_list, bio);
1769                return;
1770        }
1771
1772        /* following loop may be a bit non-obvious, and so deserves some
1773         * explanation.
1774         * Before entering the loop, bio->bi_next is NULL (as all callers
1775         * ensure that) so we have a list with a single bio.
1776         * We pretend that we have just taken it off a longer list, so
1777         * we assign bio_list to a pointer to the bio_list_on_stack,
1778         * thus initialising the bio_list of new bios to be
1779         * added.  ->make_request() may indeed add some more bios
1780         * through a recursive call to generic_make_request.  If it
1781         * did, we find a non-NULL value in bio_list and re-enter the loop
1782         * from the top.  In this case we really did just take the bio
1783         * off the top of the list (no pretending) and so remove it from
1784         * bio_list, and call into ->make_request() again.
1785         */
1786        BUG_ON(bio->bi_next);
1787        bio_list_init(&bio_list_on_stack);
1788        current->bio_list = &bio_list_on_stack;
1789        do {
1790                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1791
1792                q->make_request_fn(q, bio);
1793
1794                bio = bio_list_pop(current->bio_list);
1795        } while (bio);
1796        current->bio_list = NULL; /* deactivate */
1797}
1798EXPORT_SYMBOL(generic_make_request);
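
/*
 * Usage sketch (editorial, not part of the original source): a minimal
 * bio-based remapping driver of the kind that relies on the
 * current->bio_list recursion avoidance above.  It would be registered
 * with blk_queue_make_request(q, foo_make_request) at probe time; the
 * foo_* names and FOO_OFFSET are purely illustrative.
 */
#define FOO_OFFSET      2048    /* sectors to shift every bio by */

static void foo_make_request(struct request_queue *q, struct bio *bio)
{
        struct block_device *lower = q->queuedata;      /* set at probe time */

        /* redirect the bio to the backing device */
        bio->bi_bdev = lower;
        bio->bi_sector += FOO_OFFSET;

        /*
         * This does not recurse arbitrarily deep: if we are already inside
         * generic_make_request(), the bio is parked on current->bio_list
         * and picked up by the loop above.
         */
        generic_make_request(bio);
}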
1799
1800/**
1801 * submit_bio - submit a bio to the block device layer for I/O
1802 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1803 * @bio: The &struct bio which describes the I/O
1804 *
1805 * submit_bio() is very similar in purpose to generic_make_request(), and
1806 * uses that function to do most of the work. Both are fairly rough
1807 * interfaces; @bio must be presetup and ready for I/O.
1808 *
1809 */
1810void submit_bio(int rw, struct bio *bio)
1811{
1812        bio->bi_rw |= rw;
1813
1814        /*
1815         * If it's a regular read/write or a barrier with data attached,
1816         * go through the normal accounting stuff before submission.
1817         */
1818        if (bio_has_data(bio)) {
1819                unsigned int count;
1820
1821                if (unlikely(rw & REQ_WRITE_SAME))
1822                        count = bdev_logical_block_size(bio->bi_bdev) >> 9;
1823                else
1824                        count = bio_sectors(bio);
1825
1826                if (rw & WRITE) {
1827                        count_vm_events(PGPGOUT, count);
1828                } else {
1829                        task_io_account_read(bio->bi_size);
1830                        count_vm_events(PGPGIN, count);
1831                }
1832
1833                if (unlikely(block_dump)) {
1834                        char b[BDEVNAME_SIZE];
1835                        printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1836                        current->comm, task_pid_nr(current),
1837                                (rw & WRITE) ? "WRITE" : "READ",
1838                                (unsigned long long)bio->bi_sector,
1839                                bdevname(bio->bi_bdev, b),
1840                                count);
1841                }
1842        }
1843
1844        generic_make_request(bio);
1845}
1846EXPORT_SYMBOL(submit_bio);
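
/*
 * Usage sketch (editorial): reading a single page synchronously through
 * submit_bio().  Error handling is trimmed and the foo_* names are
 * illustrative only.
 */
static void foo_read_endio(struct bio *bio, int error)
{
        complete(bio->bi_private);
}

static int foo_read_page(struct block_device *bdev, sector_t sector,
                         struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(done);
        struct bio *bio;
        int ret;

        bio = bio_alloc(GFP_KERNEL, 1);
        if (!bio)
                return -ENOMEM;

        bio->bi_bdev    = bdev;
        bio->bi_sector  = sector;
        bio->bi_end_io  = foo_read_endio;
        bio->bi_private = &done;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        submit_bio(READ, bio);
        wait_for_completion(&done);

        ret = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
        bio_put(bio);
        return ret;
}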
1847
1848/**
1849 * blk_rq_check_limits - Helper function to check a request for the queue limit
1850 * @q:  the queue
1851 * @rq: the request being checked
1852 *
1853 * Description:
1854 *    @rq may have been made based on weaker limitations of upper-level queues
1855 *    in request stacking drivers, and it may violate the limitation of @q.
1856 *    Since the block layer and the underlying device driver trust @rq
1857 *    after it is inserted to @q, it should be checked against @q before
1858 *    the insertion using this generic function.
1859 *
1860 *    This function should also be useful for request stacking drivers
1861 *    in some cases below, so export this function.
1862 *    Request stacking drivers like request-based dm may change the queue
1863 *    limits while requests are in the queue (e.g. dm's table swapping).
1864 *    Such request stacking drivers should check those requests against
1865 *    the new queue limits again when they dispatch those requests,
1866 *    although such checks are also done against the old queue limits
1867 *    when submitting requests.
1868 */
1869int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1870{
1871        if (!rq_mergeable(rq))
1872                return 0;
1873
1874        if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
1875                printk(KERN_ERR "%s: over max size limit.\n", __func__);
1876                return -EIO;
1877        }
1878
1879        /*
1880         * queue's settings related to segment counting like q->bounce_pfn
1881         * may differ from that of other stacking queues.
1882         * Recalculate it to check the request correctly on this queue's
1883         * limitation.
1884         */
1885        blk_recalc_rq_segments(rq);
1886        if (rq->nr_phys_segments > queue_max_segments(q)) {
1887                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1888                return -EIO;
1889        }
1890
1891        return 0;
1892}
1893EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1894
1895/**
1896 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1897 * @q:  the queue to submit the request
1898 * @rq: the request being queued
1899 */
1900int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1901{
1902        unsigned long flags;
1903        int where = ELEVATOR_INSERT_BACK;
1904
1905        if (blk_rq_check_limits(q, rq))
1906                return -EIO;
1907
1908        if (rq->rq_disk &&
1909            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1910                return -EIO;
1911
1912        spin_lock_irqsave(q->queue_lock, flags);
1913        if (unlikely(blk_queue_dead(q))) {
1914                spin_unlock_irqrestore(q->queue_lock, flags);
1915                return -ENODEV;
1916        }
1917
1918        /*
1919         * Submitting request must be dequeued before calling this function
1920         * because it will be linked to another request_queue
1921         */
1922        BUG_ON(blk_queued_rq(rq));
1923
1924        if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
1925                where = ELEVATOR_INSERT_FLUSH;
1926
1927        add_acct_request(q, rq, where);
1928        if (where == ELEVATOR_INSERT_FLUSH)
1929                __blk_run_queue(q);
1930        spin_unlock_irqrestore(q->queue_lock, flags);
1931
1932        return 0;
1933}
1934EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
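
/*
 * Usage sketch (editorial): how a request stacking driver might push an
 * already-built clone down to a lower queue.  blk_insert_cloned_request()
 * re-checks the clone against the lower queue's limits via
 * blk_rq_check_limits() instead of trusting the (possibly weaker) limits
 * of the upper queue.  The foo_* names are illustrative.
 */
static int foo_dispatch_clone(struct request_queue *lower_q,
                              struct request *clone)
{
        int ret;

        ret = blk_insert_cloned_request(lower_q, clone);
        if (ret == -EIO)
                pr_err("foo: clone violates limits of the lower queue\n");
        else if (ret == -ENODEV)
                pr_err("foo: lower queue is dead\n");

        return ret;     /* the caller requeues or fails the original */
}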
1935
1936/**
1937 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1938 * @rq: request to examine
1939 *
1940 * Description:
1941 *     A request could be a merge of IOs which require different failure
1942 *     handling.  This function determines the number of bytes which
1943 *     can be failed from the beginning of the request without
1944 *     crossing into an area which needs to be retried further.
1945 *
1946 * Return:
1947 *     The number of bytes to fail.
1948 *
1949 * Context:
1950 *     queue_lock must be held.
1951 */
1952unsigned int blk_rq_err_bytes(const struct request *rq)
1953{
1954        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1955        unsigned int bytes = 0;
1956        struct bio *bio;
1957
1958        if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1959                return blk_rq_bytes(rq);
1960
1961        /*
1962         * Currently the only 'mixing' which can happen is between
1963         * different failfast types.  We can safely fail portions
1964         * which have all the failfast bits that the first one has -
1965         * the ones which are at least as eager to fail as the first
1966         * one.
1967         */
1968        for (bio = rq->bio; bio; bio = bio->bi_next) {
1969                if ((bio->bi_rw & ff) != ff)
1970                        break;
1971                bytes += bio->bi_size;
1972        }
1973
1974        /* this could lead to infinite loop */
1975        BUG_ON(blk_rq_bytes(rq) && !bytes);
1976        return bytes;
1977}
1978EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1979
1980static void blk_account_io_completion(struct request *req, unsigned int bytes)
1981{
1982        if (blk_do_io_stat(req)) {
1983                const int rw = rq_data_dir(req);
1984                struct hd_struct *part;
1985                int cpu;
1986
1987                cpu = part_stat_lock();
1988                part = req->part;
1989                part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1990                part_stat_unlock();
1991        }
1992}
1993
1994static void blk_account_io_done(struct request *req)
1995{
1996        /*
1997         * Account IO completion.  flush_rq isn't accounted as a
1998         * normal IO on either queueing or completion.  Accounting the
1999         * containing request is enough.
2000         */
2001        if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
2002                unsigned long duration = jiffies - req->start_time;
2003                const int rw = rq_data_dir(req);
2004                struct hd_struct *part;
2005                int cpu;
2006
2007                cpu = part_stat_lock();
2008                part = req->part;
2009
2010                part_stat_inc(cpu, part, ios[rw]);
2011                part_stat_add(cpu, part, ticks[rw], duration);
2012                part_round_stats(cpu, part);
2013                part_dec_in_flight(part, rw);
2014
2015                hd_struct_put(part);
2016                part_stat_unlock();
2017        }
2018}
2019
2020/**
2021 * blk_peek_request - peek at the top of a request queue
2022 * @q: request queue to peek at
2023 *
2024 * Description:
2025 *     Return the request at the top of @q.  The returned request
2026 *     should be started using blk_start_request() before LLD starts
2027 *     processing it.
2028 *
2029 * Return:
2030 *     Pointer to the request at the top of @q if available.  Null
2031 *     otherwise.
2032 *
2033 * Context:
2034 *     queue_lock must be held.
2035 */
2036struct request *blk_peek_request(struct request_queue *q)
2037{
2038        struct request *rq;
2039        int ret;
2040
2041        while ((rq = __elv_next_request(q)) != NULL) {
2042                if (!(rq->cmd_flags & REQ_STARTED)) {
2043                        /*
2044                         * This is the first time the device driver
2045                         * sees this request (possibly after
2046                         * requeueing).  Notify IO scheduler.
2047                         */
2048                        if (rq->cmd_flags & REQ_SORTED)
2049                                elv_activate_rq(q, rq);
2050
2051                        /*
2052                         * Just mark it as started even if we don't start
2053                         * it; a request that has been delayed should
2054                         * not be passed by new incoming requests.
2055                         */
2056                        rq->cmd_flags |= REQ_STARTED;
2057                        trace_block_rq_issue(q, rq);
2058                }
2059
2060                if (!q->boundary_rq || q->boundary_rq == rq) {
2061                        q->end_sector = rq_end_sector(rq);
2062                        q->boundary_rq = NULL;
2063                }
2064
2065                if (rq->cmd_flags & REQ_DONTPREP)
2066                        break;
2067
2068                if (q->dma_drain_size && blk_rq_bytes(rq)) {
2069                        /*
2070                         * Make sure space for the drain appears.  We
2071                         * know we can do this because max_hw_segments
2072                         * has been adjusted to be one fewer than the
2073                         * device can handle.
2074                         */
2075                        rq->nr_phys_segments++;
2076                }
2077
2078                if (!q->prep_rq_fn)
2079                        break;
2080
2081                ret = q->prep_rq_fn(q, rq);
2082                if (ret == BLKPREP_OK) {
2083                        break;
2084                } else if (ret == BLKPREP_DEFER) {
2085                        /*
2086                         * the request may have been (partially) prepped.
2087                         * we need to keep this request in the front to
2088                         * avoid resource deadlock.  REQ_STARTED will
2089                         * prevent other fs requests from passing this one.
2090                         */
2091                        if (q->dma_drain_size && blk_rq_bytes(rq) &&
2092                            !(rq->cmd_flags & REQ_DONTPREP)) {
2093                                /*
2094                                 * remove the space for the drain we added
2095                                 * so that we don't add it again
2096                                 */
2097                                --rq->nr_phys_segments;
2098                        }
2099
2100                        rq = NULL;
2101                        break;
2102                } else if (ret == BLKPREP_KILL) {
2103                        rq->cmd_flags |= REQ_QUIET;
2104                        /*
2105                         * Mark this request as started so we don't trigger
2106                         * any debug logic in the end I/O path.
2107                         */
2108                        blk_start_request(rq);
2109                        __blk_end_request_all(rq, -EIO);
2110                } else {
2111                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2112                        break;
2113                }
2114        }
2115
2116        return rq;
2117}
2118EXPORT_SYMBOL(blk_peek_request);
2119
2120void blk_dequeue_request(struct request *rq)
2121{
2122        struct request_queue *q = rq->q;
2123
2124        BUG_ON(list_empty(&rq->queuelist));
2125        BUG_ON(ELV_ON_HASH(rq));
2126
2127        list_del_init(&rq->queuelist);
2128
2129        /*
2130         * The time frame between a request being removed from the lists
2131         * and when it is freed is accounted as I/O that is in progress at
2132         * the driver side.
2133         */
2134        if (blk_account_rq(rq)) {
2135                q->in_flight[rq_is_sync(rq)]++;
2136                set_io_start_time_ns(rq);
2137        }
2138}
2139
2140/**
2141 * blk_start_request - start request processing on the driver
2142 * @req: request to dequeue
2143 *
2144 * Description:
2145 *     Dequeue @req and start timeout timer on it.  This hands off the
2146 *     request to the driver.
2147 *
2148 *     Block internal functions which don't want to start timer should
2149 *     call blk_dequeue_request().
2150 *
2151 * Context:
2152 *     queue_lock must be held.
2153 */
2154void blk_start_request(struct request *req)
2155{
2156        blk_dequeue_request(req);
2157
2158        /*
2159         * We are now handing the request to the hardware, initialize
2160         * resid_len to full count and add the timeout handler.
2161         */
2162        req->resid_len = blk_rq_bytes(req);
2163        if (unlikely(blk_bidi_rq(req)))
2164                req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2165
2166        blk_add_timer(req);
2167}
2168EXPORT_SYMBOL(blk_start_request);
2169
2170/**
2171 * blk_fetch_request - fetch a request from a request queue
2172 * @q: request queue to fetch a request from
2173 *
2174 * Description:
2175 *     Return the request at the top of @q.  The request is started on
2176 *     return and LLD can start processing it immediately.
2177 *
2178 * Return:
2179 *     Pointer to the request at the top of @q if available.  Null
2180 *     otherwise.
2181 *
2182 * Context:
2183 *     queue_lock must be held.
2184 */
2185struct request *blk_fetch_request(struct request_queue *q)
2186{
2187        struct request *rq;
2188
2189        rq = blk_peek_request(q);
2190        if (rq)
2191                blk_start_request(rq);
2192        return rq;
2193}
2194EXPORT_SYMBOL(blk_fetch_request);
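
/*
 * Usage sketch (editorial): the canonical request_fn loop of a simple
 * request-based driver, for a queue set up with
 * blk_init_queue(foo_request_fn, &foo_lock).  Requests are fetched in
 * started state and completed in one go; the foo_* names are illustrative.
 */
static void foo_request_fn(struct request_queue *q)
{
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL) {
                struct req_iterator iter;
                struct bio_vec *bvec;
                int err = -EIO;

                if (req->cmd_type == REQ_TYPE_FS) {
                        err = 0;
                        /* a real driver programs the hardware per segment */
                        rq_for_each_segment(bvec, req, iter)
                                pr_debug("foo: %u bytes at page offset %u\n",
                                         bvec->bv_len, bvec->bv_offset);
                }

                /* request_fn runs with queue_lock held: use the __ variant */
                __blk_end_request_all(req, err);
        }
}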
2195
2196/**
2197 * blk_update_request - Special helper function for request stacking drivers
2198 * @req:      the request being processed
2199 * @error:    %0 for success, < %0 for error
2200 * @nr_bytes: number of bytes to complete @req
2201 *
2202 * Description:
2203 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
2204 *     the request structure even if @req doesn't have leftover.
2205 *     If @req has leftover, sets it up for the next range of segments.
2206 *
2207 *     This special helper function is only for request stacking drivers
2208 *     (e.g. request-based dm) so that they can handle partial completion.
2209 *     Actual device drivers should use blk_end_request instead.
2210 *
2211 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2212 *     %false return from this function.
2213 *
2214 * Return:
2215 *     %false - this request doesn't have any more data
2216 *     %true  - this request has more data
2217 **/
2218bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2219{
2220        int total_bytes, bio_nbytes, next_idx = 0;
2221        struct bio *bio;
2222
2223        if (!req->bio)
2224                return false;
2225
2226        trace_block_rq_complete(req->q, req);
2227
2228        /*
2229         * For fs requests, rq is just a carrier of independent bios
2230         * and each partial completion should be handled separately.
2231         * Reset per-request error on each partial completion.
2232         *
2233         * TODO: tj: This is too subtle.  It would be better to let
2234         * low level drivers do what they see fit.
2235         */
2236        if (req->cmd_type == REQ_TYPE_FS)
2237                req->errors = 0;
2238
2239        if (error && req->cmd_type == REQ_TYPE_FS &&
2240            !(req->cmd_flags & REQ_QUIET)) {
2241                char *error_type;
2242
2243                switch (error) {
2244                case -ENOLINK:
2245                        error_type = "recoverable transport";
2246                        break;
2247                case -EREMOTEIO:
2248                        error_type = "critical target";
2249                        break;
2250                case -EBADE:
2251                        error_type = "critical nexus";
2252                        break;
2253                case -EIO:
2254                default:
2255                        error_type = "I/O";
2256                        break;
2257                }
2258                printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
2259                                   error_type, req->rq_disk ?
2260                                   req->rq_disk->disk_name : "?",
2261                                   (unsigned long long)blk_rq_pos(req));
2262
2263        }
2264
2265        blk_account_io_completion(req, nr_bytes);
2266
2267        total_bytes = bio_nbytes = 0;
2268        while ((bio = req->bio) != NULL) {
2269                int nbytes;
2270
2271                if (nr_bytes >= bio->bi_size) {
2272                        req->bio = bio->bi_next;
2273                        nbytes = bio->bi_size;
2274                        req_bio_endio(req, bio, nbytes, error);
2275                        next_idx = 0;
2276                        bio_nbytes = 0;
2277                } else {
2278                        int idx = bio->bi_idx + next_idx;
2279
2280                        if (unlikely(idx >= bio->bi_vcnt)) {
2281                                blk_dump_rq_flags(req, "__end_that");
2282                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
2283                                       __func__, idx, bio->bi_vcnt);
2284                                break;
2285                        }
2286
2287                        nbytes = bio_iovec_idx(bio, idx)->bv_len;
2288                        BIO_BUG_ON(nbytes > bio->bi_size);
2289
2290                        /*
2291                         * not a complete bvec done
2292                         */
2293                        if (unlikely(nbytes > nr_bytes)) {
2294                                bio_nbytes += nr_bytes;
2295                                total_bytes += nr_bytes;
2296                                break;
2297                        }
2298
2299                        /*
2300                         * advance to the next vector
2301                         */
2302                        next_idx++;
2303                        bio_nbytes += nbytes;
2304                }
2305
2306                total_bytes += nbytes;
2307                nr_bytes -= nbytes;
2308
2309                bio = req->bio;
2310                if (bio) {
2311                        /*
2312                         * end more in this run, or just return 'not-done'
2313                         */
2314                        if (unlikely(nr_bytes <= 0))
2315                                break;
2316                }
2317        }
2318
2319        /*
2320         * completely done
2321         */
2322        if (!req->bio) {
2323                /*
2324                 * Reset counters so that the request stacking driver
2325                 * can find how many bytes remain in the request
2326                 * later.
2327                 */
2328                req->__data_len = 0;
2329                return false;
2330        }
2331
2332        /*
2333         * if the request wasn't completed, update state
2334         */
2335        if (bio_nbytes) {
2336                req_bio_endio(req, bio, bio_nbytes, error);
2337                bio->bi_idx += next_idx;
2338                bio_iovec(bio)->bv_offset += nr_bytes;
2339                bio_iovec(bio)->bv_len -= nr_bytes;
2340        }
2341
2342        req->__data_len -= total_bytes;
2343        req->buffer = bio_data(req->bio);
2344
2345        /* update sector only for requests with clear definition of sector */
2346        if (req->cmd_type == REQ_TYPE_FS)
2347                req->__sector += total_bytes >> 9;
2348
2349        /* mixed attributes always follow the first bio */
2350        if (req->cmd_flags & REQ_MIXED_MERGE) {
2351                req->cmd_flags &= ~REQ_FAILFAST_MASK;
2352                req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2353        }
2354
2355        /*
2356         * If total number of sectors is less than the first segment
2357         * size, something has gone terribly wrong.
2358         */
2359        if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2360                blk_dump_rq_flags(req, "request botched");
2361                req->__data_len = blk_rq_cur_bytes(req);
2362        }
2363
2364        /* recalculate the number of segments */
2365        blk_recalc_rq_segments(req);
2366
2367        return true;
2368}
2369EXPORT_SYMBOL_GPL(blk_update_request);
2370
2371static bool blk_update_bidi_request(struct request *rq, int error,
2372                                    unsigned int nr_bytes,
2373                                    unsigned int bidi_bytes)
2374{
2375        if (blk_update_request(rq, error, nr_bytes))
2376                return true;
2377
2378        /* Bidi request must be completed as a whole */
2379        if (unlikely(blk_bidi_rq(rq)) &&
2380            blk_update_request(rq->next_rq, error, bidi_bytes))
2381                return true;
2382
2383        if (blk_queue_add_random(rq->q))
2384                add_disk_randomness(rq->rq_disk);
2385
2386        return false;
2387}
2388
2389/**
2390 * blk_unprep_request - unprepare a request
2391 * @req:        the request
2392 *
2393 * This function makes a request ready for complete resubmission (or
2394 * completion).  It happens only after all error handling is complete,
2395 * so represents the appropriate moment to deallocate any resources
2396 * that were allocated to the request in the prep_rq_fn.  The queue
2397 * lock is held when calling this.
2398 */
2399void blk_unprep_request(struct request *req)
2400{
2401        struct request_queue *q = req->q;
2402
2403        req->cmd_flags &= ~REQ_DONTPREP;
2404        if (q->unprep_rq_fn)
2405                q->unprep_rq_fn(q, req);
2406}
2407EXPORT_SYMBOL_GPL(blk_unprep_request);
2408
2409/*
2410 * queue lock must be held
2411 */
2412static void blk_finish_request(struct request *req, int error)
2413{
2414        if (blk_rq_tagged(req))
2415                blk_queue_end_tag(req->q, req);
2416
2417        BUG_ON(blk_queued_rq(req));
2418
2419        if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2420                laptop_io_completion(&req->q->backing_dev_info);
2421
2422        blk_delete_timer(req);
2423
2424        if (req->cmd_flags & REQ_DONTPREP)
2425                blk_unprep_request(req);
2426
2427
2428        blk_account_io_done(req);
2429
2430        if (req->end_io)
2431                req->end_io(req, error);
2432        else {
2433                if (blk_bidi_rq(req))
2434                        __blk_put_request(req->next_rq->q, req->next_rq);
2435
2436                __blk_put_request(req->q, req);
2437        }
2438}
2439
2440/**
2441 * blk_end_bidi_request - Complete a bidi request
2442 * @rq:         the request to complete
2443 * @error:      %0 for success, < %0 for error
2444 * @nr_bytes:   number of bytes to complete @rq
2445 * @bidi_bytes: number of bytes to complete @rq->next_rq
2446 *
2447 * Description:
2448 *     Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2449 *     Drivers that support bidi can safely call this function for any
2450 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
2451 *     just ignored.
2452 *
2453 * Return:
2454 *     %false - we are done with this request
2455 *     %true  - still buffers pending for this request
2456 **/
2457static bool blk_end_bidi_request(struct request *rq, int error,
2458                                 unsigned int nr_bytes, unsigned int bidi_bytes)
2459{
2460        struct request_queue *q = rq->q;
2461        unsigned long flags;
2462
2463        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2464                return true;
2465
2466        spin_lock_irqsave(q->queue_lock, flags);
2467        blk_finish_request(rq, error);
2468        spin_unlock_irqrestore(q->queue_lock, flags);
2469
2470        return false;
2471}
2472
2473/**
2474 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2475 * @rq:         the request to complete
2476 * @error:      %0 for success, < %0 for error
2477 * @nr_bytes:   number of bytes to complete @rq
2478 * @bidi_bytes: number of bytes to complete @rq->next_rq
2479 *
2480 * Description:
2481 *     Identical to blk_end_bidi_request() except that queue lock is
2482 *     assumed to be locked on entry and remains so on return.
2483 *
2484 * Return:
2485 *     %false - we are done with this request
2486 *     %true  - still buffers pending for this request
2487 **/
2488bool __blk_end_bidi_request(struct request *rq, int error,
2489                                   unsigned int nr_bytes, unsigned int bidi_bytes)
2490{
2491        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2492                return true;
2493
2494        blk_finish_request(rq, error);
2495
2496        return false;
2497}
2498
2499/**
2500 * blk_end_request - Helper function for drivers to complete the request.
2501 * @rq:       the request being processed
2502 * @error:    %0 for success, < %0 for error
2503 * @nr_bytes: number of bytes to complete
2504 *
2505 * Description:
2506 *     Ends I/O on a number of bytes attached to @rq.
2507 *     If @rq has leftover, sets it up for the next range of segments.
2508 *
2509 * Return:
2510 *     %false - we are done with this request
2511 *     %true  - still buffers pending for this request
2512 **/
2513bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2514{
2515        return blk_end_bidi_request(rq, error, nr_bytes, 0);
2516}
2517EXPORT_SYMBOL(blk_end_request);
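
/*
 * Usage sketch (editorial): piecemeal completion from a driver's
 * completion handler.  Each finished transfer retires @bytes of the
 * request; only when blk_end_request() returns false is the request
 * fully completed (and freed).  The foo_* name is illustrative.
 */
static void foo_transfer_done(struct request *req, unsigned int bytes,
                              int error)
{
        if (blk_end_request(req, error, bytes)) {
                /* more data pending; req->bio now points at the remainder */
                pr_debug("foo: %u bytes still pending\n", blk_rq_bytes(req));
                return;
        }

        /* the request is done and has been freed; do not touch it again */
}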
2518
2519/**
2520 * blk_end_request_all - Helper function for drivers to finish the request.
2521 * @rq: the request to finish
2522 * @error: %0 for success, < %0 for error
2523 *
2524 * Description:
2525 *     Completely finish @rq.
2526 */
2527void blk_end_request_all(struct request *rq, int error)
2528{
2529        bool pending;
2530        unsigned int bidi_bytes = 0;
2531
2532        if (unlikely(blk_bidi_rq(rq)))
2533                bidi_bytes = blk_rq_bytes(rq->next_rq);
2534
2535        pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2536        BUG_ON(pending);
2537}
2538EXPORT_SYMBOL(blk_end_request_all);
2539
2540/**
2541 * blk_end_request_cur - Helper function to finish the current request chunk.
2542 * @rq: the request to finish the current chunk for
2543 * @error: %0 for success, < %0 for error
2544 *
2545 * Description:
2546 *     Complete the current consecutively mapped chunk from @rq.
2547 *
2548 * Return:
2549 *     %false - we are done with this request
2550 *     %true  - still buffers pending for this request
2551 */
2552bool blk_end_request_cur(struct request *rq, int error)
2553{
2554        return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2555}
2556EXPORT_SYMBOL(blk_end_request_cur);
2557
2558/**
2559 * blk_end_request_err - Finish a request till the next failure boundary.
2560 * @rq: the request to finish till the next failure boundary for
2561 * @error: must be negative errno
2562 *
2563 * Description:
2564 *     Complete @rq till the next failure boundary.
2565 *
2566 * Return:
2567 *     %false - we are done with this request
2568 *     %true  - still buffers pending for this request
2569 */
2570bool blk_end_request_err(struct request *rq, int error)
2571{
2572        WARN_ON(error >= 0);
2573        return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2574}
2575EXPORT_SYMBOL_GPL(blk_end_request_err);
2576
2577/**
2578 * __blk_end_request - Helper function for drivers to complete the request.
2579 * @rq:       the request being processed
2580 * @error:    %0 for success, < %0 for error
2581 * @nr_bytes: number of bytes to complete
2582 *
2583 * Description:
2584 *     Must be called with queue lock held unlike blk_end_request().
2585 *
2586 * Return:
2587 *     %false - we are done with this request
2588 *     %true  - still buffers pending for this request
2589 **/
2590bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2591{
2592        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2593}
2594EXPORT_SYMBOL(__blk_end_request);
2595
2596/**
2597 * __blk_end_request_all - Helper function for drivers to finish the request.
2598 * @rq: the request to finish
2599 * @error: %0 for success, < %0 for error
2600 *
2601 * Description:
2602 *     Completely finish @rq.  Must be called with queue lock held.
2603 */
2604void __blk_end_request_all(struct request *rq, int error)
2605{
2606        bool pending;
2607        unsigned int bidi_bytes = 0;
2608
2609        if (unlikely(blk_bidi_rq(rq)))
2610                bidi_bytes = blk_rq_bytes(rq->next_rq);
2611
2612        pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2613        BUG_ON(pending);
2614}
2615EXPORT_SYMBOL(__blk_end_request_all);
2616
2617/**
2618 * __blk_end_request_cur - Helper function to finish the current request chunk.
2619 * @rq: the request to finish the current chunk for
2620 * @error: %0 for success, < %0 for error
2621 *
2622 * Description:
2623 *     Complete the current consecutively mapped chunk from @rq.  Must
2624 *     be called with queue lock held.
2625 *
2626 * Return:
2627 *     %false - we are done with this request
2628 *     %true  - still buffers pending for this request
2629 */
2630bool __blk_end_request_cur(struct request *rq, int error)
2631{
2632        return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2633}
2634EXPORT_SYMBOL(__blk_end_request_cur);
2635
2636/**
2637 * __blk_end_request_err - Finish a request till the next failure boundary.
2638 * @rq: the request to finish till the next failure boundary for
2639 * @error: must be negative errno
2640 *
2641 * Description:
2642 *     Complete @rq till the next failure boundary.  Must be called
2643 *     with queue lock held.
2644 *
2645 * Return:
2646 *     %false - we are done with this request
2647 *     %true  - still buffers pending for this request
2648 */
2649bool __blk_end_request_err(struct request *rq, int error)
2650{
2651        WARN_ON(error >= 0);
2652        return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2653}
2654EXPORT_SYMBOL_GPL(__blk_end_request_err);
2655
2656void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2657                     struct bio *bio)
2658{
2659        /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2660        rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2661
2662        if (bio_has_data(bio)) {
2663                rq->nr_phys_segments = bio_phys_segments(q, bio);
2664                rq->buffer = bio_data(bio);
2665        }
2666        rq->__data_len = bio->bi_size;
2667        rq->bio = rq->biotail = bio;
2668
2669        if (bio->bi_bdev)
2670                rq->rq_disk = bio->bi_bdev->bd_disk;
2671}
2672
2673#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2674/**
2675 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2676 * @rq: the request to be flushed
2677 *
2678 * Description:
2679 *     Flush all pages in @rq.
2680 */
2681void rq_flush_dcache_pages(struct request *rq)
2682{
2683        struct req_iterator iter;
2684        struct bio_vec *bvec;
2685
2686        rq_for_each_segment(bvec, rq, iter)
2687                flush_dcache_page(bvec->bv_page);
2688}
2689EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2690#endif
2691
2692/**
2693 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2694 * @q : the queue of the device being checked
2695 *
2696 * Description:
2697 *    Check if underlying low-level drivers of a device are busy.
2698 *    If the drivers want to export their busy state, they must set own
2699 *    exporting function using blk_queue_lld_busy() first.
2700 *
2701 *    Basically, this function is used only by request stacking drivers
2702 *    to stop dispatching requests to underlying devices when underlying
2703 *    devices are busy.  This behavior encourages more I/O merging on the
2704 *    queue of the request stacking driver and prevents I/O throughput
2705 *    regressions under bursty I/O load.
2706 *
2707 * Return:
2708 *    0 - Not busy (The request stacking driver should dispatch request)
2709 *    1 - Busy (The request stacking driver should stop dispatching request)
2710 */
2711int blk_lld_busy(struct request_queue *q)
2712{
2713        if (q->lld_busy_fn)
2714                return q->lld_busy_fn(q);
2715
2716        return 0;
2717}
2718EXPORT_SYMBOL_GPL(blk_lld_busy);
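
/*
 * Usage sketch (editorial): exporting a busy state for blk_lld_busy().
 * The lower driver opts in with blk_queue_lld_busy(); a request stacking
 * driver then polls blk_lld_busy() before dispatching.  The foo_* names
 * and the in-flight limit are illustrative.
 */
struct foo_device {
        atomic_t in_flight;     /* commands currently owned by the hardware */
};

static int foo_lld_busy(struct request_queue *q)
{
        struct foo_device *foo = q->queuedata;

        /* report busy while the hardware queue is saturated */
        return atomic_read(&foo->in_flight) >= 32;
}

static void foo_register_busy_fn(struct request_queue *q)
{
        blk_queue_lld_busy(q, foo_lld_busy);
}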
2719
2720/**
2721 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2722 * @rq: the clone request to be cleaned up
2723 *
2724 * Description:
2725 *     Free all bios in @rq for a cloned request.
2726 */
2727void blk_rq_unprep_clone(struct request *rq)
2728{
2729        struct bio *bio;
2730
2731        while ((bio = rq->bio) != NULL) {
2732                rq->bio = bio->bi_next;
2733
2734                bio_put(bio);
2735        }
2736}
2737EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2738
2739/*
2740 * Copy attributes of the original request to the clone request.
2741 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2742 */
2743static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2744{
2745        dst->cpu = src->cpu;
2746        dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2747        dst->cmd_type = src->cmd_type;
2748        dst->__sector = blk_rq_pos(src);
2749        dst->__data_len = blk_rq_bytes(src);
2750        dst->nr_phys_segments = src->nr_phys_segments;
2751        dst->ioprio = src->ioprio;
2752        dst->extra_len = src->extra_len;
2753}
2754
2755/**
2756 * blk_rq_prep_clone - Helper function to setup clone request
2757 * @rq: the request to be setup
2758 * @rq_src: original request to be cloned
2759 * @bs: bio_set that bios for clone are allocated from
2760 * @gfp_mask: memory allocation mask for bio
2761 * @bio_ctr: setup function to be called for each clone bio.
2762 *           Returns %0 for success, non %0 for failure.
2763 * @data: private data to be passed to @bio_ctr
2764 *
2765 * Description:
2766 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2767 *     The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2768 *     are not copied, and copying such parts is the caller's responsibility.
2769 *     Also, pages which the original bios are pointing to are not copied
2770 *     and the cloned bios just point to the same pages.
2771 *     So cloned bios must be completed before original bios, which means
2772 *     the caller must complete @rq before @rq_src.
2773 */
2774int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2775                      struct bio_set *bs, gfp_t gfp_mask,
2776                      int (*bio_ctr)(struct bio *, struct bio *, void *),
2777                      void *data)
2778{
2779        struct bio *bio, *bio_src;
2780
2781        if (!bs)
2782                bs = fs_bio_set;
2783
2784        blk_rq_init(NULL, rq);
2785
2786        __rq_for_each_bio(bio_src, rq_src) {
2787                bio = bio_clone_bioset(bio_src, gfp_mask, bs);
2788                if (!bio)
2789                        goto free_and_out;
2790
2791                if (bio_ctr && bio_ctr(bio, bio_src, data))
2792                        goto free_and_out;
2793
2794                if (rq->bio) {
2795                        rq->biotail->bi_next = bio;
2796                        rq->biotail = bio;
2797                } else
2798                        rq->bio = rq->biotail = bio;
2799        }
2800
2801        __blk_rq_prep_clone(rq, rq_src);
2802
2803        return 0;
2804
2805free_and_out:
2806        if (bio)
2807                bio_put(bio);
2808        blk_rq_unprep_clone(rq);
2809
2810        return -ENOMEM;
2811}
2812EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
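
/*
 * Usage sketch (editorial): cloning a request the way a request-based
 * stacking driver (e.g. request-based dm) might, then pushing the clone
 * down with blk_insert_cloned_request().  Allocation of @clone, the
 * end_io wiring and the foo_* names are simplified and illustrative.
 */
static int foo_clone_bio(struct bio *bio, struct bio *bio_orig, void *data)
{
        /* a real driver also wires up bio->bi_end_io for the clone here */
        bio->bi_private = data;
        return 0;
}

static int foo_clone_and_map(struct request *rq, struct request *clone,
                             struct request_queue *lower_q, void *ctx)
{
        int ret;

        ret = blk_rq_prep_clone(clone, rq, NULL /* use fs_bio_set */,
                                GFP_ATOMIC, foo_clone_bio, ctx);
        if (ret)
                return ret;     /* -ENOMEM */

        ret = blk_insert_cloned_request(lower_q, clone);
        if (ret)
                blk_rq_unprep_clone(clone);

        return ret;
}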
2813
2814int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2815{
2816        return queue_work(kblockd_workqueue, work);
2817}
2818EXPORT_SYMBOL(kblockd_schedule_work);
2819
2820int kblockd_schedule_delayed_work(struct request_queue *q,
2821                        struct delayed_work *dwork, unsigned long delay)
2822{
2823        return queue_delayed_work(kblockd_workqueue, dwork, delay);
2824}
2825EXPORT_SYMBOL(kblockd_schedule_delayed_work);
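
/*
 * Usage sketch (editorial): bouncing a queue kick through kblockd, for a
 * driver that must not run its queue from the current (e.g. atomic)
 * context.  In-tree code usually just calls blk_run_queue_async(); the
 * foo_* names are illustrative.
 */
struct foo_kick {
        struct work_struct work;
        struct request_queue *q;
};

static void foo_kick_fn(struct work_struct *work)
{
        struct foo_kick *kick = container_of(work, struct foo_kick, work);

        spin_lock_irq(kick->q->queue_lock);
        __blk_run_queue(kick->q);
        spin_unlock_irq(kick->q->queue_lock);
}

static void foo_kick_queue(struct foo_kick *kick)
{
        INIT_WORK(&kick->work, foo_kick_fn);
        kblockd_schedule_work(kick->q, &kick->work);
}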
2826
2827#define PLUG_MAGIC      0x91827364
2828
2829/**
2830 * blk_start_plug - initialize blk_plug and track it inside the task_struct
2831 * @plug:       The &struct blk_plug that needs to be initialized
2832 *
2833 * Description:
2834 *   Tracking blk_plug inside the task_struct will help with auto-flushing the
2835 *   pending I/O should the task end up blocking between blk_start_plug() and
2836 *   blk_finish_plug(). This is important from a performance perspective, but
2837 *   also ensures that we don't deadlock. For instance, if the task is blocking
2838 *   for a memory allocation, memory reclaim could end up wanting to free a
2839 *   page belonging to that request that is currently residing in our private
2840 *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
2841 *   this kind of deadlock.
2842 */
2843void blk_start_plug(struct blk_plug *plug)
2844{
2845        struct task_struct *tsk = current;
2846
2847        plug->magic = PLUG_MAGIC;
2848        INIT_LIST_HEAD(&plug->list);
2849        INIT_LIST_HEAD(&plug->cb_list);
2850        plug->should_sort = 0;
2851
2852        /*
2853         * If this is a nested plug, don't actually assign it. It will be
2854         * flushed on its own.
2855         */
2856        if (!tsk->plug) {
2857                /*
2858                 * Store ordering should not be needed here, since a potential
2859                 * preempt will imply a full memory barrier
2860                 */
2861                tsk->plug = plug;
2862        }
2863}
2864EXPORT_SYMBOL(blk_start_plug);
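
/*
 * Usage sketch (editorial): batching a burst of already-built bios under
 * an on-stack plug, as filesystems and the readahead code do.  The plug
 * keeps the requests on a per-task list until blk_finish_plug() (or a
 * schedule) flushes them to the driver in one go.
 */
static void foo_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);
        blk_finish_plug(&plug);         /* dispatches the plugged requests */
}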
2865
2866static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2867{
2868        struct request *rqa = container_of(a, struct request, queuelist);
2869        struct request *rqb = container_of(b, struct request, queuelist);
2870
2871        return !(rqa->q < rqb->q ||
2872                (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
2873}
2874
2875/*
2876 * If 'from_schedule' is true, then postpone the dispatch of requests
2877 * until a safe kblockd context. We do this to avoid accidentally large
2878 * additional stack usage in driver dispatch, in places where the original
2879 * plugger did not intend it.
2880 */
2881static void queue_unplugged(struct request_queue *q, unsigned int depth,
2882                            bool from_schedule)
2883        __releases(q->queue_lock)
2884{
2885        trace_block_unplug(q, depth, !from_schedule);
2886
2887        /*
2888         * Don't mess with a dead queue.
2889         */
2890        if (unlikely(blk_queue_dead(q))) {
2891                spin_unlock(q->queue_lock);
2892                return;
2893        }
2894
2895        /*
2896         * If we are punting this to kblockd, then we can safely drop
2897         * the queue_lock before waking kblockd (which needs to take
2898         * this lock).
2899         */
2900        if (from_schedule) {
2901                spin_unlock(q->queue_lock);
2902                blk_run_queue_async(q);
2903        } else {
2904                __blk_run_queue(q);
2905                spin_unlock(q->queue_lock);
2906        }
2907
2908}
2909
2910static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
2911{
2912        LIST_HEAD(callbacks);
2913
2914        while (!list_empty(&plug->cb_list)) {
2915                list_splice_init(&plug->cb_list, &callbacks);
2916
2917                while (!list_empty(&callbacks)) {
2918                        struct blk_plug_cb *cb = list_first_entry(&callbacks,
2919                                                          struct blk_plug_cb,
2920                                                          list);
2921                        list_del(&cb->list);
2922                        cb->callback(cb, from_schedule);
2923                }
2924        }
2925}
2926
2927struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
2928                                      int size)
2929{
2930        struct blk_plug *plug = current->plug;
2931        struct blk_plug_cb *cb;
2932
2933        if (!plug)
2934                return NULL;
2935
2936        list_for_each_entry(cb, &plug->cb_list, list)
2937                if (cb->callback == unplug && cb->data == data)
2938                        return cb;
2939
2940        /* Not currently on the callback list */
2941        BUG_ON(size < sizeof(*cb));
2942        cb = kzalloc(size, GFP_ATOMIC);
2943        if (cb) {
2944                cb->data = data;
2945                cb->callback = unplug;
2946                list_add(&cb->list, &plug->cb_list);
2947        }
2948        return cb;
2949}
2950EXPORT_SYMBOL(blk_check_plugged);
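
/*
 * Usage sketch (editorial): per-task batching with blk_check_plugged(),
 * in the style of md/raid.  Bios are parked on a per-plug list and only
 * resubmitted when the plug is flushed.  The foo_* names are illustrative.
 */
struct foo_plug_cb {
        struct blk_plug_cb cb;  /* kzalloc'ed and listed by blk_check_plugged */
        struct bio_list bios;   /* zero-initialized == empty list */
};

static void foo_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        struct foo_plug_cb *fcb = container_of(cb, struct foo_plug_cb, cb);
        struct bio *bio;

        while ((bio = bio_list_pop(&fcb->bios)) != NULL)
                generic_make_request(bio);
        kfree(fcb);             /* the callback owns the allocation */
}

static bool foo_plug_bio(struct bio *bio)
{
        struct blk_plug_cb *cb;
        struct foo_plug_cb *fcb;

        cb = blk_check_plugged(foo_unplug, NULL, sizeof(struct foo_plug_cb));
        if (!cb)
                return false;   /* no plug active: caller submits directly */

        fcb = container_of(cb, struct foo_plug_cb, cb);
        bio_list_add(&fcb->bios, bio);
        return true;
}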
2951
2952void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2953{
2954        struct request_queue *q;
2955        unsigned long flags;
2956        struct request *rq;
2957        LIST_HEAD(list);
2958        unsigned int depth;
2959
2960        BUG_ON(plug->magic != PLUG_MAGIC);
2961
2962        flush_plug_callbacks(plug, from_schedule);
2963        if (list_empty(&plug->list))
2964                return;
2965
2966        list_splice_init(&plug->list, &list);
2967
2968        if (plug->should_sort) {
2969                list_sort(NULL, &list, plug_rq_cmp);
2970                plug->should_sort = 0;
2971        }
2972
2973        q = NULL;
2974        depth = 0;
2975
2976        /*
2977         * Save and disable interrupts here, to avoid doing it for every
2978         * queue lock we have to take.
2979         */
2980        local_irq_save(flags);
2981        while (!list_empty(&list)) {
2982                rq = list_entry_rq(list.next);
2983                list_del_init(&rq->queuelist);
2984                BUG_ON(!rq->q);
2985                if (rq->q != q) {
2986                        /*
2987                         * This drops the queue lock
2988                         */
2989                        if (q)
2990                                queue_unplugged(q, depth, from_schedule);
2991                        q = rq->q;
2992                        depth = 0;
2993                        spin_lock(q->queue_lock);
2994                }
2995
2996                /*
2997                 * Short-circuit if @q is dead
2998                 */
2999                if (unlikely(blk_queue_dead(q))) {
3000                        __blk_end_request_all(rq, -ENODEV);
3001                        continue;
3002                }
3003
3004                /*
3005                 * rq is already accounted, so use raw insert
3006                 */
3007                if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
3008                        __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3009                else
3010                        __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3011
3012                depth++;
3013        }
3014
3015        /*
3016         * This drops the queue lock
3017         */
3018        if (q)
3019                queue_unplugged(q, depth, from_schedule);
3020
3021        local_irq_restore(flags);
3022}
3023
3024void blk_finish_plug(struct blk_plug *plug)
3025{
3026        blk_flush_plug_list(plug, false);
3027
3028        if (plug == current->plug)
3029                current->plug = NULL;
3030}
3031EXPORT_SYMBOL(blk_finish_plug);
3032
3033int __init blk_dev_init(void)
3034{
3035        BUILD_BUG_ON(__REQ_NR_BITS > 8 *
3036                        sizeof(((struct request *)0)->cmd_flags));
3037
3038        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3039        kblockd_workqueue = alloc_workqueue("kblockd",
3040                                            WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3041        if (!kblockd_workqueue)
3042                panic("Failed to create kblockd\n");
3043
3044        request_cachep = kmem_cache_create("blkdev_requests",
3045                        sizeof(struct request), 0, SLAB_PANIC, NULL);
3046
3047        blk_requestq_cachep = kmem_cache_create("blkdev_queue",
3048                        sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3049
3050        return 0;
3051}
3052