linux/drivers/md/dm.c
   1/*
   2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
   3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm.h"
   9#include "dm-uevent.h"
  10
  11#include <linux/init.h>
  12#include <linux/module.h>
  13#include <linux/mutex.h>
  14#include <linux/moduleparam.h>
  15#include <linux/blkpg.h>
  16#include <linux/bio.h>
  17#include <linux/mempool.h>
  18#include <linux/slab.h>
  19#include <linux/idr.h>
  20#include <linux/hdreg.h>
  21#include <linux/delay.h>
  22
  23#include <trace/events/block.h>
  24
  25#define DM_MSG_PREFIX "core"
  26
  27#ifdef CONFIG_PRINTK
  28/*
  29 * ratelimit state to be used in DMXXX_LIMIT().
  30 */
  31DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
  32                       DEFAULT_RATELIMIT_INTERVAL,
  33                       DEFAULT_RATELIMIT_BURST);
  34EXPORT_SYMBOL(dm_ratelimit_state);
  35#endif
  36
  37/*
  38 * Cookies are numeric values sent with CHANGE and REMOVE
  39 * uevents while resuming, removing or renaming the device.
  40 */
  41#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
  42#define DM_COOKIE_LENGTH 24
  43
  44static const char *_name = DM_NAME;
  45
  46static unsigned int major = 0;
  47static unsigned int _major = 0;
  48
  49static DEFINE_IDR(_minor_idr);
  50
  51static DEFINE_SPINLOCK(_minor_lock);
  52/*
  53 * For bio-based dm.
  54 * One of these is allocated per bio.
  55 */
  56struct dm_io {
  57        struct mapped_device *md;
  58        int error;
  59        atomic_t io_count;
  60        struct bio *bio;
  61        unsigned long start_time;
  62        spinlock_t endio_lock;
  63};
  64
  65/*
  66 * For bio-based dm.
  67 * One of these is allocated per target within a bio.  Hopefully
  68 * this will be simplified out one day.
  69 */
  70struct dm_target_io {
  71        struct dm_io *io;
  72        struct dm_target *ti;
  73        union map_info info;
  74};
  75
  76/*
  77 * For request-based dm.
  78 * One of these is allocated per request.
  79 */
  80struct dm_rq_target_io {
  81        struct mapped_device *md;
  82        struct dm_target *ti;
  83        struct request *orig, clone;
  84        int error;
  85        union map_info info;
  86};
  87
  88/*
  89 * For request-based dm.
  90 * One of these is allocated per bio.
  91 */
  92struct dm_rq_clone_bio_info {
  93        struct bio *orig;
  94        struct dm_rq_target_io *tio;
  95};
  96
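     /*
      * Retrieve the map_info associated with a clone.  For bio-based dm it
      * lives in the dm_target_io hung off bio->bi_private; for request-based
      * dm it lives in the dm_rq_target_io hung off rq->end_io_data.
      */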
  97union map_info *dm_get_mapinfo(struct bio *bio)
  98{
  99        if (bio && bio->bi_private)
 100                return &((struct dm_target_io *)bio->bi_private)->info;
 101        return NULL;
 102}
 103
 104union map_info *dm_get_rq_mapinfo(struct request *rq)
 105{
 106        if (rq && rq->end_io_data)
 107                return &((struct dm_rq_target_io *)rq->end_io_data)->info;
 108        return NULL;
 109}
 110EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 111
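     /*
      * Sentinel stored in _minor_idr for a minor that has been reserved but
      * whose mapped_device has not been fully set up yet.
      */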
 112#define MINOR_ALLOCED ((void *)-1)
 113
 114/*
 115 * Bits for the md->flags field.
 116 */
 117#define DMF_BLOCK_IO_FOR_SUSPEND 0
 118#define DMF_SUSPENDED 1
 119#define DMF_FROZEN 2
 120#define DMF_FREEING 3
 121#define DMF_DELETING 4
 122#define DMF_NOFLUSH_SUSPENDING 5
 123#define DMF_MERGE_IS_OPTIONAL 6
 124
 125/*
 126 * Work processed by per-device workqueue.
 127 */
 128struct mapped_device {
 129        struct rw_semaphore io_lock;
 130        struct mutex suspend_lock;
 131        rwlock_t map_lock;
 132        atomic_t holders;
 133        atomic_t open_count;
 134
 135        unsigned long flags;
 136
 137        struct request_queue *queue;
 138        unsigned type;
 139        /* Protect queue and type against concurrent access. */
 140        struct mutex type_lock;
 141
 142        struct target_type *immutable_target_type;
 143
 144        struct gendisk *disk;
 145        char name[16];
 146
 147        void *interface_ptr;
 148
 149        /*
 150         * A list of ios that arrived while we were suspended.
 151         */
 152        atomic_t pending[2];
 153        wait_queue_head_t wait;
 154        struct work_struct work;
 155        struct bio_list deferred;
 156        spinlock_t deferred_lock;
 157
 158        /*
 159         * Processing queue (flush)
 160         */
 161        struct workqueue_struct *wq;
 162
 163        /*
 164         * The current mapping.
 165         */
 166        struct dm_table *map;
 167
 168        /*
 169         * io objects are allocated from here.
 170         */
 171        mempool_t *io_pool;
 172        mempool_t *tio_pool;
 173
 174        struct bio_set *bs;
 175
 176        /*
 177         * Event handling.
 178         */
 179        atomic_t event_nr;
 180        wait_queue_head_t eventq;
 181        atomic_t uevent_seq;
 182        struct list_head uevent_list;
 183        spinlock_t uevent_lock; /* Protect access to uevent_list */
 184
 185        /*
  186         * freeze/thaw support requires holding onto a super block
 187         */
 188        struct super_block *frozen_sb;
 189        struct block_device *bdev;
 190
 191        /* forced geometry settings */
 192        struct hd_geometry geometry;
 193
 194        /* sysfs handle */
 195        struct kobject kobj;
 196
 197        /* zero-length flush that will be cloned and submitted to targets */
 198        struct bio flush_bio;
 199};
 200
 201/*
  202 * For mempool pre-allocation at table load time.
 203 */
 204struct dm_md_mempools {
 205        mempool_t *io_pool;
 206        mempool_t *tio_pool;
 207        struct bio_set *bs;
 208};
 209
 210#define MIN_IOS 256
 211static struct kmem_cache *_io_cache;
 212static struct kmem_cache *_tio_cache;
 213static struct kmem_cache *_rq_tio_cache;
 214static struct kmem_cache *_rq_bio_info_cache;
 215
 216static int __init local_init(void)
 217{
 218        int r = -ENOMEM;
 219
 220        /* allocate a slab for the dm_ios */
 221        _io_cache = KMEM_CACHE(dm_io, 0);
 222        if (!_io_cache)
 223                return r;
 224
 225        /* allocate a slab for the target ios */
 226        _tio_cache = KMEM_CACHE(dm_target_io, 0);
 227        if (!_tio_cache)
 228                goto out_free_io_cache;
 229
 230        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
 231        if (!_rq_tio_cache)
 232                goto out_free_tio_cache;
 233
 234        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
 235        if (!_rq_bio_info_cache)
 236                goto out_free_rq_tio_cache;
 237
 238        r = dm_uevent_init();
 239        if (r)
 240                goto out_free_rq_bio_info_cache;
 241
 242        _major = major;
 243        r = register_blkdev(_major, _name);
 244        if (r < 0)
 245                goto out_uevent_exit;
 246
 247        if (!_major)
 248                _major = r;
 249
 250        return 0;
 251
 252out_uevent_exit:
 253        dm_uevent_exit();
 254out_free_rq_bio_info_cache:
 255        kmem_cache_destroy(_rq_bio_info_cache);
 256out_free_rq_tio_cache:
 257        kmem_cache_destroy(_rq_tio_cache);
 258out_free_tio_cache:
 259        kmem_cache_destroy(_tio_cache);
 260out_free_io_cache:
 261        kmem_cache_destroy(_io_cache);
 262
 263        return r;
 264}
 265
 266static void local_exit(void)
 267{
 268        kmem_cache_destroy(_rq_bio_info_cache);
 269        kmem_cache_destroy(_rq_tio_cache);
 270        kmem_cache_destroy(_tio_cache);
 271        kmem_cache_destroy(_io_cache);
 272        unregister_blkdev(_major, _name);
 273        dm_uevent_exit();
 274
 275        _major = 0;
 276
 277        DMINFO("cleaned up");
 278}
 279
 280static int (*_inits[])(void) __initdata = {
 281        local_init,
 282        dm_target_init,
 283        dm_linear_init,
 284        dm_stripe_init,
 285        dm_io_init,
 286        dm_kcopyd_init,
 287        dm_interface_init,
 288};
 289
 290static void (*_exits[])(void) = {
 291        local_exit,
 292        dm_target_exit,
 293        dm_linear_exit,
 294        dm_stripe_exit,
 295        dm_io_exit,
 296        dm_kcopyd_exit,
 297        dm_interface_exit,
 298};
 299
 300static int __init dm_init(void)
 301{
 302        const int count = ARRAY_SIZE(_inits);
 303
 304        int r, i;
 305
 306        for (i = 0; i < count; i++) {
 307                r = _inits[i]();
 308                if (r)
 309                        goto bad;
 310        }
 311
 312        return 0;
 313
 314      bad:
 315        while (i--)
 316                _exits[i]();
 317
 318        return r;
 319}
 320
 321static void __exit dm_exit(void)
 322{
 323        int i = ARRAY_SIZE(_exits);
 324
 325        while (i--)
 326                _exits[i]();
 327
 328        /*
 329         * Should be empty by this point.
 330         */
 331        idr_remove_all(&_minor_idr);
 332        idr_destroy(&_minor_idr);
 333}
 334
 335/*
 336 * Block device functions
 337 */
 338int dm_deleting_md(struct mapped_device *md)
 339{
 340        return test_bit(DMF_DELETING, &md->flags);
 341}
 342
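     /*
      * An open is refused while the device is being freed or deleted;
      * otherwise take a reference on the md and bump its open count, all
      * under _minor_lock.
      */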
 343static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 344{
 345        struct mapped_device *md;
 346
 347        spin_lock(&_minor_lock);
 348
 349        md = bdev->bd_disk->private_data;
 350        if (!md)
 351                goto out;
 352
 353        if (test_bit(DMF_FREEING, &md->flags) ||
 354            dm_deleting_md(md)) {
 355                md = NULL;
 356                goto out;
 357        }
 358
 359        dm_get(md);
 360        atomic_inc(&md->open_count);
 361
 362out:
 363        spin_unlock(&_minor_lock);
 364
 365        return md ? 0 : -ENXIO;
 366}
 367
 368static int dm_blk_close(struct gendisk *disk, fmode_t mode)
 369{
 370        struct mapped_device *md = disk->private_data;
 371
 372        spin_lock(&_minor_lock);
 373
 374        atomic_dec(&md->open_count);
 375        dm_put(md);
 376
 377        spin_unlock(&_minor_lock);
 378
 379        return 0;
 380}
 381
 382int dm_open_count(struct mapped_device *md)
 383{
 384        return atomic_read(&md->open_count);
 385}
 386
 387/*
 388 * Guarantees nothing is using the device before it's deleted.
 389 */
 390int dm_lock_for_deletion(struct mapped_device *md)
 391{
 392        int r = 0;
 393
 394        spin_lock(&_minor_lock);
 395
 396        if (dm_open_count(md))
 397                r = -EBUSY;
 398        else
 399                set_bit(DMF_DELETING, &md->flags);
 400
 401        spin_unlock(&_minor_lock);
 402
 403        return r;
 404}
 405
 406static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 407{
 408        struct mapped_device *md = bdev->bd_disk->private_data;
 409
 410        return dm_get_geometry(md, geo);
 411}
 412
 413static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 414                        unsigned int cmd, unsigned long arg)
 415{
 416        struct mapped_device *md = bdev->bd_disk->private_data;
 417        struct dm_table *map = dm_get_live_table(md);
 418        struct dm_target *tgt;
 419        int r = -ENOTTY;
 420
 421        if (!map || !dm_table_get_size(map))
 422                goto out;
 423
 424        /* We only support devices that have a single target */
 425        if (dm_table_get_num_targets(map) != 1)
 426                goto out;
 427
 428        tgt = dm_table_get_target(map, 0);
 429
 430        if (dm_suspended_md(md)) {
 431                r = -EAGAIN;
 432                goto out;
 433        }
 434
 435        if (tgt->type->ioctl)
 436                r = tgt->type->ioctl(tgt, cmd, arg);
 437
 438out:
 439        dm_table_put(map);
 440
 441        return r;
 442}
 443
 444static struct dm_io *alloc_io(struct mapped_device *md)
 445{
 446        return mempool_alloc(md->io_pool, GFP_NOIO);
 447}
 448
 449static void free_io(struct mapped_device *md, struct dm_io *io)
 450{
 451        mempool_free(io, md->io_pool);
 452}
 453
 454static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 455{
 456        mempool_free(tio, md->tio_pool);
 457}
 458
 459static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
 460                                            gfp_t gfp_mask)
 461{
 462        return mempool_alloc(md->tio_pool, gfp_mask);
 463}
 464
 465static void free_rq_tio(struct dm_rq_target_io *tio)
 466{
 467        mempool_free(tio, tio->md->tio_pool);
 468}
 469
 470static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
 471{
 472        return mempool_alloc(md->io_pool, GFP_ATOMIC);
 473}
 474
 475static void free_bio_info(struct dm_rq_clone_bio_info *info)
 476{
 477        mempool_free(info, info->tio->md->io_pool);
 478}
 479
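     /* Total number of bios/requests currently in flight on this device. */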
 480static int md_in_flight(struct mapped_device *md)
 481{
 482        return atomic_read(&md->pending[READ]) +
 483               atomic_read(&md->pending[WRITE]);
 484}
 485
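     /*
      * Per-bio accounting: charge the io to the dm disk's part0 statistics
      * and track md->pending[] so that end_io_acct() can wake up anyone
      * waiting in the suspend path once the device becomes idle.
      */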
 486static void start_io_acct(struct dm_io *io)
 487{
 488        struct mapped_device *md = io->md;
 489        int cpu;
 490        int rw = bio_data_dir(io->bio);
 491
 492        io->start_time = jiffies;
 493
 494        cpu = part_stat_lock();
 495        part_round_stats(cpu, &dm_disk(md)->part0);
 496        part_stat_unlock();
 497        atomic_set(&dm_disk(md)->part0.in_flight[rw],
 498                atomic_inc_return(&md->pending[rw]));
 499}
 500
 501static void end_io_acct(struct dm_io *io)
 502{
 503        struct mapped_device *md = io->md;
 504        struct bio *bio = io->bio;
 505        unsigned long duration = jiffies - io->start_time;
 506        int pending, cpu;
 507        int rw = bio_data_dir(bio);
 508
 509        cpu = part_stat_lock();
 510        part_round_stats(cpu, &dm_disk(md)->part0);
 511        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
 512        part_stat_unlock();
 513
 514        /*
 515         * After this is decremented the bio must not be touched if it is
 516         * a flush.
 517         */
 518        pending = atomic_dec_return(&md->pending[rw]);
 519        atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
 520        pending += atomic_read(&md->pending[rw^0x1]);
 521
 522        /* nudge anyone waiting on suspend queue */
 523        if (!pending)
 524                wake_up(&md->wait);
 525}
 526
 527/*
 528 * Add the bio to the list of deferred io.
 529 */
 530static void queue_io(struct mapped_device *md, struct bio *bio)
 531{
 532        unsigned long flags;
 533
 534        spin_lock_irqsave(&md->deferred_lock, flags);
 535        bio_list_add(&md->deferred, bio);
 536        spin_unlock_irqrestore(&md->deferred_lock, flags);
 537        queue_work(md->wq, &md->work);
 538}
 539
 540/*
  541 * Everyone (including functions in this file) should use this
 542 * function to access the md->map field, and make sure they call
 543 * dm_table_put() when finished.
 544 */
 545struct dm_table *dm_get_live_table(struct mapped_device *md)
 546{
 547        struct dm_table *t;
 548        unsigned long flags;
 549
 550        read_lock_irqsave(&md->map_lock, flags);
 551        t = md->map;
 552        if (t)
 553                dm_table_get(t);
 554        read_unlock_irqrestore(&md->map_lock, flags);
 555
 556        return t;
 557}
 558
 559/*
 560 * Get the geometry associated with a dm device
 561 */
 562int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
 563{
 564        *geo = md->geometry;
 565
 566        return 0;
 567}
 568
 569/*
 570 * Set the geometry of a device.
 571 */
 572int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
 573{
 574        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
 575
 576        if (geo->start > sz) {
 577                DMWARN("Start sector is beyond the geometry limits.");
 578                return -EINVAL;
 579        }
 580
 581        md->geometry = *geo;
 582
 583        return 0;
 584}
 585
 586/*-----------------------------------------------------------------
 587 * CRUD START:
 588 *   A more elegant soln is in the works that uses the queue
 589 *   merge fn, unfortunately there are a couple of changes to
 590 *   the block layer that I want to make for this.  So in the
 591 *   interests of getting something for people to use I give
 592 *   you this clearly demarcated crap.
 593 *---------------------------------------------------------------*/
 594
 595static int __noflush_suspending(struct mapped_device *md)
 596{
 597        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 598}
 599
 600/*
 601 * Decrements the number of outstanding ios that a bio has been
  602 * cloned into, completing the original io if necessary.
 603 */
 604static void dec_pending(struct dm_io *io, int error)
 605{
 606        unsigned long flags;
 607        int io_error;
 608        struct bio *bio;
 609        struct mapped_device *md = io->md;
 610
 611        /* Push-back supersedes any I/O errors */
 612        if (unlikely(error)) {
 613                spin_lock_irqsave(&io->endio_lock, flags);
 614                if (!(io->error > 0 && __noflush_suspending(md)))
 615                        io->error = error;
 616                spin_unlock_irqrestore(&io->endio_lock, flags);
 617        }
 618
 619        if (atomic_dec_and_test(&io->io_count)) {
 620                if (io->error == DM_ENDIO_REQUEUE) {
 621                        /*
 622                         * Target requested pushing back the I/O.
 623                         */
 624                        spin_lock_irqsave(&md->deferred_lock, flags);
 625                        if (__noflush_suspending(md))
 626                                bio_list_add_head(&md->deferred, io->bio);
 627                        else
 628                                /* noflush suspend was interrupted. */
 629                                io->error = -EIO;
 630                        spin_unlock_irqrestore(&md->deferred_lock, flags);
 631                }
 632
 633                io_error = io->error;
 634                bio = io->bio;
 635                end_io_acct(io);
 636                free_io(md, io);
 637
 638                if (io_error == DM_ENDIO_REQUEUE)
 639                        return;
 640
 641                if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
 642                        /*
 643                         * Preflush done for flush with data, reissue
 644                         * without REQ_FLUSH.
 645                         */
 646                        bio->bi_rw &= ~REQ_FLUSH;
 647                        queue_io(md, bio);
 648                } else {
 649                        /* done with normal IO or empty flush */
 650                        trace_block_bio_complete(md->queue, bio, io_error);
 651                        bio_endio(bio, io_error);
 652                }
 653        }
 654}
 655
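     /*
      * Completion handler for bio-based clones: give the target's optional
      * end_io method a chance to intervene, then release the clone and drop
      * the original io's reference via dec_pending().
      */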
 656static void clone_endio(struct bio *bio, int error)
 657{
 658        int r = 0;
 659        struct dm_target_io *tio = bio->bi_private;
 660        struct dm_io *io = tio->io;
 661        struct mapped_device *md = tio->io->md;
 662        dm_endio_fn endio = tio->ti->type->end_io;
 663
 664        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
 665                error = -EIO;
 666
 667        if (endio) {
 668                r = endio(tio->ti, bio, error, &tio->info);
 669                if (r < 0 || r == DM_ENDIO_REQUEUE)
 670                        /*
 671                         * error and requeue request are handled
 672                         * in dec_pending().
 673                         */
 674                        error = r;
 675                else if (r == DM_ENDIO_INCOMPLETE)
 676                        /* The target will handle the io */
 677                        return;
 678                else if (r) {
 679                        DMWARN("unimplemented target endio return value: %d", r);
 680                        BUG();
 681                }
 682        }
 683
 684        /*
 685         * Store md for cleanup instead of tio which is about to get freed.
 686         */
 687        bio->bi_private = md->bs;
 688
 689        free_tio(md, tio);
 690        bio_put(bio);
 691        dec_pending(io, error);
 692}
 693
 694/*
 695 * Partial completion handling for request-based dm
 696 */
 697static void end_clone_bio(struct bio *clone, int error)
 698{
 699        struct dm_rq_clone_bio_info *info = clone->bi_private;
 700        struct dm_rq_target_io *tio = info->tio;
 701        struct bio *bio = info->orig;
 702        unsigned int nr_bytes = info->orig->bi_size;
 703
 704        bio_put(clone);
 705
 706        if (tio->error)
 707                /*
 708                 * An error has already been detected on the request.
  709                 * Once an error has occurred, just let clone->end_io() handle
 710                 * the remainder.
 711                 */
 712                return;
 713        else if (error) {
 714                /*
  715                 * Don't report the error to the upper layer yet.
  716                 * The error handling decision is made by the target driver
  717                 * when the request is completed.
 718                 */
 719                tio->error = error;
 720                return;
 721        }
 722
 723        /*
  724         * I/O for the bio completed successfully.
  725         * Report the data completion to the upper layer.
 726         */
 727
 728        /*
 729         * bios are processed from the head of the list.
 730         * So the completing bio should always be rq->bio.
  731         * If it's not, something has gone wrong.
 732         */
 733        if (tio->orig->bio != bio)
 734                DMERR("bio completion is going in the middle of the request");
 735
 736        /*
 737         * Update the original request.
 738         * Do not use blk_end_request() here, because it may complete
 739         * the original request before the clone, and break the ordering.
 740         */
 741        blk_update_request(tio->orig, 0, nr_bytes);
 742}
 743
 744/*
 745 * Don't touch any member of the md after calling this function because
 746 * the md may be freed in dm_put() at the end of this function.
 747 * Or do dm_get() before calling this function and dm_put() later.
 748 */
 749static void rq_completed(struct mapped_device *md, int rw, int run_queue)
 750{
 751        atomic_dec(&md->pending[rw]);
 752
 753        /* nudge anyone waiting on suspend queue */
 754        if (!md_in_flight(md))
 755                wake_up(&md->wait);
 756
 757        /*
 758         * Run this off this callpath, as drivers could invoke end_io while
 759         * inside their request_fn (and holding the queue lock). Calling
 760         * back into ->request_fn() could deadlock attempting to grab the
 761         * queue lock again.
 762         */
 763        if (run_queue)
 764                blk_run_queue_async(md->queue);
 765
 766        /*
 767         * dm_put() must be at the end of this function. See the comment above
 768         */
 769        dm_put(md);
 770}
 771
 772static void free_rq_clone(struct request *clone)
 773{
 774        struct dm_rq_target_io *tio = clone->end_io_data;
 775
 776        blk_rq_unprep_clone(clone);
 777        free_rq_tio(tio);
 778}
 779
 780/*
 781 * Complete the clone and the original request.
 782 * Must be called without queue lock.
 783 */
 784static void dm_end_request(struct request *clone, int error)
 785{
 786        int rw = rq_data_dir(clone);
 787        struct dm_rq_target_io *tio = clone->end_io_data;
 788        struct mapped_device *md = tio->md;
 789        struct request *rq = tio->orig;
 790
 791        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 792                rq->errors = clone->errors;
 793                rq->resid_len = clone->resid_len;
 794
 795                if (rq->sense)
 796                        /*
 797                         * We are using the sense buffer of the original
 798                         * request.
 799                         * So setting the length of the sense data is enough.
 800                         */
 801                        rq->sense_len = clone->sense_len;
 802        }
 803
 804        free_rq_clone(clone);
 805        blk_end_request_all(rq, error);
 806        rq_completed(md, rw, true);
 807}
 808
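     /*
      * Undo dm_prep_fn(): detach and free the clone so the original request
      * can go through request preparation again before being requeued.
      */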
 809static void dm_unprep_request(struct request *rq)
 810{
 811        struct request *clone = rq->special;
 812
 813        rq->special = NULL;
 814        rq->cmd_flags &= ~REQ_DONTPREP;
 815
 816        free_rq_clone(clone);
 817}
 818
 819/*
 820 * Requeue the original request of a clone.
 821 */
 822void dm_requeue_unmapped_request(struct request *clone)
 823{
 824        int rw = rq_data_dir(clone);
 825        struct dm_rq_target_io *tio = clone->end_io_data;
 826        struct mapped_device *md = tio->md;
 827        struct request *rq = tio->orig;
 828        struct request_queue *q = rq->q;
 829        unsigned long flags;
 830
 831        dm_unprep_request(rq);
 832
 833        spin_lock_irqsave(q->queue_lock, flags);
 834        blk_requeue_request(q, rq);
 835        spin_unlock_irqrestore(q->queue_lock, flags);
 836
 837        rq_completed(md, rw, 0);
 838}
 839EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
 840
 841static void __stop_queue(struct request_queue *q)
 842{
 843        blk_stop_queue(q);
 844}
 845
 846static void stop_queue(struct request_queue *q)
 847{
 848        unsigned long flags;
 849
 850        spin_lock_irqsave(q->queue_lock, flags);
 851        __stop_queue(q);
 852        spin_unlock_irqrestore(q->queue_lock, flags);
 853}
 854
 855static void __start_queue(struct request_queue *q)
 856{
 857        if (blk_queue_stopped(q))
 858                blk_start_queue(q);
 859}
 860
 861static void start_queue(struct request_queue *q)
 862{
 863        unsigned long flags;
 864
 865        spin_lock_irqsave(q->queue_lock, flags);
 866        __start_queue(q);
 867        spin_unlock_irqrestore(q->queue_lock, flags);
 868}
 869
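     /*
      * Finish a completed clone: let the target's rq_end_io (if any)
      * override the error, then complete, requeue or hand the request back
      * to the target according to the return value.
      */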
 870static void dm_done(struct request *clone, int error, bool mapped)
 871{
 872        int r = error;
 873        struct dm_rq_target_io *tio = clone->end_io_data;
 874        dm_request_endio_fn rq_end_io = NULL;
 875
 876        if (tio->ti) {
 877                rq_end_io = tio->ti->type->rq_end_io;
 878
 879                if (mapped && rq_end_io)
 880                        r = rq_end_io(tio->ti, clone, error, &tio->info);
 881        }
 882
 883        if (r <= 0)
 884                /* The target wants to complete the I/O */
 885                dm_end_request(clone, r);
 886        else if (r == DM_ENDIO_INCOMPLETE)
 887                /* The target will handle the I/O */
 888                return;
 889        else if (r == DM_ENDIO_REQUEUE)
 890                /* The target wants to requeue the I/O */
 891                dm_requeue_unmapped_request(clone);
 892        else {
 893                DMWARN("unimplemented target endio return value: %d", r);
 894                BUG();
 895        }
 896}
 897
 898/*
 899 * Request completion handler for request-based dm
 900 */
 901static void dm_softirq_done(struct request *rq)
 902{
 903        bool mapped = true;
 904        struct request *clone = rq->completion_data;
 905        struct dm_rq_target_io *tio = clone->end_io_data;
 906
 907        if (rq->cmd_flags & REQ_FAILED)
 908                mapped = false;
 909
 910        dm_done(clone, tio->error, mapped);
 911}
 912
 913/*
 914 * Complete the clone and the original request with the error status
 915 * through softirq context.
 916 */
 917static void dm_complete_request(struct request *clone, int error)
 918{
 919        struct dm_rq_target_io *tio = clone->end_io_data;
 920        struct request *rq = tio->orig;
 921
 922        tio->error = error;
 923        rq->completion_data = clone;
 924        blk_complete_request(rq);
 925}
 926
 927/*
  928 * Complete the unmapped clone and the original request with the error status
  929 * through softirq context.
  930 * The target's rq_end_io() function isn't called.
 931 * This may be used when the target's map_rq() function fails.
 932 */
 933void dm_kill_unmapped_request(struct request *clone, int error)
 934{
 935        struct dm_rq_target_io *tio = clone->end_io_data;
 936        struct request *rq = tio->orig;
 937
 938        rq->cmd_flags |= REQ_FAILED;
 939        dm_complete_request(clone, error);
 940}
 941EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
 942
 943/*
 944 * Called with the queue lock held
 945 */
 946static void end_clone_request(struct request *clone, int error)
 947{
 948        /*
  949         * This merely cleans up the bookkeeping of the queue in which
  950         * the clone was dispatched.
  951         * The clone is *NOT* actually freed here because it was allocated
  952         * from dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
 953         */
 954        __blk_put_request(clone->q, clone);
 955
 956        /*
 957         * Actual request completion is done in a softirq context which doesn't
  958         * hold the queue lock.  Otherwise, a deadlock could occur because:
  959         *     - the upper level driver of the stacked device may submit
  960         *       another request during the completion
  961         *     - that submission may require taking the queue lock
  962         *       of this very queue
 963         */
 964        dm_complete_request(clone, error);
 965}
 966
 967/*
 968 * Return maximum size of I/O possible at the supplied sector up to the current
 969 * target boundary.
 970 */
 971static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
 972{
 973        sector_t target_offset = dm_target_offset(ti, sector);
 974
 975        return ti->len - target_offset;
 976}
 977
 978static sector_t max_io_len(sector_t sector, struct dm_target *ti)
 979{
 980        sector_t len = max_io_len_target_boundary(sector, ti);
 981        sector_t offset, max_len;
 982
 983        /*
 984         * Does the target need to split even further?
 985         */
 986        if (ti->max_io_len) {
 987                offset = dm_target_offset(ti, sector);
 988                if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
 989                        max_len = sector_div(offset, ti->max_io_len);
 990                else
 991                        max_len = offset & (ti->max_io_len - 1);
 992                max_len = ti->max_io_len - max_len;
 993
 994                if (len > max_len)
 995                        len = max_len;
 996        }
 997
 998        return len;
 999}
1000
1001int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1002{
1003        if (len > UINT_MAX) {
1004                DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1005                      (unsigned long long)len, UINT_MAX);
1006                ti->error = "Maximum size of target IO is too large";
1007                return -EINVAL;
1008        }
1009
1010        ti->max_io_len = (uint32_t) len;
1011
1012        return 0;
1013}
1014EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1015
1016static void __map_bio(struct dm_target *ti, struct bio *clone,
1017                      struct dm_target_io *tio)
1018{
1019        int r;
1020        sector_t sector;
1021        struct mapped_device *md;
1022
1023        clone->bi_end_io = clone_endio;
1024        clone->bi_private = tio;
1025
1026        /*
1027         * Map the clone.  If r == 0 we don't need to do
1028         * anything, the target has assumed ownership of
1029         * this io.
1030         */
1031        atomic_inc(&tio->io->io_count);
1032        sector = clone->bi_sector;
1033        r = ti->type->map(ti, clone, &tio->info);
1034        if (r == DM_MAPIO_REMAPPED) {
1035                /* the bio has been remapped so dispatch it */
1036
1037                trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1038                                      tio->io->bio->bi_bdev->bd_dev, sector);
1039
1040                generic_make_request(clone);
1041        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1042                /* error the io and bail out, or requeue it if needed */
1043                md = tio->io->md;
1044                dec_pending(tio->io, r);
1045                /*
1046                 * Store bio_set for cleanup.
1047                 */
1048                clone->bi_end_io = NULL;
1049                clone->bi_private = md->bs;
1050                bio_put(clone);
1051                free_tio(md, tio);
1052        } else if (r) {
1053                DMWARN("unimplemented target map return value: %d", r);
1054                BUG();
1055        }
1056}
1057
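     /*
      * State carried while __split_and_process_bio() walks an incoming bio
      * and carves it into clones for the targets it spans.
      */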
1058struct clone_info {
1059        struct mapped_device *md;
1060        struct dm_table *map;
1061        struct bio *bio;
1062        struct dm_io *io;
1063        sector_t sector;
1064        sector_t sector_count;
1065        unsigned short idx;
1066};
1067
1068static void dm_bio_destructor(struct bio *bio)
1069{
1070        struct bio_set *bs = bio->bi_private;
1071
1072        bio_free(bio, bs);
1073}
1074
1075/*
1076 * Creates a little bio that just does part of a bvec.
1077 */
1078static struct bio *split_bvec(struct bio *bio, sector_t sector,
1079                              unsigned short idx, unsigned int offset,
1080                              unsigned int len, struct bio_set *bs)
1081{
1082        struct bio *clone;
1083        struct bio_vec *bv = bio->bi_io_vec + idx;
1084
1085        clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
1086        clone->bi_destructor = dm_bio_destructor;
1087        *clone->bi_io_vec = *bv;
1088
1089        clone->bi_sector = sector;
1090        clone->bi_bdev = bio->bi_bdev;
1091        clone->bi_rw = bio->bi_rw;
1092        clone->bi_vcnt = 1;
1093        clone->bi_size = to_bytes(len);
1094        clone->bi_io_vec->bv_offset = offset;
1095        clone->bi_io_vec->bv_len = clone->bi_size;
1096        clone->bi_flags |= 1 << BIO_CLONED;
1097
1098        if (bio_integrity(bio)) {
1099                bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1100                bio_integrity_trim(clone,
1101                                   bio_sector_offset(bio, idx, offset), len);
1102        }
1103
1104        return clone;
1105}
1106
1107/*
1108 * Creates a bio that consists of range of complete bvecs.
1109 */
1110static struct bio *clone_bio(struct bio *bio, sector_t sector,
1111                             unsigned short idx, unsigned short bv_count,
1112                             unsigned int len, struct bio_set *bs)
1113{
1114        struct bio *clone;
1115
1116        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
1117        __bio_clone(clone, bio);
1118        clone->bi_destructor = dm_bio_destructor;
1119        clone->bi_sector = sector;
1120        clone->bi_idx = idx;
1121        clone->bi_vcnt = idx + bv_count;
1122        clone->bi_size = to_bytes(len);
1123        clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1124
1125        if (bio_integrity(bio)) {
1126                bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1127
1128                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1129                        bio_integrity_trim(clone,
1130                                           bio_sector_offset(bio, idx, 0), len);
1131        }
1132
1133        return clone;
1134}
1135
1136static struct dm_target_io *alloc_tio(struct clone_info *ci,
1137                                      struct dm_target *ti)
1138{
1139        struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
1140
1141        tio->io = ci->io;
1142        tio->ti = ti;
1143        memset(&tio->info, 0, sizeof(tio->info));
1144
1145        return tio;
1146}
1147
1148static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
1149                                   unsigned request_nr, sector_t len)
1150{
1151        struct dm_target_io *tio = alloc_tio(ci, ti);
1152        struct bio *clone;
1153
1154        tio->info.target_request_nr = request_nr;
1155
1156        /*
1157         * Discard requests require the bio's inline iovecs be initialized.
1158         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1159         * and discard, so no need for concern about wasted bvec allocations.
1160         */
1161        clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
1162        __bio_clone(clone, ci->bio);
1163        clone->bi_destructor = dm_bio_destructor;
1164        if (len) {
1165                clone->bi_sector = ci->sector;
1166                clone->bi_size = to_bytes(len);
1167        }
1168
1169        __map_bio(ti, clone, tio);
1170}
1171
1172static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
1173                                    unsigned num_requests, sector_t len)
1174{
1175        unsigned request_nr;
1176
1177        for (request_nr = 0; request_nr < num_requests; request_nr++)
1178                __issue_target_request(ci, ti, request_nr, len);
1179}
1180
1181static int __clone_and_map_empty_flush(struct clone_info *ci)
1182{
1183        unsigned target_nr = 0;
1184        struct dm_target *ti;
1185
1186        BUG_ON(bio_has_data(ci->bio));
1187        while ((ti = dm_table_get_target(ci->map, target_nr++)))
1188                __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
1189
1190        return 0;
1191}
1192
1193/*
1194 * Perform all io with a single clone.
1195 */
1196static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
1197{
1198        struct bio *clone, *bio = ci->bio;
1199        struct dm_target_io *tio;
1200
1201        tio = alloc_tio(ci, ti);
1202        clone = clone_bio(bio, ci->sector, ci->idx,
1203                          bio->bi_vcnt - ci->idx, ci->sector_count,
1204                          ci->md->bs);
1205        __map_bio(ti, clone, tio);
1206        ci->sector_count = 0;
1207}
1208
1209static int __clone_and_map_discard(struct clone_info *ci)
1210{
1211        struct dm_target *ti;
1212        sector_t len;
1213
1214        do {
1215                ti = dm_table_find_target(ci->map, ci->sector);
1216                if (!dm_target_is_valid(ti))
1217                        return -EIO;
1218
1219                /*
1220                 * Even though the device advertised discard support,
1221                 * that does not mean every target supports it, and
1222                 * reconfiguration might also have changed that since the
1223                 * check was performed.
1224                 */
1225                if (!ti->num_discard_requests)
1226                        return -EOPNOTSUPP;
1227
1228                if (!ti->split_discard_requests)
1229                        len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1230                else
1231                        len = min(ci->sector_count, max_io_len(ci->sector, ti));
1232
1233                __issue_target_requests(ci, ti, ti->num_discard_requests, len);
1234
1235                ci->sector += len;
1236        } while (ci->sector_count -= len);
1237
1238        return 0;
1239}
1240
1241static int __clone_and_map(struct clone_info *ci)
1242{
1243        struct bio *clone, *bio = ci->bio;
1244        struct dm_target *ti;
1245        sector_t len = 0, max;
1246        struct dm_target_io *tio;
1247
1248        if (unlikely(bio->bi_rw & REQ_DISCARD))
1249                return __clone_and_map_discard(ci);
1250
1251        ti = dm_table_find_target(ci->map, ci->sector);
1252        if (!dm_target_is_valid(ti))
1253                return -EIO;
1254
1255        max = max_io_len(ci->sector, ti);
1256
1257        if (ci->sector_count <= max) {
1258                /*
1259                 * Optimise for the simple case where we can do all of
1260                 * the remaining io with a single clone.
1261                 */
1262                __clone_and_map_simple(ci, ti);
1263
1264        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1265                /*
1266                 * There are some bvecs that don't span targets.
1267                 * Do as many of these as possible.
1268                 */
1269                int i;
1270                sector_t remaining = max;
1271                sector_t bv_len;
1272
1273                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
1274                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);
1275
1276                        if (bv_len > remaining)
1277                                break;
1278
1279                        remaining -= bv_len;
1280                        len += bv_len;
1281                }
1282
1283                tio = alloc_tio(ci, ti);
1284                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
1285                                  ci->md->bs);
1286                __map_bio(ti, clone, tio);
1287
1288                ci->sector += len;
1289                ci->sector_count -= len;
1290                ci->idx = i;
1291
1292        } else {
1293                /*
1294                 * Handle a bvec that must be split between two or more targets.
1295                 */
1296                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1297                sector_t remaining = to_sector(bv->bv_len);
1298                unsigned int offset = 0;
1299
1300                do {
1301                        if (offset) {
1302                                ti = dm_table_find_target(ci->map, ci->sector);
1303                                if (!dm_target_is_valid(ti))
1304                                        return -EIO;
1305
1306                                max = max_io_len(ci->sector, ti);
1307                        }
1308
1309                        len = min(remaining, max);
1310
1311                        tio = alloc_tio(ci, ti);
1312                        clone = split_bvec(bio, ci->sector, ci->idx,
1313                                           bv->bv_offset + offset, len,
1314                                           ci->md->bs);
1315
1316                        __map_bio(ti, clone, tio);
1317
1318                        ci->sector += len;
1319                        ci->sector_count -= len;
1320                        offset += to_bytes(len);
1321                } while (remaining -= len);
1322
1323                ci->idx++;
1324        }
1325
1326        return 0;
1327}
1328
1329/*
1330 * Split the bio into several clones and submit it to targets.
1331 */
1332static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1333{
1334        struct clone_info ci;
1335        int error = 0;
1336
1337        ci.map = dm_get_live_table(md);
1338        if (unlikely(!ci.map)) {
1339                bio_io_error(bio);
1340                return;
1341        }
1342
1343        ci.md = md;
1344        ci.io = alloc_io(md);
1345        ci.io->error = 0;
1346        atomic_set(&ci.io->io_count, 1);
1347        ci.io->bio = bio;
1348        ci.io->md = md;
1349        spin_lock_init(&ci.io->endio_lock);
1350        ci.sector = bio->bi_sector;
1351        ci.idx = bio->bi_idx;
1352
1353        start_io_acct(ci.io);
1354        if (bio->bi_rw & REQ_FLUSH) {
1355                ci.bio = &ci.md->flush_bio;
1356                ci.sector_count = 0;
1357                error = __clone_and_map_empty_flush(&ci);
1358                /* dec_pending submits any data associated with flush */
1359        } else {
1360                ci.bio = bio;
1361                ci.sector_count = bio_sectors(bio);
1362                while (ci.sector_count && !error)
1363                        error = __clone_and_map(&ci);
1364        }
1365
1366        /* drop the extra reference count */
1367        dec_pending(ci.io, error);
1368        dm_table_put(ci.map);
1369}
1370/*-----------------------------------------------------------------
1371 * CRUD END
1372 *---------------------------------------------------------------*/
1373
1374static int dm_merge_bvec(struct request_queue *q,
1375                         struct bvec_merge_data *bvm,
1376                         struct bio_vec *biovec)
1377{
1378        struct mapped_device *md = q->queuedata;
1379        struct dm_table *map = dm_get_live_table(md);
1380        struct dm_target *ti;
1381        sector_t max_sectors;
1382        int max_size = 0;
1383
1384        if (unlikely(!map))
1385                goto out;
1386
1387        ti = dm_table_find_target(map, bvm->bi_sector);
1388        if (!dm_target_is_valid(ti))
1389                goto out_table;
1390
1391        /*
1392         * Find maximum amount of I/O that won't need splitting
1393         */
1394        max_sectors = min(max_io_len(bvm->bi_sector, ti),
1395                          (sector_t) BIO_MAX_SECTORS);
1396        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1397        if (max_size < 0)
1398                max_size = 0;
1399
1400        /*
 1401         * merge_bvec_fn() returns the number of bytes
 1402         * it can accept at this offset;
 1403         * max_size is the precomputed maximal io size
1404         */
1405        if (max_size && ti->type->merge)
1406                max_size = ti->type->merge(ti, bvm, biovec, max_size);
1407        /*
 1408         * If the target doesn't provide a merge method and some of the
 1409         * devices do provide one (we know this by looking at
1410         * queue_max_hw_sectors), then we can't allow bios with multiple vector
1411         * entries.  So always set max_size to 0, and the code below allows
1412         * just one page.
1413         */
1414        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1415
1416                max_size = 0;
1417
1418out_table:
1419        dm_table_put(map);
1420
1421out:
1422        /*
1423         * Always allow an entire first page
1424         */
1425        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1426                max_size = biovec->bv_len;
1427
1428        return max_size;
1429}
1430
1431/*
1432 * The request function that just remaps the bio built up by
1433 * dm_merge_bvec.
1434 */
1435static void _dm_request(struct request_queue *q, struct bio *bio)
1436{
1437        int rw = bio_data_dir(bio);
1438        struct mapped_device *md = q->queuedata;
1439        int cpu;
1440
1441        down_read(&md->io_lock);
1442
1443        cpu = part_stat_lock();
1444        part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1445        part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1446        part_stat_unlock();
1447
1448        /* if we're suspended, we have to queue this io for later */
1449        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1450                up_read(&md->io_lock);
1451
1452                if (bio_rw(bio) != READA)
1453                        queue_io(md, bio);
1454                else
1455                        bio_io_error(bio);
1456                return;
1457        }
1458
1459        __split_and_process_bio(md, bio);
1460        up_read(&md->io_lock);
1461        return;
1462}
1463
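     /*
      * A stackable queue means a request-based table has been bound;
      * bio-based devices leave the flag clear (see dm_init_md_queue()).
      */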
1464static int dm_request_based(struct mapped_device *md)
1465{
1466        return blk_queue_stackable(md->queue);
1467}
1468
1469static void dm_request(struct request_queue *q, struct bio *bio)
1470{
1471        struct mapped_device *md = q->queuedata;
1472
1473        if (dm_request_based(md))
1474                blk_queue_bio(q, bio);
1475        else
1476                _dm_request(q, bio);
1477}
1478
1479void dm_dispatch_request(struct request *rq)
1480{
1481        int r;
1482
1483        if (blk_queue_io_stat(rq->q))
1484                rq->cmd_flags |= REQ_IO_STAT;
1485
1486        rq->start_time = jiffies;
1487        r = blk_insert_cloned_request(rq->q, rq);
1488        if (r)
1489                dm_complete_request(rq, r);
1490}
1491EXPORT_SYMBOL_GPL(dm_dispatch_request);
1492
1493static void dm_rq_bio_destructor(struct bio *bio)
1494{
1495        struct dm_rq_clone_bio_info *info = bio->bi_private;
1496        struct mapped_device *md = info->tio->md;
1497
1498        free_bio_info(info);
1499        bio_free(bio, md->bs);
1500}
1501
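     /*
      * bio_ctr callback for blk_rq_prep_clone(): attach a
      * dm_rq_clone_bio_info to each cloned bio so end_clone_bio() can find
      * the original bio and the tio it belongs to.
      */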
1502static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1503                                 void *data)
1504{
1505        struct dm_rq_target_io *tio = data;
1506        struct mapped_device *md = tio->md;
1507        struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1508
1509        if (!info)
1510                return -ENOMEM;
1511
1512        info->orig = bio_orig;
1513        info->tio = tio;
1514        bio->bi_end_io = end_clone_bio;
1515        bio->bi_private = info;
1516        bio->bi_destructor = dm_rq_bio_destructor;
1517
1518        return 0;
1519}
1520
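     /*
      * Prepare the clone request: copy the bios with blk_rq_prep_clone() and
      * mirror the command fields so the clone can be dispatched to an
      * underlying queue, completing through end_clone_request().
      */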
1521static int setup_clone(struct request *clone, struct request *rq,
1522                       struct dm_rq_target_io *tio)
1523{
1524        int r;
1525
1526        r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1527                              dm_rq_bio_constructor, tio);
1528        if (r)
1529                return r;
1530
1531        clone->cmd = rq->cmd;
1532        clone->cmd_len = rq->cmd_len;
1533        clone->sense = rq->sense;
1534        clone->buffer = rq->buffer;
1535        clone->end_io = end_clone_request;
1536        clone->end_io_data = tio;
1537
1538        return 0;
1539}
1540
1541static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1542                                gfp_t gfp_mask)
1543{
1544        struct request *clone;
1545        struct dm_rq_target_io *tio;
1546
1547        tio = alloc_rq_tio(md, gfp_mask);
1548        if (!tio)
1549                return NULL;
1550
1551        tio->md = md;
1552        tio->ti = NULL;
1553        tio->orig = rq;
1554        tio->error = 0;
1555        memset(&tio->info, 0, sizeof(tio->info));
1556
1557        clone = &tio->clone;
1558        if (setup_clone(clone, rq, tio)) {
1559                /* -ENOMEM */
1560                free_rq_tio(tio);
1561                return NULL;
1562        }
1563
1564        return clone;
1565}
1566
1567/*
1568 * Called with the queue lock held.
1569 */
1570static int dm_prep_fn(struct request_queue *q, struct request *rq)
1571{
1572        struct mapped_device *md = q->queuedata;
1573        struct request *clone;
1574
1575        if (unlikely(rq->special)) {
1576                DMWARN("Already has something in rq->special.");
1577                return BLKPREP_KILL;
1578        }
1579
1580        clone = clone_rq(rq, md, GFP_ATOMIC);
1581        if (!clone)
1582                return BLKPREP_DEFER;
1583
1584        rq->special = clone;
1585        rq->cmd_flags |= REQ_DONTPREP;
1586
1587        return BLKPREP_OK;
1588}
1589
1590/*
1591 * Returns:
1592 * 0  : the request has been processed (not requeued)
1593 * !0 : the request has been requeued
1594 */
1595static int map_request(struct dm_target *ti, struct request *clone,
1596                       struct mapped_device *md)
1597{
1598        int r, requeued = 0;
1599        struct dm_rq_target_io *tio = clone->end_io_data;
1600
1601        tio->ti = ti;
1602        r = ti->type->map_rq(ti, clone, &tio->info);
1603        switch (r) {
1604        case DM_MAPIO_SUBMITTED:
1605                /* The target has taken the I/O to submit by itself later */
1606                break;
1607        case DM_MAPIO_REMAPPED:
1608                /* The target has remapped the I/O so dispatch it */
1609                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1610                                     blk_rq_pos(tio->orig));
1611                dm_dispatch_request(clone);
1612                break;
1613        case DM_MAPIO_REQUEUE:
1614                /* The target wants to requeue the I/O */
1615                dm_requeue_unmapped_request(clone);
1616                requeued = 1;
1617                break;
1618        default:
1619                if (r > 0) {
1620                        DMWARN("unimplemented target map return value: %d", r);
1621                        BUG();
1622                }
1623
1624                /* The target wants to complete the I/O */
1625                dm_kill_unmapped_request(clone, r);
1626                break;
1627        }
1628
1629        return requeued;
1630}
1631
1632static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1633{
1634        struct request *clone;
1635
1636        blk_start_request(orig);
1637        clone = orig->special;
1638        atomic_inc(&md->pending[rq_data_dir(clone)]);
1639
1640        /*
1641         * Hold the md reference here for the in-flight I/O.
 1642         * We can't rely on the reference count taken by the device opener,
1643         * because the device may be closed during the request completion
1644         * when all bios are completed.
1645         * See the comment in rq_completed() too.
1646         */
1647        dm_get(md);
1648
1649        return clone;
1650}
1651
1652/*
1653 * q->request_fn for request-based dm.
1654 * Called with the queue lock held.
1655 */
1656static void dm_request_fn(struct request_queue *q)
1657{
1658        struct mapped_device *md = q->queuedata;
1659        struct dm_table *map = dm_get_live_table(md);
1660        struct dm_target *ti;
1661        struct request *rq, *clone;
1662        sector_t pos;
1663
1664        /*
 1665         * For suspend, check blk_queue_stopped() and increment
 1666         * ->pending under a single queue_lock so that the number of
 1667         * in-flight I/Os cannot grow after the queue is stopped in
 1668         * dm_suspend().
1669         */
1670        while (!blk_queue_stopped(q)) {
1671                rq = blk_peek_request(q);
1672                if (!rq)
1673                        goto delay_and_out;
1674
1675                /* always use block 0 to find the target for flushes for now */
1676                pos = 0;
1677                if (!(rq->cmd_flags & REQ_FLUSH))
1678                        pos = blk_rq_pos(rq);
1679
1680                ti = dm_table_find_target(map, pos);
1681                if (!dm_target_is_valid(ti)) {
1682                        /*
 1683                         * Must perform the setup that dm_done() requires
 1684                         * before calling dm_kill_unmapped_request
1685                         */
1686                        DMERR_LIMIT("request attempted access beyond the end of device");
1687                        clone = dm_start_request(md, rq);
1688                        dm_kill_unmapped_request(clone, -EIO);
1689                        continue;
1690                }
1691
1692                if (ti->type->busy && ti->type->busy(ti))
1693                        goto delay_and_out;
1694
1695                clone = dm_start_request(md, rq);
1696
1697                spin_unlock(q->queue_lock);
1698                if (map_request(ti, clone, md))
1699                        goto requeued;
1700
1701                BUG_ON(!irqs_disabled());
1702                spin_lock(q->queue_lock);
1703        }
1704
1705        goto out;
1706
1707requeued:
1708        BUG_ON(!irqs_disabled());
1709        spin_lock(q->queue_lock);
1710
1711delay_and_out:
1712        blk_delay_queue(q, HZ / 10);
1713out:
1714        dm_table_put(map);
1715}
1716
1717int dm_underlying_device_busy(struct request_queue *q)
1718{
1719        return blk_lld_busy(q);
1720}
1721EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1722
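     /*
      * Report this device as busy when there is no live table, when I/O is
      * blocked for suspend, or when any underlying target reports busy.
      */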
1723static int dm_lld_busy(struct request_queue *q)
1724{
1725        int r;
1726        struct mapped_device *md = q->queuedata;
1727        struct dm_table *map = dm_get_live_table(md);
1728
1729        if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1730                r = 1;
1731        else
1732                r = dm_table_any_busy_target(map);
1733
1734        dm_table_put(map);
1735
1736        return r;
1737}
1738
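     /*
      * backing_dev_info congested_fn: while not suspended, query either the
      * md's own queue (request-based) or every device in the live table
      * (bio-based).
      */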
1739static int dm_any_congested(void *congested_data, int bdi_bits)
1740{
1741        int r = bdi_bits;
1742        struct mapped_device *md = congested_data;
1743        struct dm_table *map;
1744
1745        if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1746                map = dm_get_live_table(md);
1747                if (map) {
1748                        /*
 1749                         * Request-based dm cares only about its own queue
 1750                         * when queried for request_queue congestion status
1751                         */
1752                        if (dm_request_based(md))
1753                                r = md->queue->backing_dev_info.state &
1754                                    bdi_bits;
1755                        else
1756                                r = dm_table_any_congested(map, bdi_bits);
1757
1758                        dm_table_put(map);
1759                }
1760        }
1761
1762        return r;
1763}
1764
1765/*-----------------------------------------------------------------
1766 * An IDR is used to keep track of allocated minor numbers.
1767 *---------------------------------------------------------------*/
1768static void free_minor(int minor)
1769{
1770        spin_lock(&_minor_lock);
1771        idr_remove(&_minor_idr, minor);
1772        spin_unlock(&_minor_lock);
1773}
1774
1775/*
1776 * See if the device with a specific minor # is free.
1777 */
1778static int specific_minor(int minor)
1779{
1780        int r, m;
1781
1782        if (minor >= (1 << MINORBITS))
1783                return -EINVAL;
1784
1785        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1786        if (!r)
1787                return -ENOMEM;
1788
1789        spin_lock(&_minor_lock);
1790
1791        if (idr_find(&_minor_idr, minor)) {
1792                r = -EBUSY;
1793                goto out;
1794        }
1795
1796        r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
1797        if (r)
1798                goto out;
1799
1800        if (m != minor) {
1801                idr_remove(&_minor_idr, m);
1802                r = -EBUSY;
1803                goto out;
1804        }
1805
1806out:
1807        spin_unlock(&_minor_lock);
1808        return r;
1809}
1810
1811static int next_free_minor(int *minor)
1812{
1813        int r, m;
1814
1815        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1816        if (!r)
1817                return -ENOMEM;
1818
1819        spin_lock(&_minor_lock);
1820
1821        r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
1822        if (r)
1823                goto out;
1824
1825        if (m >= (1 << MINORBITS)) {
1826                idr_remove(&_minor_idr, m);
1827                r = -ENOSPC;
1828                goto out;
1829        }
1830
1831        *minor = m;
1832
1833out:
1834        spin_unlock(&_minor_lock);
1835        return r;
1836}
1837
1838static const struct block_device_operations dm_blk_dops;
1839
1840static void dm_wq_work(struct work_struct *work);
1841
1842static void dm_init_md_queue(struct mapped_device *md)
1843{
1844        /*
1845         * Request-based dm devices cannot be stacked on top of bio-based dm
1846         * devices.  The type of this dm device has not been decided yet.
1847         * The type is decided at the first table loading time.
1848         * To prevent problematic device stacking, clear the queue flag
1849         * for request stacking support until then.
1850         *
1851         * This queue is new, so no concurrency on the queue_flags.
1852         */
1853        queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1854
1855        md->queue->queuedata = md;
1856        md->queue->backing_dev_info.congested_fn = dm_any_congested;
1857        md->queue->backing_dev_info.congested_data = md;
1858        blk_queue_make_request(md->queue, dm_request);
1859        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1860        blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1861}
1862
1863/*
1864 * Allocate and initialise a blank device with a given minor.
1865 */
1866static struct mapped_device *alloc_dev(int minor)
1867{
1868        int r;
1869        struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1870        void *old_md;
1871
1872        if (!md) {
1873                DMWARN("unable to allocate device, out of memory.");
1874                return NULL;
1875        }
1876
1877        if (!try_module_get(THIS_MODULE))
1878                goto bad_module_get;
1879
1880        /* get a minor number for the dev */
1881        if (minor == DM_ANY_MINOR)
1882                r = next_free_minor(&minor);
1883        else
1884                r = specific_minor(minor);
1885        if (r < 0)
1886                goto bad_minor;
1887
1888        md->type = DM_TYPE_NONE;
1889        init_rwsem(&md->io_lock);
1890        mutex_init(&md->suspend_lock);
1891        mutex_init(&md->type_lock);
1892        spin_lock_init(&md->deferred_lock);
1893        rwlock_init(&md->map_lock);
1894        atomic_set(&md->holders, 1);
1895        atomic_set(&md->open_count, 0);
1896        atomic_set(&md->event_nr, 0);
1897        atomic_set(&md->uevent_seq, 0);
1898        INIT_LIST_HEAD(&md->uevent_list);
1899        spin_lock_init(&md->uevent_lock);
1900
1901        md->queue = blk_alloc_queue(GFP_KERNEL);
1902        if (!md->queue)
1903                goto bad_queue;
1904
1905        dm_init_md_queue(md);
1906
1907        md->disk = alloc_disk(1);
1908        if (!md->disk)
1909                goto bad_disk;
1910
1911        atomic_set(&md->pending[0], 0);
1912        atomic_set(&md->pending[1], 0);
1913        init_waitqueue_head(&md->wait);
1914        INIT_WORK(&md->work, dm_wq_work);
1915        init_waitqueue_head(&md->eventq);
1916
1917        md->disk->major = _major;
1918        md->disk->first_minor = minor;
1919        md->disk->fops = &dm_blk_dops;
1920        md->disk->queue = md->queue;
1921        md->disk->private_data = md;
1922        sprintf(md->disk->disk_name, "dm-%d", minor);
1923        add_disk(md->disk);
1924        format_dev_t(md->name, MKDEV(_major, minor));
1925
1926        md->wq = alloc_workqueue("kdmflush",
1927                                 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
1928        if (!md->wq)
1929                goto bad_thread;
1930
1931        md->bdev = bdget_disk(md->disk, 0);
1932        if (!md->bdev)
1933                goto bad_bdev;
1934
1935        bio_init(&md->flush_bio);
1936        md->flush_bio.bi_bdev = md->bdev;
1937        md->flush_bio.bi_rw = WRITE_FLUSH;
1938
1939        /* Populate the mapping, nobody knows we exist yet */
1940        spin_lock(&_minor_lock);
1941        old_md = idr_replace(&_minor_idr, md, minor);
1942        spin_unlock(&_minor_lock);
1943
1944        BUG_ON(old_md != MINOR_ALLOCED);
1945
1946        return md;
1947
1948bad_bdev:
1949        destroy_workqueue(md->wq);
1950bad_thread:
1951        del_gendisk(md->disk);
1952        put_disk(md->disk);
1953bad_disk:
1954        blk_cleanup_queue(md->queue);
1955bad_queue:
1956        free_minor(minor);
1957bad_minor:
1958        module_put(THIS_MODULE);
1959bad_module_get:
1960        kfree(md);
1961        return NULL;
1962}
1963
1964static void unlock_fs(struct mapped_device *md);
1965
1966static void free_dev(struct mapped_device *md)
1967{
1968        int minor = MINOR(disk_devt(md->disk));
1969
1970        unlock_fs(md);
1971        bdput(md->bdev);
1972        destroy_workqueue(md->wq);
1973        if (md->tio_pool)
1974                mempool_destroy(md->tio_pool);
1975        if (md->io_pool)
1976                mempool_destroy(md->io_pool);
1977        if (md->bs)
1978                bioset_free(md->bs);
1979        blk_integrity_unregister(md->disk);
1980        del_gendisk(md->disk);
1981        free_minor(minor);
1982
1983        spin_lock(&_minor_lock);
1984        md->disk->private_data = NULL;
1985        spin_unlock(&_minor_lock);
1986
1987        put_disk(md->disk);
1988        blk_cleanup_queue(md->queue);
1989        module_put(THIS_MODULE);
1990        kfree(md);
1991}
1992
1993static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1994{
1995        struct dm_md_mempools *p;
1996
1997        if (md->io_pool && md->tio_pool && md->bs)
1998                /* the md already has necessary mempools */
1999                goto out;
2000
2001        p = dm_table_get_md_mempools(t);
2002        BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
2003
2004        md->io_pool = p->io_pool;
2005        p->io_pool = NULL;
2006        md->tio_pool = p->tio_pool;
2007        p->tio_pool = NULL;
2008        md->bs = p->bs;
2009        p->bs = NULL;
2010
2011out:
2012        /* mempool bind completed, the table no longer needs any mempools */
2013        dm_table_free_md_mempools(t);
2014}
2015
2016/*
2017 * Bind a table to the device.
2018 */
2019static void event_callback(void *context)
2020{
2021        unsigned long flags;
2022        LIST_HEAD(uevents);
2023        struct mapped_device *md = (struct mapped_device *) context;
2024
2025        spin_lock_irqsave(&md->uevent_lock, flags);
2026        list_splice_init(&md->uevent_list, &uevents);
2027        spin_unlock_irqrestore(&md->uevent_lock, flags);
2028
2029        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2030
2031        atomic_inc(&md->event_nr);
2032        wake_up(&md->eventq);
2033}
2034
2035/*
2036 * Protected by md->suspend_lock obtained by dm_swap_table().
2037 */
2038static void __set_size(struct mapped_device *md, sector_t size)
2039{
2040        set_capacity(md->disk, size);
2041
2042        i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2043}
2044
2045/*
2046 * Return 1 if the queue has a compulsory merge_bvec_fn.
2047 *
2048 * If this function returns 0, then the device is either a non-dm
2049 * device without a merge_bvec_fn, or it is a dm device that is
2050 * able to split any bios it receives that are too big.
2051 */
2052int dm_queue_merge_is_compulsory(struct request_queue *q)
2053{
2054        struct mapped_device *dev_md;
2055
2056        if (!q->merge_bvec_fn)
2057                return 0;
2058
2059        if (q->make_request_fn == dm_request) {
2060                dev_md = q->queuedata;
2061                if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2062                        return 0;
2063        }
2064
2065        return 1;
2066}
2067
2068static int dm_device_merge_is_compulsory(struct dm_target *ti,
2069                                         struct dm_dev *dev, sector_t start,
2070                                         sector_t len, void *data)
2071{
2072        struct block_device *bdev = dev->bdev;
2073        struct request_queue *q = bdev_get_queue(bdev);
2074
2075        return dm_queue_merge_is_compulsory(q);
2076}
2077
2078/*
2079 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2080 * on the properties of the underlying devices.
2081 */
2082static int dm_table_merge_is_optional(struct dm_table *table)
2083{
2084        unsigned i = 0;
2085        struct dm_target *ti;
2086
2087        while (i < dm_table_get_num_targets(table)) {
2088                ti = dm_table_get_target(table, i++);
2089
2090                if (ti->type->iterate_devices &&
2091                    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2092                        return 0;
2093        }
2094
2095        return 1;
2096}
2097
2098/*
2099 * Returns old map, which caller must destroy.
2100 */
2101static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2102                               struct queue_limits *limits)
2103{
2104        struct dm_table *old_map;
2105        struct request_queue *q = md->queue;
2106        sector_t size;
2107        unsigned long flags;
2108        int merge_is_optional;
2109
2110        size = dm_table_get_size(t);
2111
2112        /*
2113         * Wipe any geometry if the size of the table changed.
2114         */
2115        if (size != get_capacity(md->disk))
2116                memset(&md->geometry, 0, sizeof(md->geometry));
2117
2118        __set_size(md, size);
2119
2120        dm_table_event_callback(t, event_callback, md);
2121
2122        /*
2123         * If the old table type wasn't request-based, the queue was not
2124         * stopped during suspension, so stop it now to prevent I/O from
2125         * being mapped before resume.
2126         * This must be done before setting the queue restrictions,
2127         * because request-based dm may start running as soon as they are set.
2128         */
2129        if (dm_table_request_based(t) && !blk_queue_stopped(q))
2130                stop_queue(q);
2131
2132        __bind_mempools(md, t);
2133
2134        merge_is_optional = dm_table_merge_is_optional(t);
2135
2136        write_lock_irqsave(&md->map_lock, flags);
2137        old_map = md->map;
2138        md->map = t;
2139        md->immutable_target_type = dm_table_get_immutable_target_type(t);
2140
2141        dm_table_set_restrictions(t, q, limits);
2142        if (merge_is_optional)
2143                set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2144        else
2145                clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2146        write_unlock_irqrestore(&md->map_lock, flags);
2147
2148        return old_map;
2149}
2150
2151/*
2152 * Returns unbound table for the caller to free.
2153 */
2154static struct dm_table *__unbind(struct mapped_device *md)
2155{
2156        struct dm_table *map = md->map;
2157        unsigned long flags;
2158
2159        if (!map)
2160                return NULL;
2161
2162        dm_table_event_callback(map, NULL, NULL);
2163        write_lock_irqsave(&md->map_lock, flags);
2164        md->map = NULL;
2165        write_unlock_irqrestore(&md->map_lock, flags);
2166
2167        return map;
2168}
2169
2170/*
2171 * Constructor for a new device.
2172 */
2173int dm_create(int minor, struct mapped_device **result)
2174{
2175        struct mapped_device *md;
2176
2177        md = alloc_dev(minor);
2178        if (!md)
2179                return -ENXIO;
2180
2181        dm_sysfs_init(md);
2182
2183        *result = md;
2184        return 0;
2185}
2186
2187/*
2188 * Functions to manage md->type.
2189 * All are required to hold md->type_lock.
2190 */
2191void dm_lock_md_type(struct mapped_device *md)
2192{
2193        mutex_lock(&md->type_lock);
2194}
2195
2196void dm_unlock_md_type(struct mapped_device *md)
2197{
2198        mutex_unlock(&md->type_lock);
2199}
2200
2201void dm_set_md_type(struct mapped_device *md, unsigned type)
2202{
2203        md->type = type;
2204}
2205
2206unsigned dm_get_md_type(struct mapped_device *md)
2207{
2208        return md->type;
2209}
2210
2211struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2212{
2213        return md->immutable_target_type;
2214}
2215
2216/*
2217 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2218 */
2219static int dm_init_request_based_queue(struct mapped_device *md)
2220{
2221        struct request_queue *q = NULL;
2222
2223        if (md->queue->elevator)
2224                return 1;
2225
2226        /* Fully initialize the queue */
2227        q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2228        if (!q)
2229                return 0;
2230
2231        md->queue = q;
2232        dm_init_md_queue(md);
2233        blk_queue_softirq_done(md->queue, dm_softirq_done);
2234        blk_queue_prep_rq(md->queue, dm_prep_fn);
2235        blk_queue_lld_busy(md->queue, dm_lld_busy);
2236
2237        elv_register_queue(md->queue);
2238
2239        return 1;
2240}
2241
2242/*
2243 * Setup the DM device's queue based on md's type
2244 */
2245int dm_setup_md_queue(struct mapped_device *md)
2246{
2247        if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2248            !dm_init_request_based_queue(md)) {
2249                DMWARN("Cannot initialize queue for request-based mapped device");
2250                return -EINVAL;
2251        }
2252
2253        return 0;
2254}
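
/*
 * Illustrative sketch only (not part of dm.c): roughly how a caller such as
 * the ioctl layer would create a device and then fix its type before the
 * first table is bound.  The DM_TYPE_REQUEST_BASED choice is just an
 * example, and error handling is trimmed; the sketch is compiled out.
 */
#if 0
static int example_create_request_based_dev(struct mapped_device **result)
{
        struct mapped_device *md;
        int r;

        r = dm_create(DM_ANY_MINOR, &md);
        if (r)
                return r;

        dm_lock_md_type(md);
        dm_set_md_type(md, DM_TYPE_REQUEST_BASED);
        r = dm_setup_md_queue(md);      /* fully initialise the queue */
        dm_unlock_md_type(md);

        if (r) {
                /* a real caller would also tear the device down here */
                return r;
        }

        *result = md;
        return 0;
}
#endif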
2255
2256static struct mapped_device *dm_find_md(dev_t dev)
2257{
2258        struct mapped_device *md;
2259        unsigned minor = MINOR(dev);
2260
2261        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2262                return NULL;
2263
2264        spin_lock(&_minor_lock);
2265
2266        md = idr_find(&_minor_idr, minor);
2267        if (md && (md == MINOR_ALLOCED ||
2268                   (MINOR(disk_devt(dm_disk(md))) != minor) ||
2269                   dm_deleting_md(md) ||
2270                   test_bit(DMF_FREEING, &md->flags))) {
2271                md = NULL;
2272                goto out;
2273        }
2274
2275out:
2276        spin_unlock(&_minor_lock);
2277
2278        return md;
2279}
2280
2281struct mapped_device *dm_get_md(dev_t dev)
2282{
2283        struct mapped_device *md = dm_find_md(dev);
2284
2285        if (md)
2286                dm_get(md);
2287
2288        return md;
2289}
2290EXPORT_SYMBOL_GPL(dm_get_md);
2291
2292void *dm_get_mdptr(struct mapped_device *md)
2293{
2294        return md->interface_ptr;
2295}
2296
2297void dm_set_mdptr(struct mapped_device *md, void *ptr)
2298{
2299        md->interface_ptr = ptr;
2300}
2301
2302void dm_get(struct mapped_device *md)
2303{
2304        atomic_inc(&md->holders);
2305        BUG_ON(test_bit(DMF_FREEING, &md->flags));
2306}
2307
2308const char *dm_device_name(struct mapped_device *md)
2309{
2310        return md->name;
2311}
2312EXPORT_SYMBOL_GPL(dm_device_name);
2313
2314static void __dm_destroy(struct mapped_device *md, bool wait)
2315{
2316        struct dm_table *map;
2317
2318        might_sleep();
2319
2320        spin_lock(&_minor_lock);
2321        map = dm_get_live_table(md);
2322        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2323        set_bit(DMF_FREEING, &md->flags);
2324        spin_unlock(&_minor_lock);
2325
2326        if (!dm_suspended_md(md)) {
2327                dm_table_presuspend_targets(map);
2328                dm_table_postsuspend_targets(map);
2329        }
2330
2331        /*
2332         * Rarely there may still be I/O requests in flight that have yet
2333         * to complete; wait for all references to disappear.
2334         * No one may increment the reference count of the mapped_device
2335         * once its state becomes DMF_FREEING.
2336         */
2337        if (wait)
2338                while (atomic_read(&md->holders))
2339                        msleep(1);
2340        else if (atomic_read(&md->holders))
2341                DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2342                       dm_device_name(md), atomic_read(&md->holders));
2343
2344        dm_sysfs_exit(md);
2345        dm_table_put(map);
2346        dm_table_destroy(__unbind(md));
2347        free_dev(md);
2348}
2349
2350void dm_destroy(struct mapped_device *md)
2351{
2352        __dm_destroy(md, true);
2353}
2354
2355void dm_destroy_immediate(struct mapped_device *md)
2356{
2357        __dm_destroy(md, false);
2358}
2359
2360void dm_put(struct mapped_device *md)
2361{
2362        atomic_dec(&md->holders);
2363}
2364EXPORT_SYMBOL_GPL(dm_put);
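
/*
 * Illustrative sketch only (not part of dm.c): the usual pattern for a
 * hypothetical caller that looks a device up by dev_t, uses it, and drops
 * the reference handed out by dm_get_md().  Compiled out.
 */
#if 0
static void example_report_name(dev_t dev)
{
        struct mapped_device *md = dm_get_md(dev);

        if (!md)
                return;

        DMINFO("found mapped device %s", dm_device_name(md));
        dm_put(md);
}
#endif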
2365
2366static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2367{
2368        int r = 0;
2369        DECLARE_WAITQUEUE(wait, current);
2370
2371        add_wait_queue(&md->wait, &wait);
2372
2373        while (1) {
2374                set_current_state(interruptible);
2375
2376                if (!md_in_flight(md))
2377                        break;
2378
2379                if (interruptible == TASK_INTERRUPTIBLE &&
2380                    signal_pending(current)) {
2381                        r = -EINTR;
2382                        break;
2383                }
2384
2385                io_schedule();
2386        }
2387        set_current_state(TASK_RUNNING);
2388
2389        remove_wait_queue(&md->wait, &wait);
2390
2391        return r;
2392}
2393
2394/*
2395 * Process the deferred bios
2396 */
2397static void dm_wq_work(struct work_struct *work)
2398{
2399        struct mapped_device *md = container_of(work, struct mapped_device,
2400                                                work);
2401        struct bio *c;
2402
2403        down_read(&md->io_lock);
2404
2405        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2406                spin_lock_irq(&md->deferred_lock);
2407                c = bio_list_pop(&md->deferred);
2408                spin_unlock_irq(&md->deferred_lock);
2409
2410                if (!c)
2411                        break;
2412
2413                up_read(&md->io_lock);
2414
2415                if (dm_request_based(md))
2416                        generic_make_request(c);
2417                else
2418                        __split_and_process_bio(md, c);
2419
2420                down_read(&md->io_lock);
2421        }
2422
2423        up_read(&md->io_lock);
2424}
2425
2426static void dm_queue_flush(struct mapped_device *md)
2427{
2428        clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2429        smp_mb__after_clear_bit();
2430        queue_work(md->wq, &md->work);
2431}
2432
2433/*
2434 * Swap in a new table, returning the old one for the caller to destroy.
2435 */
2436struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2437{
2438        struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
2439        struct queue_limits limits;
2440        int r;
2441
2442        mutex_lock(&md->suspend_lock);
2443
2444        /* device must be suspended */
2445        if (!dm_suspended_md(md))
2446                goto out;
2447
2448        /*
2449         * If the new table has no data devices, retain the existing limits.
2450         * This helps multipath with queue_if_no_path: if all paths disappear,
2451         * new I/O is queued based on these limits until some paths
2452         * reappear.
2453         */
2454        if (dm_table_has_no_data_devices(table)) {
2455                live_map = dm_get_live_table(md);
2456                if (live_map)
2457                        limits = md->queue->limits;
2458                dm_table_put(live_map);
2459        }
2460
2461        r = dm_calculate_queue_limits(table, &limits);
2462        if (r) {
2463                map = ERR_PTR(r);
2464                goto out;
2465        }
2466
2467        map = __bind(md, table, &limits);
2468
2469out:
2470        mutex_unlock(&md->suspend_lock);
2471        return map;
2472}
2473
2474/*
2475 * Functions to lock and unlock any filesystem running on the
2476 * device.
2477 */
2478static int lock_fs(struct mapped_device *md)
2479{
2480        int r;
2481
2482        WARN_ON(md->frozen_sb);
2483
2484        md->frozen_sb = freeze_bdev(md->bdev);
2485        if (IS_ERR(md->frozen_sb)) {
2486                r = PTR_ERR(md->frozen_sb);
2487                md->frozen_sb = NULL;
2488                return r;
2489        }
2490
2491        set_bit(DMF_FROZEN, &md->flags);
2492
2493        return 0;
2494}
2495
2496static void unlock_fs(struct mapped_device *md)
2497{
2498        if (!test_bit(DMF_FROZEN, &md->flags))
2499                return;
2500
2501        thaw_bdev(md->bdev, md->frozen_sb);
2502        md->frozen_sb = NULL;
2503        clear_bit(DMF_FROZEN, &md->flags);
2504}
2505
2506/*
2507 * We need to be able to change a mapping table under a mounted
2508 * filesystem.  For example we might want to move some data in
2509 * the background.  Before the table can be swapped with
2510 * dm_bind_table, dm_suspend must be called to flush any in
2511 * flight bios and ensure that any further io gets deferred.
2512 */
2513/*
2514 * Suspend mechanism in request-based dm.
2515 *
2516 * 1. Flush all I/Os by lock_fs() if needed.
2517 * 2. Stop dispatching any I/O by stopping the request_queue.
2518 * 3. Wait for all in-flight I/Os to be completed or requeued.
2519 *
2520 * To abort suspend, start the request_queue.
2521 */
2522int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2523{
2524        struct dm_table *map = NULL;
2525        int r = 0;
2526        int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2527        int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
2528
2529        mutex_lock(&md->suspend_lock);
2530
2531        if (dm_suspended_md(md)) {
2532                r = -EINVAL;
2533                goto out_unlock;
2534        }
2535
2536        map = dm_get_live_table(md);
2537
2538        /*
2539         * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2540         * This flag is cleared before dm_suspend returns.
2541         */
2542        if (noflush)
2543                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2544
2545        /* This does not get reverted if there's an error later. */
2546        dm_table_presuspend_targets(map);
2547
2548        /*
2549         * Flush I/O to the device.
2550         * Any I/O submitted after lock_fs() may not be flushed.
2551         * noflush takes precedence over do_lockfs.
2552         * (lock_fs() flushes I/Os and waits for them to complete.)
2553         */
2554        if (!noflush && do_lockfs) {
2555                r = lock_fs(md);
2556                if (r)
2557                        goto out;
2558        }
2559
2560        /*
2561         * Here we must make sure that no processes are submitting requests
2562         * to target drivers, i.e. no one may be executing
2563         * __split_and_process_bio. This is called from dm_request and
2564         * dm_wq_work.
2565         *
2566         * To get all processes out of __split_and_process_bio in dm_request,
2567         * we take the write lock. To prevent any process from reentering
2568         * __split_and_process_bio from dm_request and quiesce the thread
2569         * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2570         * flush_workqueue(md->wq).
2571         */
2572        down_write(&md->io_lock);
2573        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2574        up_write(&md->io_lock);
2575
2576        /*
2577         * Stop md->queue before flushing md->wq in case request-based
2578         * dm defers requests to md->wq from md->queue.
2579         */
2580        if (dm_request_based(md))
2581                stop_queue(md->queue);
2582
2583        flush_workqueue(md->wq);
2584
2585        /*
2586         * At this point no more requests are entering target request routines.
2587         * We call dm_wait_for_completion to wait for all existing requests
2588         * to finish.
2589         */
2590        r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
2591
2592        down_write(&md->io_lock);
2593        if (noflush)
2594                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2595        up_write(&md->io_lock);
2596
2597        /* were we interrupted ? */
2598        if (r < 0) {
2599                dm_queue_flush(md);
2600
2601                if (dm_request_based(md))
2602                        start_queue(md->queue);
2603
2604                unlock_fs(md);
2605                goto out; /* pushback list is already flushed, so skip flush */
2606        }
2607
2608        /*
2609         * If dm_wait_for_completion returned 0, the device is completely
2610         * quiescent now. There is no request-processing activity. All new
2611         * requests are being added to md->deferred list.
2612         */
2613
2614        set_bit(DMF_SUSPENDED, &md->flags);
2615
2616        dm_table_postsuspend_targets(map);
2617
2618out:
2619        dm_table_put(map);
2620
2621out_unlock:
2622        mutex_unlock(&md->suspend_lock);
2623        return r;
2624}
2625
2626int dm_resume(struct mapped_device *md)
2627{
2628        int r = -EINVAL;
2629        struct dm_table *map = NULL;
2630
2631        mutex_lock(&md->suspend_lock);
2632        if (!dm_suspended_md(md))
2633                goto out;
2634
2635        map = dm_get_live_table(md);
2636        if (!map || !dm_table_get_size(map))
2637                goto out;
2638
2639        r = dm_table_resume_targets(map);
2640        if (r)
2641                goto out;
2642
2643        dm_queue_flush(md);
2644
2645        /*
2646         * Flushing deferred I/Os must be done after targets are resumed
2647         * so that the targets can map them correctly.
2648         * Request-based dm queues its deferred I/Os in its request_queue.
2649         */
2650        if (dm_request_based(md))
2651                start_queue(md->queue);
2652
2653        unlock_fs(md);
2654
2655        clear_bit(DMF_SUSPENDED, &md->flags);
2656
2657        r = 0;
2658out:
2659        dm_table_put(map);
2660        mutex_unlock(&md->suspend_lock);
2661
2662        return r;
2663}
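
/*
 * Illustrative sketch only (not part of dm.c): the suspend/swap/resume
 * sequence a caller like the ioctl layer follows when replacing the live
 * table.  The new table 't' is assumed to have been built elsewhere, and
 * error handling is trimmed to the essentials; the sketch is compiled out.
 */
#if 0
static int example_replace_table(struct mapped_device *md, struct dm_table *t)
{
        struct dm_table *old_map;
        int r;

        r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
        if (r)
                return r;

        old_map = dm_swap_table(md, t); /* device must be suspended */
        if (IS_ERR(old_map)) {
                dm_resume(md);
                return PTR_ERR(old_map);
        }

        r = dm_resume(md);

        if (old_map)
                dm_table_destroy(old_map);

        return r;
}
#endif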
2664
2665/*-----------------------------------------------------------------
2666 * Event notification.
2667 *---------------------------------------------------------------*/
2668int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2669                       unsigned cookie)
2670{
2671        char udev_cookie[DM_COOKIE_LENGTH];
2672        char *envp[] = { udev_cookie, NULL };
2673
2674        if (!cookie)
2675                return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2676        else {
2677                snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2678                         DM_COOKIE_ENV_VAR_NAME, cookie);
2679                return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2680                                          action, envp);
2681        }
2682}
2683
2684uint32_t dm_next_uevent_seq(struct mapped_device *md)
2685{
2686        return atomic_add_return(1, &md->uevent_seq);
2687}
2688
2689uint32_t dm_get_event_nr(struct mapped_device *md)
2690{
2691        return atomic_read(&md->event_nr);
2692}
2693
2694int dm_wait_event(struct mapped_device *md, int event_nr)
2695{
2696        return wait_event_interruptible(md->eventq,
2697                        (event_nr != atomic_read(&md->event_nr)));
2698}
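
/*
 * Illustrative sketch only (not part of dm.c): how a hypothetical caller
 * can block until the next table event using the counter helpers above.
 * The caller is assumed to already hold a reference on the md.  Compiled
 * out.
 */
#if 0
static int example_wait_for_next_event(struct mapped_device *md)
{
        uint32_t last_event = dm_get_event_nr(md);

        /* returns -ERESTARTSYS if interrupted by a signal */
        return dm_wait_event(md, last_event);
}
#endif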
2699
2700void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2701{
2702        unsigned long flags;
2703
2704        spin_lock_irqsave(&md->uevent_lock, flags);
2705        list_add(elist, &md->uevent_list);
2706        spin_unlock_irqrestore(&md->uevent_lock, flags);
2707}
2708
2709/*
2710 * The gendisk is only valid as long as you hold a reference
2711 * on 'md'.
2712 */
2713struct gendisk *dm_disk(struct mapped_device *md)
2714{
2715        return md->disk;
2716}
2717
2718struct kobject *dm_kobject(struct mapped_device *md)
2719{
2720        return &md->kobj;
2721}
2722
2723/*
2724 * struct mapped_device should not be exported outside of dm.c
2725 * so use this check to verify that kobj is part of the md structure.
2726 */
2727struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2728{
2729        struct mapped_device *md;
2730
2731        md = container_of(kobj, struct mapped_device, kobj);
2732        if (&md->kobj != kobj)
2733                return NULL;
2734
2735        if (test_bit(DMF_FREEING, &md->flags) ||
2736            dm_deleting_md(md))
2737                return NULL;
2738
2739        dm_get(md);
2740        return md;
2741}
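
/*
 * Illustrative sketch only (not part of dm.c): a hypothetical sysfs
 * ->show() helper can map the embedded kobject back to its mapped_device,
 * hold a reference for the duration of the call and then drop it.  Wiring
 * this into a sysfs_ops table is omitted; the sketch is compiled out.
 */
#if 0
static ssize_t example_dm_name_show(struct kobject *kobj, char *buf)
{
        struct mapped_device *md = dm_get_from_kobject(kobj);
        ssize_t len;

        if (!md)
                return -EINVAL;

        len = scnprintf(buf, PAGE_SIZE, "%s\n", dm_device_name(md));
        dm_put(md);

        return len;
}
#endif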
2742
2743int dm_suspended_md(struct mapped_device *md)
2744{
2745        return test_bit(DMF_SUSPENDED, &md->flags);
2746}
2747
2748int dm_suspended(struct dm_target *ti)
2749{
2750        return dm_suspended_md(dm_table_get_md(ti->table));
2751}
2752EXPORT_SYMBOL_GPL(dm_suspended);
2753
2754int dm_noflush_suspending(struct dm_target *ti)
2755{
2756        return __noflush_suspending(dm_table_get_md(ti->table));
2757}
2758EXPORT_SYMBOL_GPL(dm_noflush_suspending);
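
/*
 * Illustrative sketch only (not part of dm.c): a hypothetical target's
 * end_io hook, shaped like this kernel's dm_endio_fn, can use
 * dm_noflush_suspending() to ask for a requeue instead of failing an I/O
 * while a noflush suspend is in progress.  Compiled out.
 */
#if 0
static int example_target_end_io(struct dm_target *ti, struct bio *bio,
                                 int error, union map_info *map_context)
{
        if (error && dm_noflush_suspending(ti))
                return DM_ENDIO_REQUEUE;        /* retried after resume */

        return error;
}
#endif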
2759
2760struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
2761{
2762        struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2763        unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
2764
2765        if (!pools)
2766                return NULL;
2767
2768        pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2769                         mempool_create_slab_pool(MIN_IOS, _io_cache) :
2770                         mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2771        if (!pools->io_pool)
2772                goto free_pools_and_out;
2773
2774        pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2775                          mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2776                          mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2777        if (!pools->tio_pool)
2778                goto free_io_pool_and_out;
2779
2780        pools->bs = bioset_create(pool_size, 0);
2781        if (!pools->bs)
2782                goto free_tio_pool_and_out;
2783
2784        if (integrity && bioset_integrity_create(pools->bs, pool_size))
2785                goto free_bioset_and_out;
2786
2787        return pools;
2788
2789free_bioset_and_out:
2790        bioset_free(pools->bs);
2791
2792free_tio_pool_and_out:
2793        mempool_destroy(pools->tio_pool);
2794
2795free_io_pool_and_out:
2796        mempool_destroy(pools->io_pool);
2797
2798free_pools_and_out:
2799        kfree(pools);
2800
2801        return NULL;
2802}
2803
2804void dm_free_md_mempools(struct dm_md_mempools *pools)
2805{
2806        if (!pools)
2807                return;
2808
2809        if (pools->io_pool)
2810                mempool_destroy(pools->io_pool);
2811
2812        if (pools->tio_pool)
2813                mempool_destroy(pools->tio_pool);
2814
2815        if (pools->bs)
2816                bioset_free(pools->bs);
2817
2818        kfree(pools);
2819}
2820
2821static const struct block_device_operations dm_blk_dops = {
2822        .open = dm_blk_open,
2823        .release = dm_blk_close,
2824        .ioctl = dm_blk_ioctl,
2825        .getgeo = dm_blk_getgeo,
2826        .owner = THIS_MODULE
2827};
2828
2829EXPORT_SYMBOL(dm_get_mapinfo);
2830
2831/*
2832 * module hooks
2833 */
2834module_init(dm_init);
2835module_exit(dm_exit);
2836
2837module_param(major, uint, 0);
2838MODULE_PARM_DESC(major, "The major number of the device mapper");
2839MODULE_DESCRIPTION(DM_NAME " driver");
2840MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2841MODULE_LICENSE("GPL");
2842