linux/drivers/md/dm.c
   1/*
   2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
   3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm.h"
   9#include "dm-uevent.h"
  10
  11#include <linux/init.h>
  12#include <linux/module.h>
  13#include <linux/mutex.h>
  14#include <linux/moduleparam.h>
  15#include <linux/blkpg.h>
  16#include <linux/bio.h>
  17#include <linux/mempool.h>
  18#include <linux/slab.h>
  19#include <linux/idr.h>
  20#include <linux/hdreg.h>
  21#include <linux/delay.h>
  22
  23#include <trace/events/block.h>
  24
  25#define DM_MSG_PREFIX "core"
  26
  27#ifdef CONFIG_PRINTK
  28/*
  29 * ratelimit state to be used in DMXXX_LIMIT().
  30 */
  31DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
  32                       DEFAULT_RATELIMIT_INTERVAL,
  33                       DEFAULT_RATELIMIT_BURST);
  34EXPORT_SYMBOL(dm_ratelimit_state);
  35#endif
  36
  37/*
  38 * Cookies are numeric values sent with CHANGE and REMOVE
  39 * uevents while resuming, removing or renaming the device.
  40 */
  41#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
  42#define DM_COOKIE_LENGTH 24
  43
  44static const char *_name = DM_NAME;
  45
  46static unsigned int major = 0;
  47static unsigned int _major = 0;
  48
  49static DEFINE_IDR(_minor_idr);
  50
  51static DEFINE_SPINLOCK(_minor_lock);
  52/*
  53 * For bio-based dm.
  54 * One of these is allocated per bio.
  55 */
  56struct dm_io {
  57        struct mapped_device *md;
  58        int error;
  59        atomic_t io_count;
  60        struct bio *bio;
  61        unsigned long start_time;
  62        spinlock_t endio_lock;
  63};
  64
  65/*
  66 * For bio-based dm.
  67 * One of these is allocated per target within a bio.  Hopefully
  68 * this will be simplified out one day.
  69 */
  70struct dm_target_io {
  71        struct dm_io *io;
  72        struct dm_target *ti;
  73        union map_info info;
  74};
  75
  76/*
  77 * For request-based dm.
  78 * One of these is allocated per request.
  79 */
  80struct dm_rq_target_io {
  81        struct mapped_device *md;
  82        struct dm_target *ti;
  83        struct request *orig, clone;
  84        int error;
  85        union map_info info;
  86};
  87
  88/*
  89 * For request-based dm.
  90 * One of these is allocated per bio.
  91 */
  92struct dm_rq_clone_bio_info {
  93        struct bio *orig;
  94        struct dm_rq_target_io *tio;
  95};
  96
  97union map_info *dm_get_mapinfo(struct bio *bio)
  98{
  99        if (bio && bio->bi_private)
 100                return &((struct dm_target_io *)bio->bi_private)->info;
 101        return NULL;
 102}
 103
 104union map_info *dm_get_rq_mapinfo(struct request *rq)
 105{
 106        if (rq && rq->end_io_data)
 107                return &((struct dm_rq_target_io *)rq->end_io_data)->info;
 108        return NULL;
 109}
 110EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 111
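    /*
     * Placeholder stored in the minor idr by specific_minor() and
     * next_free_minor() while a mapped_device is still being set up.
     */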
 112#define MINOR_ALLOCED ((void *)-1)
 113
 114/*
 115 * Bits for the md->flags field.
 116 */
 117#define DMF_BLOCK_IO_FOR_SUSPEND 0
 118#define DMF_SUSPENDED 1
 119#define DMF_FROZEN 2
 120#define DMF_FREEING 3
 121#define DMF_DELETING 4
 122#define DMF_NOFLUSH_SUSPENDING 5
 123#define DMF_MERGE_IS_OPTIONAL 6
 124
 125/*
 126 * Work processed by per-device workqueue.
 127 */
 128struct mapped_device {
 129        struct rw_semaphore io_lock;
 130        struct mutex suspend_lock;
 131        rwlock_t map_lock;
 132        atomic_t holders;
 133        atomic_t open_count;
 134
 135        unsigned long flags;
 136
 137        struct request_queue *queue;
 138        unsigned type;
 139        /* Protect queue and type against concurrent access. */
 140        struct mutex type_lock;
 141
 142        struct target_type *immutable_target_type;
 143
 144        struct gendisk *disk;
 145        char name[16];
 146
 147        void *interface_ptr;
 148
 149        /*
 150         * A list of ios that arrived while we were suspended.
 151         */
 152        atomic_t pending[2];
 153        wait_queue_head_t wait;
 154        struct work_struct work;
 155        struct bio_list deferred;
 156        spinlock_t deferred_lock;
 157
 158        /*
 159         * Processing queue (flush)
 160         */
 161        struct workqueue_struct *wq;
 162
 163        /*
 164         * The current mapping.
 165         */
 166        struct dm_table *map;
 167
 168        /*
 169         * io objects are allocated from here.
 170         */
 171        mempool_t *io_pool;
 172        mempool_t *tio_pool;
 173
 174        struct bio_set *bs;
 175
 176        /*
 177         * Event handling.
 178         */
 179        atomic_t event_nr;
 180        wait_queue_head_t eventq;
 181        atomic_t uevent_seq;
 182        struct list_head uevent_list;
 183        spinlock_t uevent_lock; /* Protect access to uevent_list */
 184
 185        /*
 186         * freeze/thaw support requires holding onto a super block
 187         */
 188        struct super_block *frozen_sb;
 189        struct block_device *bdev;
 190
 191        /* forced geometry settings */
 192        struct hd_geometry geometry;
 193
 194        /* sysfs handle */
 195        struct kobject kobj;
 196
 197        /* zero-length flush that will be cloned and submitted to targets */
 198        struct bio flush_bio;
 199};
 200
 201/*
 202 * For mempool pre-allocation at table loading time.
 203 */
 204struct dm_md_mempools {
 205        mempool_t *io_pool;
 206        mempool_t *tio_pool;
 207        struct bio_set *bs;
 208};
 209
 210#define MIN_IOS 256
 211static struct kmem_cache *_io_cache;
 212static struct kmem_cache *_tio_cache;
 213static struct kmem_cache *_rq_tio_cache;
 214static struct kmem_cache *_rq_bio_info_cache;
 215
 216static int __init local_init(void)
 217{
 218        int r = -ENOMEM;
 219
 220        /* allocate a slab for the dm_ios */
 221        _io_cache = KMEM_CACHE(dm_io, 0);
 222        if (!_io_cache)
 223                return r;
 224
 225        /* allocate a slab for the target ios */
 226        _tio_cache = KMEM_CACHE(dm_target_io, 0);
 227        if (!_tio_cache)
 228                goto out_free_io_cache;
 229
 230        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
 231        if (!_rq_tio_cache)
 232                goto out_free_tio_cache;
 233
 234        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
 235        if (!_rq_bio_info_cache)
 236                goto out_free_rq_tio_cache;
 237
 238        r = dm_uevent_init();
 239        if (r)
 240                goto out_free_rq_bio_info_cache;
 241
 242        _major = major;
 243        r = register_blkdev(_major, _name);
 244        if (r < 0)
 245                goto out_uevent_exit;
 246
 247        if (!_major)
 248                _major = r;
 249
 250        return 0;
 251
 252out_uevent_exit:
 253        dm_uevent_exit();
 254out_free_rq_bio_info_cache:
 255        kmem_cache_destroy(_rq_bio_info_cache);
 256out_free_rq_tio_cache:
 257        kmem_cache_destroy(_rq_tio_cache);
 258out_free_tio_cache:
 259        kmem_cache_destroy(_tio_cache);
 260out_free_io_cache:
 261        kmem_cache_destroy(_io_cache);
 262
 263        return r;
 264}
 265
 266static void local_exit(void)
 267{
 268        kmem_cache_destroy(_rq_bio_info_cache);
 269        kmem_cache_destroy(_rq_tio_cache);
 270        kmem_cache_destroy(_tio_cache);
 271        kmem_cache_destroy(_io_cache);
 272        unregister_blkdev(_major, _name);
 273        dm_uevent_exit();
 274
 275        _major = 0;
 276
 277        DMINFO("cleaned up");
 278}
 279
 280static int (*_inits[])(void) __initdata = {
 281        local_init,
 282        dm_target_init,
 283        dm_linear_init,
 284        dm_stripe_init,
 285        dm_io_init,
 286        dm_kcopyd_init,
 287        dm_interface_init,
 288};
 289
 290static void (*_exits[])(void) = {
 291        local_exit,
 292        dm_target_exit,
 293        dm_linear_exit,
 294        dm_stripe_exit,
 295        dm_io_exit,
 296        dm_kcopyd_exit,
 297        dm_interface_exit,
 298};
 299
 300static int __init dm_init(void)
 301{
 302        const int count = ARRAY_SIZE(_inits);
 303
 304        int r, i;
 305
 306        for (i = 0; i < count; i++) {
 307                r = _inits[i]();
 308                if (r)
 309                        goto bad;
 310        }
 311
 312        return 0;
 313
 314      bad:
 315        while (i--)
 316                _exits[i]();
 317
 318        return r;
 319}
 320
 321static void __exit dm_exit(void)
 322{
 323        int i = ARRAY_SIZE(_exits);
 324
 325        while (i--)
 326                _exits[i]();
 327
 328        /*
 329         * Should be empty by this point.
 330         */
 331        idr_remove_all(&_minor_idr);
 332        idr_destroy(&_minor_idr);
 333}
 334
 335/*
 336 * Block device functions
 337 */
 338int dm_deleting_md(struct mapped_device *md)
 339{
 340        return test_bit(DMF_DELETING, &md->flags);
 341}
 342
 343static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 344{
 345        struct mapped_device *md;
 346
 347        spin_lock(&_minor_lock);
 348
 349        md = bdev->bd_disk->private_data;
 350        if (!md)
 351                goto out;
 352
 353        if (test_bit(DMF_FREEING, &md->flags) ||
 354            dm_deleting_md(md)) {
 355                md = NULL;
 356                goto out;
 357        }
 358
 359        dm_get(md);
 360        atomic_inc(&md->open_count);
 361
 362out:
 363        spin_unlock(&_minor_lock);
 364
 365        return md ? 0 : -ENXIO;
 366}
 367
 368static int dm_blk_close(struct gendisk *disk, fmode_t mode)
 369{
 370        struct mapped_device *md = disk->private_data;
 371
 372        spin_lock(&_minor_lock);
 373
 374        atomic_dec(&md->open_count);
 375        dm_put(md);
 376
 377        spin_unlock(&_minor_lock);
 378
 379        return 0;
 380}
 381
 382int dm_open_count(struct mapped_device *md)
 383{
 384        return atomic_read(&md->open_count);
 385}
 386
 387/*
 388 * Guarantees nothing is using the device before it's deleted.
 389 */
 390int dm_lock_for_deletion(struct mapped_device *md)
 391{
 392        int r = 0;
 393
 394        spin_lock(&_minor_lock);
 395
 396        if (dm_open_count(md))
 397                r = -EBUSY;
 398        else
 399                set_bit(DMF_DELETING, &md->flags);
 400
 401        spin_unlock(&_minor_lock);
 402
 403        return r;
 404}
 405
 406static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 407{
 408        struct mapped_device *md = bdev->bd_disk->private_data;
 409
 410        return dm_get_geometry(md, geo);
 411}
 412
 413static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 414                        unsigned int cmd, unsigned long arg)
 415{
 416        struct mapped_device *md = bdev->bd_disk->private_data;
 417        struct dm_table *map = dm_get_live_table(md);
 418        struct dm_target *tgt;
 419        int r = -ENOTTY;
 420
 421        if (!map || !dm_table_get_size(map))
 422                goto out;
 423
 424        /* We only support devices that have a single target */
 425        if (dm_table_get_num_targets(map) != 1)
 426                goto out;
 427
 428        tgt = dm_table_get_target(map, 0);
 429
 430        if (dm_suspended_md(md)) {
 431                r = -EAGAIN;
 432                goto out;
 433        }
 434
 435        if (tgt->type->ioctl)
 436                r = tgt->type->ioctl(tgt, cmd, arg);
 437
 438out:
 439        dm_table_put(map);
 440
 441        return r;
 442}
 443
 444static struct dm_io *alloc_io(struct mapped_device *md)
 445{
 446        return mempool_alloc(md->io_pool, GFP_NOIO);
 447}
 448
 449static void free_io(struct mapped_device *md, struct dm_io *io)
 450{
 451        mempool_free(io, md->io_pool);
 452}
 453
 454static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
 455{
 456        mempool_free(tio, md->tio_pool);
 457}
 458
 459static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
 460                                            gfp_t gfp_mask)
 461{
 462        return mempool_alloc(md->tio_pool, gfp_mask);
 463}
 464
 465static void free_rq_tio(struct dm_rq_target_io *tio)
 466{
 467        mempool_free(tio, tio->md->tio_pool);
 468}
 469
 470static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
 471{
 472        return mempool_alloc(md->io_pool, GFP_ATOMIC);
 473}
 474
 475static void free_bio_info(struct dm_rq_clone_bio_info *info)
 476{
 477        mempool_free(info, info->tio->md->io_pool);
 478}
 479
 480static int md_in_flight(struct mapped_device *md)
 481{
 482        return atomic_read(&md->pending[READ]) +
 483               atomic_read(&md->pending[WRITE]);
 484}
 485
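    /*
     * Per-bio I/O accounting: update the generic disk statistics and the
     * md->pending[] in-flight counters that the suspend path waits on.
     */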
 486static void start_io_acct(struct dm_io *io)
 487{
 488        struct mapped_device *md = io->md;
 489        int cpu;
 490        int rw = bio_data_dir(io->bio);
 491
 492        io->start_time = jiffies;
 493
 494        cpu = part_stat_lock();
 495        part_round_stats(cpu, &dm_disk(md)->part0);
 496        part_stat_unlock();
 497        atomic_set(&dm_disk(md)->part0.in_flight[rw],
 498                atomic_inc_return(&md->pending[rw]));
 499}
 500
 501static void end_io_acct(struct dm_io *io)
 502{
 503        struct mapped_device *md = io->md;
 504        struct bio *bio = io->bio;
 505        unsigned long duration = jiffies - io->start_time;
 506        int pending, cpu;
 507        int rw = bio_data_dir(bio);
 508
 509        cpu = part_stat_lock();
 510        part_round_stats(cpu, &dm_disk(md)->part0);
 511        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
 512        part_stat_unlock();
 513
 514        /*
 515         * After this is decremented the bio must not be touched if it is
 516         * a flush.
 517         */
 518        pending = atomic_dec_return(&md->pending[rw]);
 519        atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
 520        pending += atomic_read(&md->pending[rw^0x1]);
 521
 522        /* nudge anyone waiting on suspend queue */
 523        if (!pending)
 524                wake_up(&md->wait);
 525}
 526
 527/*
 528 * Add the bio to the list of deferred io.
 529 */
 530static void queue_io(struct mapped_device *md, struct bio *bio)
 531{
 532        unsigned long flags;
 533
 534        spin_lock_irqsave(&md->deferred_lock, flags);
 535        bio_list_add(&md->deferred, bio);
 536        spin_unlock_irqrestore(&md->deferred_lock, flags);
 537        queue_work(md->wq, &md->work);
 538}
 539
 540/*
 541 * Everyone (including functions in this file) should use this
 542 * function to access the md->map field, and make sure they call
 543 * dm_table_put() when finished.
 544 */
 545struct dm_table *dm_get_live_table(struct mapped_device *md)
 546{
 547        struct dm_table *t;
 548        unsigned long flags;
 549
 550        read_lock_irqsave(&md->map_lock, flags);
 551        t = md->map;
 552        if (t)
 553                dm_table_get(t);
 554        read_unlock_irqrestore(&md->map_lock, flags);
 555
 556        return t;
 557}
 558
 559/*
 560 * Get the geometry associated with a dm device
 561 */
 562int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
 563{
 564        *geo = md->geometry;
 565
 566        return 0;
 567}
 568
 569/*
 570 * Set the geometry of a device.
 571 */
 572int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
 573{
 574        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
 575
 576        if (geo->start > sz) {
 577                DMWARN("Start sector is beyond the geometry limits.");
 578                return -EINVAL;
 579        }
 580
 581        md->geometry = *geo;
 582
 583        return 0;
 584}
 585
 586/*-----------------------------------------------------------------
 587 * CRUD START:
 588 *   A more elegant soln is in the works that uses the queue
 589 *   merge fn, unfortunately there are a couple of changes to
 590 *   the block layer that I want to make for this.  So in the
 591 *   interests of getting something for people to use I give
 592 *   you this clearly demarcated crap.
 593 *---------------------------------------------------------------*/
 594
 595static int __noflush_suspending(struct mapped_device *md)
 596{
 597        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
 598}
 599
 600/*
 601 * Decrements the number of outstanding ios that a bio has been
 602 * cloned into, completing the original io if necessary.
 603 */
 604static void dec_pending(struct dm_io *io, int error)
 605{
 606        unsigned long flags;
 607        int io_error;
 608        struct bio *bio;
 609        struct mapped_device *md = io->md;
 610
 611        /* Push-back supersedes any I/O errors */
 612        if (unlikely(error)) {
 613                spin_lock_irqsave(&io->endio_lock, flags);
 614                if (!(io->error > 0 && __noflush_suspending(md)))
 615                        io->error = error;
 616                spin_unlock_irqrestore(&io->endio_lock, flags);
 617        }
 618
 619        if (atomic_dec_and_test(&io->io_count)) {
 620                if (io->error == DM_ENDIO_REQUEUE) {
 621                        /*
 622                         * Target requested pushing back the I/O.
 623                         */
 624                        spin_lock_irqsave(&md->deferred_lock, flags);
 625                        if (__noflush_suspending(md))
 626                                bio_list_add_head(&md->deferred, io->bio);
 627                        else
 628                                /* noflush suspend was interrupted. */
 629                                io->error = -EIO;
 630                        spin_unlock_irqrestore(&md->deferred_lock, flags);
 631                }
 632
 633                io_error = io->error;
 634                bio = io->bio;
 635                end_io_acct(io);
 636                free_io(md, io);
 637
 638                if (io_error == DM_ENDIO_REQUEUE)
 639                        return;
 640
 641                if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
 642                        /*
 643                         * Preflush done for flush with data, reissue
 644                         * without REQ_FLUSH.
 645                         */
 646                        bio->bi_rw &= ~REQ_FLUSH;
 647                        queue_io(md, bio);
 648                } else {
 649                        /* done with normal IO or empty flush */
 650                        trace_block_bio_complete(md->queue, bio, io_error);
 651                        bio_endio(bio, io_error);
 652                }
 653        }
 654}
 655
 656static void clone_endio(struct bio *bio, int error)
 657{
 658        int r = 0;
 659        struct dm_target_io *tio = bio->bi_private;
 660        struct dm_io *io = tio->io;
 661        struct mapped_device *md = tio->io->md;
 662        dm_endio_fn endio = tio->ti->type->end_io;
 663
 664        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
 665                error = -EIO;
 666
 667        if (endio) {
 668                r = endio(tio->ti, bio, error, &tio->info);
 669                if (r < 0 || r == DM_ENDIO_REQUEUE)
 670                        /*
 671                         * error and requeue request are handled
 672                         * in dec_pending().
 673                         */
 674                        error = r;
 675                else if (r == DM_ENDIO_INCOMPLETE)
 676                        /* The target will handle the io */
 677                        return;
 678                else if (r) {
 679                        DMWARN("unimplemented target endio return value: %d", r);
 680                        BUG();
 681                }
 682        }
 683
 684        /*
 685         * Store md for cleanup instead of tio which is about to get freed.
 686         */
 687        bio->bi_private = md->bs;
 688
 689        free_tio(md, tio);
 690        bio_put(bio);
 691        dec_pending(io, error);
 692}
 693
 694/*
 695 * Partial completion handling for request-based dm
 696 */
 697static void end_clone_bio(struct bio *clone, int error)
 698{
 699        struct dm_rq_clone_bio_info *info = clone->bi_private;
 700        struct dm_rq_target_io *tio = info->tio;
 701        struct bio *bio = info->orig;
 702        unsigned int nr_bytes = info->orig->bi_size;
 703
 704        bio_put(clone);
 705
 706        if (tio->error)
 707                /*
 708                 * An error has already been detected on the request.
 709                 * Once an error has occurred, just let clone->end_io() handle
 710                 * the remainder.
 711                 */
 712                return;
 713        else if (error) {
 714                /*
 715                 * Don't report the error to the upper layer yet.
 716                 * The error handling decision is made by the target driver
 717                 * when the request is completed.
 718                 */
 719                tio->error = error;
 720                return;
 721        }
 722
 723        /*
 724         * I/O for the bio successfully completed.
 725         * Report the data completion to the upper layer.
 726         */
 727
 728        /*
 729         * bios are processed from the head of the list.
 730         * So the completing bio should always be rq->bio.
 731         * If it's not, something is wrong.
 732         */
 733        if (tio->orig->bio != bio)
 734                DMERR("bio completion is going in the middle of the request");
 735
 736        /*
 737         * Update the original request.
 738         * Do not use blk_end_request() here, because it may complete
 739         * the original request before the clone, and break the ordering.
 740         */
 741        blk_update_request(tio->orig, 0, nr_bytes);
 742}
 743
 744/*
 745 * Don't touch any member of the md after calling this function because
 746 * the md may be freed in dm_put() at the end of this function.
 747 * Or do dm_get() before calling this function and dm_put() later.
 748 */
 749static void rq_completed(struct mapped_device *md, int rw, int run_queue)
 750{
 751        atomic_dec(&md->pending[rw]);
 752
 753        /* nudge anyone waiting on suspend queue */
 754        if (!md_in_flight(md))
 755                wake_up(&md->wait);
 756
 757        if (run_queue)
 758                blk_run_queue(md->queue);
 759
 760        /*
 761         * dm_put() must be at the end of this function. See the comment above
 762         */
 763        dm_put(md);
 764}
 765
 766static void free_rq_clone(struct request *clone)
 767{
 768        struct dm_rq_target_io *tio = clone->end_io_data;
 769
 770        blk_rq_unprep_clone(clone);
 771        free_rq_tio(tio);
 772}
 773
 774/*
 775 * Complete the clone and the original request.
 776 * Must be called without queue lock.
 777 */
 778static void dm_end_request(struct request *clone, int error)
 779{
 780        int rw = rq_data_dir(clone);
 781        struct dm_rq_target_io *tio = clone->end_io_data;
 782        struct mapped_device *md = tio->md;
 783        struct request *rq = tio->orig;
 784
 785        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 786                rq->errors = clone->errors;
 787                rq->resid_len = clone->resid_len;
 788
 789                if (rq->sense)
 790                        /*
 791                         * We are using the sense buffer of the original
 792                         * request.
 793                         * So setting the length of the sense data is enough.
 794                         */
 795                        rq->sense_len = clone->sense_len;
 796        }
 797
 798        free_rq_clone(clone);
 799        blk_end_request_all(rq, error);
 800        rq_completed(md, rw, true);
 801}
 802
 803static void dm_unprep_request(struct request *rq)
 804{
 805        struct request *clone = rq->special;
 806
 807        rq->special = NULL;
 808        rq->cmd_flags &= ~REQ_DONTPREP;
 809
 810        free_rq_clone(clone);
 811}
 812
 813/*
 814 * Requeue the original request of a clone.
 815 */
 816void dm_requeue_unmapped_request(struct request *clone)
 817{
 818        int rw = rq_data_dir(clone);
 819        struct dm_rq_target_io *tio = clone->end_io_data;
 820        struct mapped_device *md = tio->md;
 821        struct request *rq = tio->orig;
 822        struct request_queue *q = rq->q;
 823        unsigned long flags;
 824
 825        dm_unprep_request(rq);
 826
 827        spin_lock_irqsave(q->queue_lock, flags);
 828        blk_requeue_request(q, rq);
 829        spin_unlock_irqrestore(q->queue_lock, flags);
 830
 831        rq_completed(md, rw, 0);
 832}
 833EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
 834
 835static void __stop_queue(struct request_queue *q)
 836{
 837        blk_stop_queue(q);
 838}
 839
 840static void stop_queue(struct request_queue *q)
 841{
 842        unsigned long flags;
 843
 844        spin_lock_irqsave(q->queue_lock, flags);
 845        __stop_queue(q);
 846        spin_unlock_irqrestore(q->queue_lock, flags);
 847}
 848
 849static void __start_queue(struct request_queue *q)
 850{
 851        if (blk_queue_stopped(q))
 852                blk_start_queue(q);
 853}
 854
 855static void start_queue(struct request_queue *q)
 856{
 857        unsigned long flags;
 858
 859        spin_lock_irqsave(q->queue_lock, flags);
 860        __start_queue(q);
 861        spin_unlock_irqrestore(q->queue_lock, flags);
 862}
 863
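    /*
     * Pass the clone's completion status to the target's rq_end_io handler
     * (if any) and act on the result: complete the I/O, requeue it, or
     * leave it to the target (DM_ENDIO_INCOMPLETE).
     */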
 864static void dm_done(struct request *clone, int error, bool mapped)
 865{
 866        int r = error;
 867        struct dm_rq_target_io *tio = clone->end_io_data;
 868        dm_request_endio_fn rq_end_io = NULL;
 869
 870        if (tio->ti) {
 871                rq_end_io = tio->ti->type->rq_end_io;
 872
 873                if (mapped && rq_end_io)
 874                        r = rq_end_io(tio->ti, clone, error, &tio->info);
 875        }
 876
 877        if (r <= 0)
 878                /* The target wants to complete the I/O */
 879                dm_end_request(clone, r);
 880        else if (r == DM_ENDIO_INCOMPLETE)
 881                /* The target will handle the I/O */
 882                return;
 883        else if (r == DM_ENDIO_REQUEUE)
 884                /* The target wants to requeue the I/O */
 885                dm_requeue_unmapped_request(clone);
 886        else {
 887                DMWARN("unimplemented target endio return value: %d", r);
 888                BUG();
 889        }
 890}
 891
 892/*
 893 * Request completion handler for request-based dm
 894 */
 895static void dm_softirq_done(struct request *rq)
 896{
 897        bool mapped = true;
 898        struct request *clone = rq->completion_data;
 899        struct dm_rq_target_io *tio = clone->end_io_data;
 900
 901        if (rq->cmd_flags & REQ_FAILED)
 902                mapped = false;
 903
 904        dm_done(clone, tio->error, mapped);
 905}
 906
 907/*
 908 * Complete the clone and the original request with the error status
 909 * through softirq context.
 910 */
 911static void dm_complete_request(struct request *clone, int error)
 912{
 913        struct dm_rq_target_io *tio = clone->end_io_data;
 914        struct request *rq = tio->orig;
 915
 916        tio->error = error;
 917        rq->completion_data = clone;
 918        blk_complete_request(rq);
 919}
 920
 921/*
 922 * Complete the unmapped clone and the original request with the error status
 923 * through softirq context.
 924 * The target's rq_end_io() function isn't called.
 925 * This may be used when the target's map_rq() function fails.
 926 */
 927void dm_kill_unmapped_request(struct request *clone, int error)
 928{
 929        struct dm_rq_target_io *tio = clone->end_io_data;
 930        struct request *rq = tio->orig;
 931
 932        rq->cmd_flags |= REQ_FAILED;
 933        dm_complete_request(clone, error);
 934}
 935EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
 936
 937/*
 938 * Called with the queue lock held
 939 */
 940static void end_clone_request(struct request *clone, int error)
 941{
 942        /*
 943         * This just cleans up the information of the queue in which
 944         * the clone was dispatched.
 945         * The clone is *NOT* actually freed here because it was allocated
 946         * from dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
 947         */
 948        __blk_put_request(clone->q, clone);
 949
 950        /*
 951         * Actual request completion is done in a softirq context which doesn't
 952         * hold the queue lock.  Otherwise, deadlock could occur because:
 953         *     - another request may be submitted by the upper level driver
 954         *       of the stacking during the completion
 955         *     - the submission which requires queue lock may be done
 956         *       against this queue
 957         */
 958        dm_complete_request(clone, error);
 959}
 960
 961/*
 962 * Return maximum size of I/O possible at the supplied sector up to the current
 963 * target boundary.
 964 */
 965static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
 966{
 967        sector_t target_offset = dm_target_offset(ti, sector);
 968
 969        return ti->len - target_offset;
 970}
 971
 972static sector_t max_io_len(sector_t sector, struct dm_target *ti)
 973{
 974        sector_t len = max_io_len_target_boundary(sector, ti);
 975        sector_t offset, max_len;
 976
 977        /*
 978         * Does the target need to split even further?
 979         */
 980        if (ti->max_io_len) {
 981                offset = dm_target_offset(ti, sector);
 982                if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
 983                        max_len = sector_div(offset, ti->max_io_len);
 984                else
 985                        max_len = offset & (ti->max_io_len - 1);
 986                max_len = ti->max_io_len - max_len;
 987
 988                if (len > max_len)
 989                        len = max_len;
 990        }
 991
 992        return len;
 993}
 994
 995int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 996{
 997        if (len > UINT_MAX) {
 998                DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
 999                      (unsigned long long)len, UINT_MAX);
1000                ti->error = "Maximum size of target IO is too large";
1001                return -EINVAL;
1002        }
1003
1004        ti->max_io_len = (uint32_t) len;
1005
1006        return 0;
1007}
1008EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1009
1010static void __map_bio(struct dm_target *ti, struct bio *clone,
1011                      struct dm_target_io *tio)
1012{
1013        int r;
1014        sector_t sector;
1015        struct mapped_device *md;
1016
1017        clone->bi_end_io = clone_endio;
1018        clone->bi_private = tio;
1019
1020        /*
1021         * Map the clone.  If r == 0 we don't need to do
1022         * anything, the target has assumed ownership of
1023         * this io.
1024         */
1025        atomic_inc(&tio->io->io_count);
1026        sector = clone->bi_sector;
1027        r = ti->type->map(ti, clone, &tio->info);
1028        if (r == DM_MAPIO_REMAPPED) {
1029                /* the bio has been remapped so dispatch it */
1030
1031                trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1032                                      tio->io->bio->bi_bdev->bd_dev, sector);
1033
1034                generic_make_request(clone);
1035        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1036                /* error the io and bail out, or requeue it if needed */
1037                md = tio->io->md;
1038                dec_pending(tio->io, r);
1039                /*
1040                 * Store bio_set for cleanup.
1041                 */
1042                clone->bi_end_io = NULL;
1043                clone->bi_private = md->bs;
1044                bio_put(clone);
1045                free_tio(md, tio);
1046        } else if (r) {
1047                DMWARN("unimplemented target map return value: %d", r);
1048                BUG();
1049        }
1050}
1051
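    /*
     * State carried by __split_and_process_bio() while splitting a bio into
     * clones: the current sector, the number of sectors still to be mapped
     * and the index of the next bvec to process.
     */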
1052struct clone_info {
1053        struct mapped_device *md;
1054        struct dm_table *map;
1055        struct bio *bio;
1056        struct dm_io *io;
1057        sector_t sector;
1058        sector_t sector_count;
1059        unsigned short idx;
1060};
1061
1062static void dm_bio_destructor(struct bio *bio)
1063{
1064        struct bio_set *bs = bio->bi_private;
1065
1066        bio_free(bio, bs);
1067}
1068
1069/*
1070 * Creates a little bio that just does part of a bvec.
1071 */
1072static struct bio *split_bvec(struct bio *bio, sector_t sector,
1073                              unsigned short idx, unsigned int offset,
1074                              unsigned int len, struct bio_set *bs)
1075{
1076        struct bio *clone;
1077        struct bio_vec *bv = bio->bi_io_vec + idx;
1078
1079        clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
1080        clone->bi_destructor = dm_bio_destructor;
1081        *clone->bi_io_vec = *bv;
1082
1083        clone->bi_sector = sector;
1084        clone->bi_bdev = bio->bi_bdev;
1085        clone->bi_rw = bio->bi_rw;
1086        clone->bi_vcnt = 1;
1087        clone->bi_size = to_bytes(len);
1088        clone->bi_io_vec->bv_offset = offset;
1089        clone->bi_io_vec->bv_len = clone->bi_size;
1090        clone->bi_flags |= 1 << BIO_CLONED;
1091
1092        if (bio_integrity(bio)) {
1093                bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1094                bio_integrity_trim(clone,
1095                                   bio_sector_offset(bio, idx, offset), len);
1096        }
1097
1098        return clone;
1099}
1100
1101/*
1102 * Creates a bio that consists of a range of complete bvecs.
1103 */
1104static struct bio *clone_bio(struct bio *bio, sector_t sector,
1105                             unsigned short idx, unsigned short bv_count,
1106                             unsigned int len, struct bio_set *bs)
1107{
1108        struct bio *clone;
1109
1110        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
1111        __bio_clone(clone, bio);
1112        clone->bi_destructor = dm_bio_destructor;
1113        clone->bi_sector = sector;
1114        clone->bi_idx = idx;
1115        clone->bi_vcnt = idx + bv_count;
1116        clone->bi_size = to_bytes(len);
1117        clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1118
1119        if (bio_integrity(bio)) {
1120                bio_integrity_clone(clone, bio, GFP_NOIO, bs);
1121
1122                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1123                        bio_integrity_trim(clone,
1124                                           bio_sector_offset(bio, idx, 0), len);
1125        }
1126
1127        return clone;
1128}
1129
1130static struct dm_target_io *alloc_tio(struct clone_info *ci,
1131                                      struct dm_target *ti)
1132{
1133        struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);
1134
1135        tio->io = ci->io;
1136        tio->ti = ti;
1137        memset(&tio->info, 0, sizeof(tio->info));
1138
1139        return tio;
1140}
1141
1142static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
1143                                   unsigned request_nr, sector_t len)
1144{
1145        struct dm_target_io *tio = alloc_tio(ci, ti);
1146        struct bio *clone;
1147
1148        tio->info.target_request_nr = request_nr;
1149
1150        /*
1151         * Discard requests require the bio's inline iovecs be initialized.
1152         * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1153         * and discard, so no need for concern about wasted bvec allocations.
1154         */
1155        clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
1156        __bio_clone(clone, ci->bio);
1157        clone->bi_destructor = dm_bio_destructor;
1158        if (len) {
1159                clone->bi_sector = ci->sector;
1160                clone->bi_size = to_bytes(len);
1161        }
1162
1163        __map_bio(ti, clone, tio);
1164}
1165
1166static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
1167                                    unsigned num_requests, sector_t len)
1168{
1169        unsigned request_nr;
1170
1171        for (request_nr = 0; request_nr < num_requests; request_nr++)
1172                __issue_target_request(ci, ti, request_nr, len);
1173}
1174
1175static int __clone_and_map_empty_flush(struct clone_info *ci)
1176{
1177        unsigned target_nr = 0;
1178        struct dm_target *ti;
1179
1180        BUG_ON(bio_has_data(ci->bio));
1181        while ((ti = dm_table_get_target(ci->map, target_nr++)))
1182                __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
1183
1184        return 0;
1185}
1186
1187/*
1188 * Perform all io with a single clone.
1189 */
1190static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
1191{
1192        struct bio *clone, *bio = ci->bio;
1193        struct dm_target_io *tio;
1194
1195        tio = alloc_tio(ci, ti);
1196        clone = clone_bio(bio, ci->sector, ci->idx,
1197                          bio->bi_vcnt - ci->idx, ci->sector_count,
1198                          ci->md->bs);
1199        __map_bio(ti, clone, tio);
1200        ci->sector_count = 0;
1201}
1202
1203static int __clone_and_map_discard(struct clone_info *ci)
1204{
1205        struct dm_target *ti;
1206        sector_t len;
1207
1208        do {
1209                ti = dm_table_find_target(ci->map, ci->sector);
1210                if (!dm_target_is_valid(ti))
1211                        return -EIO;
1212
1213                /*
1214                 * Even though the device advertised discard support,
1215                 * that does not mean every target supports it, and
1216                 * reconfiguration might also have changed that since the
1217                 * check was performed.
1218                 */
1219                if (!ti->num_discard_requests)
1220                        return -EOPNOTSUPP;
1221
1222                if (!ti->split_discard_requests)
1223                        len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
1224                else
1225                        len = min(ci->sector_count, max_io_len(ci->sector, ti));
1226
1227                __issue_target_requests(ci, ti, ti->num_discard_requests, len);
1228
1229                ci->sector += len;
1230        } while (ci->sector_count -= len);
1231
1232        return 0;
1233}
1234
1235static int __clone_and_map(struct clone_info *ci)
1236{
1237        struct bio *clone, *bio = ci->bio;
1238        struct dm_target *ti;
1239        sector_t len = 0, max;
1240        struct dm_target_io *tio;
1241
1242        if (unlikely(bio->bi_rw & REQ_DISCARD))
1243                return __clone_and_map_discard(ci);
1244
1245        ti = dm_table_find_target(ci->map, ci->sector);
1246        if (!dm_target_is_valid(ti))
1247                return -EIO;
1248
1249        max = max_io_len(ci->sector, ti);
1250
1251        if (ci->sector_count <= max) {
1252                /*
1253                 * Optimise for the simple case where we can do all of
1254                 * the remaining io with a single clone.
1255                 */
1256                __clone_and_map_simple(ci, ti);
1257
1258        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1259                /*
1260                 * There are some bvecs that don't span targets.
1261                 * Do as many of these as possible.
1262                 */
1263                int i;
1264                sector_t remaining = max;
1265                sector_t bv_len;
1266
1267                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
1268                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);
1269
1270                        if (bv_len > remaining)
1271                                break;
1272
1273                        remaining -= bv_len;
1274                        len += bv_len;
1275                }
1276
1277                tio = alloc_tio(ci, ti);
1278                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
1279                                  ci->md->bs);
1280                __map_bio(ti, clone, tio);
1281
1282                ci->sector += len;
1283                ci->sector_count -= len;
1284                ci->idx = i;
1285
1286        } else {
1287                /*
1288                 * Handle a bvec that must be split between two or more targets.
1289                 */
1290                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1291                sector_t remaining = to_sector(bv->bv_len);
1292                unsigned int offset = 0;
1293
1294                do {
1295                        if (offset) {
1296                                ti = dm_table_find_target(ci->map, ci->sector);
1297                                if (!dm_target_is_valid(ti))
1298                                        return -EIO;
1299
1300                                max = max_io_len(ci->sector, ti);
1301                        }
1302
1303                        len = min(remaining, max);
1304
1305                        tio = alloc_tio(ci, ti);
1306                        clone = split_bvec(bio, ci->sector, ci->idx,
1307                                           bv->bv_offset + offset, len,
1308                                           ci->md->bs);
1309
1310                        __map_bio(ti, clone, tio);
1311
1312                        ci->sector += len;
1313                        ci->sector_count -= len;
1314                        offset += to_bytes(len);
1315                } while (remaining -= len);
1316
1317                ci->idx++;
1318        }
1319
1320        return 0;
1321}
1322
1323/*
1324 * Split the bio into several clones and submit it to targets.
1325 */
1326static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
1327{
1328        struct clone_info ci;
1329        int error = 0;
1330
1331        ci.map = dm_get_live_table(md);
1332        if (unlikely(!ci.map)) {
1333                bio_io_error(bio);
1334                return;
1335        }
1336
1337        ci.md = md;
1338        ci.io = alloc_io(md);
1339        ci.io->error = 0;
1340        atomic_set(&ci.io->io_count, 1);
1341        ci.io->bio = bio;
1342        ci.io->md = md;
1343        spin_lock_init(&ci.io->endio_lock);
1344        ci.sector = bio->bi_sector;
1345        ci.idx = bio->bi_idx;
1346
1347        start_io_acct(ci.io);
1348        if (bio->bi_rw & REQ_FLUSH) {
1349                ci.bio = &ci.md->flush_bio;
1350                ci.sector_count = 0;
1351                error = __clone_and_map_empty_flush(&ci);
1352                /* dec_pending submits any data associated with flush */
1353        } else {
1354                ci.bio = bio;
1355                ci.sector_count = bio_sectors(bio);
1356                while (ci.sector_count && !error)
1357                        error = __clone_and_map(&ci);
1358        }
1359
1360        /* drop the extra reference count */
1361        dec_pending(ci.io, error);
1362        dm_table_put(ci.map);
1363}
1364/*-----------------------------------------------------------------
1365 * CRUD END
1366 *---------------------------------------------------------------*/
1367
1368static int dm_merge_bvec(struct request_queue *q,
1369                         struct bvec_merge_data *bvm,
1370                         struct bio_vec *biovec)
1371{
1372        struct mapped_device *md = q->queuedata;
1373        struct dm_table *map = dm_get_live_table(md);
1374        struct dm_target *ti;
1375        sector_t max_sectors;
1376        int max_size = 0;
1377
1378        if (unlikely(!map))
1379                goto out;
1380
1381        ti = dm_table_find_target(map, bvm->bi_sector);
1382        if (!dm_target_is_valid(ti))
1383                goto out_table;
1384
1385        /*
1386         * Find maximum amount of I/O that won't need splitting
1387         */
1388        max_sectors = min(max_io_len(bvm->bi_sector, ti),
1389                          (sector_t) BIO_MAX_SECTORS);
1390        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1391        if (max_size < 0)
1392                max_size = 0;
1393
1394        /*
1395         * merge_bvec_fn() returns the number of bytes
1396         * it can accept at this offset;
1397         * max_size is the precomputed maximal io size
1398         */
1399        if (max_size && ti->type->merge)
1400                max_size = ti->type->merge(ti, bvm, biovec, max_size);
1401        /*
1402         * If the target doesn't support a merge method and some of the devices
1403         * provided their merge_bvec method (we know this by looking at
1404         * queue_max_hw_sectors), then we can't allow bios with multiple vector
1405         * entries.  So always set max_size to 0, and the code below allows
1406         * just one page.
1407         */
1408        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1409
1410                max_size = 0;
1411
1412out_table:
1413        dm_table_put(map);
1414
1415out:
1416        /*
1417         * Always allow an entire first page
1418         */
1419        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1420                max_size = biovec->bv_len;
1421
1422        return max_size;
1423}
1424
1425/*
1426 * The request function that just remaps the bio built up by
1427 * dm_merge_bvec.
1428 */
1429static void _dm_request(struct request_queue *q, struct bio *bio)
1430{
1431        int rw = bio_data_dir(bio);
1432        struct mapped_device *md = q->queuedata;
1433        int cpu;
1434
1435        down_read(&md->io_lock);
1436
1437        cpu = part_stat_lock();
1438        part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1439        part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1440        part_stat_unlock();
1441
1442        /* if we're suspended, we have to queue this io for later */
1443        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
1444                up_read(&md->io_lock);
1445
1446                if (bio_rw(bio) != READA)
1447                        queue_io(md, bio);
1448                else
1449                        bio_io_error(bio);
1450                return;
1451        }
1452
1453        __split_and_process_bio(md, bio);
1454        up_read(&md->io_lock);
1455        return;
1456}
1457
1458static int dm_request_based(struct mapped_device *md)
1459{
1460        return blk_queue_stackable(md->queue);
1461}
1462
1463static void dm_request(struct request_queue *q, struct bio *bio)
1464{
1465        struct mapped_device *md = q->queuedata;
1466
1467        if (dm_request_based(md))
1468                blk_queue_bio(q, bio);
1469        else
1470                _dm_request(q, bio);
1471}
1472
1473void dm_dispatch_request(struct request *rq)
1474{
1475        int r;
1476
1477        if (blk_queue_io_stat(rq->q))
1478                rq->cmd_flags |= REQ_IO_STAT;
1479
1480        rq->start_time = jiffies;
1481        r = blk_insert_cloned_request(rq->q, rq);
1482        if (r)
1483                dm_complete_request(rq, r);
1484}
1485EXPORT_SYMBOL_GPL(dm_dispatch_request);
1486
1487static void dm_rq_bio_destructor(struct bio *bio)
1488{
1489        struct dm_rq_clone_bio_info *info = bio->bi_private;
1490        struct mapped_device *md = info->tio->md;
1491
1492        free_bio_info(info);
1493        bio_free(bio, md->bs);
1494}
1495
1496static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1497                                 void *data)
1498{
1499        struct dm_rq_target_io *tio = data;
1500        struct mapped_device *md = tio->md;
1501        struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
1502
1503        if (!info)
1504                return -ENOMEM;
1505
1506        info->orig = bio_orig;
1507        info->tio = tio;
1508        bio->bi_end_io = end_clone_bio;
1509        bio->bi_private = info;
1510        bio->bi_destructor = dm_rq_bio_destructor;
1511
1512        return 0;
1513}
1514
1515static int setup_clone(struct request *clone, struct request *rq,
1516                       struct dm_rq_target_io *tio)
1517{
1518        int r;
1519
1520        r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1521                              dm_rq_bio_constructor, tio);
1522        if (r)
1523                return r;
1524
1525        clone->cmd = rq->cmd;
1526        clone->cmd_len = rq->cmd_len;
1527        clone->sense = rq->sense;
1528        clone->buffer = rq->buffer;
1529        clone->end_io = end_clone_request;
1530        clone->end_io_data = tio;
1531
1532        return 0;
1533}
1534
1535static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1536                                gfp_t gfp_mask)
1537{
1538        struct request *clone;
1539        struct dm_rq_target_io *tio;
1540
1541        tio = alloc_rq_tio(md, gfp_mask);
1542        if (!tio)
1543                return NULL;
1544
1545        tio->md = md;
1546        tio->ti = NULL;
1547        tio->orig = rq;
1548        tio->error = 0;
1549        memset(&tio->info, 0, sizeof(tio->info));
1550
1551        clone = &tio->clone;
1552        if (setup_clone(clone, rq, tio)) {
1553                /* -ENOMEM */
1554                free_rq_tio(tio);
1555                return NULL;
1556        }
1557
1558        return clone;
1559}
1560
1561/*
1562 * Called with the queue lock held.
1563 */
1564static int dm_prep_fn(struct request_queue *q, struct request *rq)
1565{
1566        struct mapped_device *md = q->queuedata;
1567        struct request *clone;
1568
1569        if (unlikely(rq->special)) {
1570                DMWARN("Already has something in rq->special.");
1571                return BLKPREP_KILL;
1572        }
1573
1574        clone = clone_rq(rq, md, GFP_ATOMIC);
1575        if (!clone)
1576                return BLKPREP_DEFER;
1577
1578        rq->special = clone;
1579        rq->cmd_flags |= REQ_DONTPREP;
1580
1581        return BLKPREP_OK;
1582}
1583
1584/*
1585 * Returns:
1586 * 0  : the request has been processed (not requeued)
1587 * !0 : the request has been requeued
1588 */
1589static int map_request(struct dm_target *ti, struct request *clone,
1590                       struct mapped_device *md)
1591{
1592        int r, requeued = 0;
1593        struct dm_rq_target_io *tio = clone->end_io_data;
1594
1595        tio->ti = ti;
1596        r = ti->type->map_rq(ti, clone, &tio->info);
1597        switch (r) {
1598        case DM_MAPIO_SUBMITTED:
1599                /* The target has taken the I/O to submit by itself later */
1600                break;
1601        case DM_MAPIO_REMAPPED:
1602                /* The target has remapped the I/O so dispatch it */
1603                trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1604                                     blk_rq_pos(tio->orig));
1605                dm_dispatch_request(clone);
1606                break;
1607        case DM_MAPIO_REQUEUE:
1608                /* The target wants to requeue the I/O */
1609                dm_requeue_unmapped_request(clone);
1610                requeued = 1;
1611                break;
1612        default:
1613                if (r > 0) {
1614                        DMWARN("unimplemented target map return value: %d", r);
1615                        BUG();
1616                }
1617
1618                /* The target wants to complete the I/O */
1619                dm_kill_unmapped_request(clone, r);
1620                break;
1621        }
1622
1623        return requeued;
1624}
1625
1626static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1627{
1628        struct request *clone;
1629
1630        blk_start_request(orig);
1631        clone = orig->special;
1632        atomic_inc(&md->pending[rq_data_dir(clone)]);
1633
1634        /*
1635         * Hold the md reference here for the in-flight I/O.
1636         * We can't rely on the reference count held by the device opener,
1637         * because the device may be closed while the request completes,
1638         * once all of its bios have completed.
1639         * See the comment in rq_completed() too.
1640         */
1641        dm_get(md);
1642
1643        return clone;
1644}
1645
1646/*
1647 * q->request_fn for request-based dm.
1648 * Called with the queue lock held.
1649 */
1650static void dm_request_fn(struct request_queue *q)
1651{
1652        struct mapped_device *md = q->queuedata;
1653        struct dm_table *map = dm_get_live_table(md);
1654        struct dm_target *ti;
1655        struct request *rq, *clone;
1656        sector_t pos;
1657
1658        /*
1659         * For suspend, check blk_queue_stopped() and increment
1660         * ->pending within a single queue_lock critical section, so that
1661         * no new in-flight I/O is accounted after the queue has been
1662         * stopped in dm_suspend().
1663         */
1664        while (!blk_queue_stopped(q)) {
1665                rq = blk_peek_request(q);
1666                if (!rq)
1667                        goto delay_and_out;
1668
1669                /* always use block 0 to find the target for flushes for now */
1670                pos = 0;
1671                if (!(rq->cmd_flags & REQ_FLUSH))
1672                        pos = blk_rq_pos(rq);
1673
1674                ti = dm_table_find_target(map, pos);
1675                if (!dm_target_is_valid(ti)) {
1676                        /*
1677                         * Must perform the setup that dm_done() requires
1678                         * before calling dm_kill_unmapped_request().
1679                         */
1680                        DMERR_LIMIT("request attempted access beyond the end of device");
1681                        clone = dm_start_request(md, rq);
1682                        dm_kill_unmapped_request(clone, -EIO);
1683                        continue;
1684                }
1685
1686                if (ti->type->busy && ti->type->busy(ti))
1687                        goto delay_and_out;
1688
1689                clone = dm_start_request(md, rq);
1690
1691                spin_unlock(q->queue_lock);
1692                if (map_request(ti, clone, md))
1693                        goto requeued;
1694
1695                BUG_ON(!irqs_disabled());
1696                spin_lock(q->queue_lock);
1697        }
1698
1699        goto out;
1700
1701requeued:
1702        BUG_ON(!irqs_disabled());
1703        spin_lock(q->queue_lock);
1704
1705delay_and_out:
1706        blk_delay_queue(q, HZ / 10);
1707out:
1708        dm_table_put(map);
1709}
1710
1711int dm_underlying_device_busy(struct request_queue *q)
1712{
1713        return blk_lld_busy(q);
1714}
1715EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1716
1717static int dm_lld_busy(struct request_queue *q)
1718{
1719        int r;
1720        struct mapped_device *md = q->queuedata;
1721        struct dm_table *map = dm_get_live_table(md);
1722
1723        if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1724                r = 1;
1725        else
1726                r = dm_table_any_busy_target(map);
1727
1728        dm_table_put(map);
1729
1730        return r;
1731}
1732
1733static int dm_any_congested(void *congested_data, int bdi_bits)
1734{
1735        int r = bdi_bits;
1736        struct mapped_device *md = congested_data;
1737        struct dm_table *map;
1738
1739        if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
1740                map = dm_get_live_table(md);
1741                if (map) {
1742                        /*
1743                         * Request-based dm cares only about its own queue when
1744                         * queried about the congestion status of the request_queue
1745                         */
1746                        if (dm_request_based(md))
1747                                r = md->queue->backing_dev_info.state &
1748                                    bdi_bits;
1749                        else
1750                                r = dm_table_any_congested(map, bdi_bits);
1751
1752                        dm_table_put(map);
1753                }
1754        }
1755
1756        return r;
1757}
1758
1759/*-----------------------------------------------------------------
1760 * An IDR is used to keep track of allocated minor numbers.
1761 *---------------------------------------------------------------*/
1762static void free_minor(int minor)
1763{
1764        spin_lock(&_minor_lock);
1765        idr_remove(&_minor_idr, minor);
1766        spin_unlock(&_minor_lock);
1767}
1768
1769/*
1770 * See if the device with a specific minor # is free.
1771 */
1772static int specific_minor(int minor)
1773{
1774        int r, m;
1775
1776        if (minor >= (1 << MINORBITS))
1777                return -EINVAL;
1778
1779        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1780        if (!r)
1781                return -ENOMEM;
1782
1783        spin_lock(&_minor_lock);
1784
1785        if (idr_find(&_minor_idr, minor)) {
1786                r = -EBUSY;
1787                goto out;
1788        }
1789
1790        r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
1791        if (r)
1792                goto out;
1793
1794        if (m != minor) {
1795                idr_remove(&_minor_idr, m);
1796                r = -EBUSY;
1797                goto out;
1798        }
1799
1800out:
1801        spin_unlock(&_minor_lock);
1802        return r;
1803}
1804
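/*
 * Allocate the lowest unused minor number.
 */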
1805static int next_free_minor(int *minor)
1806{
1807        int r, m;
1808
1809        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1810        if (!r)
1811                return -ENOMEM;
1812
1813        spin_lock(&_minor_lock);
1814
1815        r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
1816        if (r)
1817                goto out;
1818
1819        if (m >= (1 << MINORBITS)) {
1820                idr_remove(&_minor_idr, m);
1821                r = -ENOSPC;
1822                goto out;
1823        }
1824
1825        *minor = m;
1826
1827out:
1828        spin_unlock(&_minor_lock);
1829        return r;
1830}
1831
1832static const struct block_device_operations dm_blk_dops;
1833
1834static void dm_wq_work(struct work_struct *work);
1835
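/*
 * Basic queue initialisation shared by bio-based and request-based
 * devices.  Called from alloc_dev() when the device is created and again
 * from dm_init_request_based_queue() once the queue has been fully set
 * up for request-based use.
 */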
1836static void dm_init_md_queue(struct mapped_device *md)
1837{
1838        /*
1839         * Request-based dm devices cannot be stacked on top of bio-based dm
1840         * devices.  The type of this dm device has not been decided yet.
1841         * The type is decided at the first table loading time.
1842         * To prevent problematic device stacking, clear the queue flag
1843         * for request stacking support until then.
1844         *
1845         * This queue is new, so no concurrency on the queue_flags.
1846         */
1847        queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1848
1849        md->queue->queuedata = md;
1850        md->queue->backing_dev_info.congested_fn = dm_any_congested;
1851        md->queue->backing_dev_info.congested_data = md;
1852        blk_queue_make_request(md->queue, dm_request);
1853        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1854        blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1855}
1856
1857/*
1858 * Allocate and initialise a blank device with a given minor.
1859 */
1860static struct mapped_device *alloc_dev(int minor)
1861{
1862        int r;
1863        struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
1864        void *old_md;
1865
1866        if (!md) {
1867                DMWARN("unable to allocate device, out of memory.");
1868                return NULL;
1869        }
1870
1871        if (!try_module_get(THIS_MODULE))
1872                goto bad_module_get;
1873
1874        /* get a minor number for the dev */
1875        if (minor == DM_ANY_MINOR)
1876                r = next_free_minor(&minor);
1877        else
1878                r = specific_minor(minor);
1879        if (r < 0)
1880                goto bad_minor;
1881
1882        md->type = DM_TYPE_NONE;
1883        init_rwsem(&md->io_lock);
1884        mutex_init(&md->suspend_lock);
1885        mutex_init(&md->type_lock);
1886        spin_lock_init(&md->deferred_lock);
1887        rwlock_init(&md->map_lock);
1888        atomic_set(&md->holders, 1);
1889        atomic_set(&md->open_count, 0);
1890        atomic_set(&md->event_nr, 0);
1891        atomic_set(&md->uevent_seq, 0);
1892        INIT_LIST_HEAD(&md->uevent_list);
1893        spin_lock_init(&md->uevent_lock);
1894
1895        md->queue = blk_alloc_queue(GFP_KERNEL);
1896        if (!md->queue)
1897                goto bad_queue;
1898
1899        dm_init_md_queue(md);
1900
1901        md->disk = alloc_disk(1);
1902        if (!md->disk)
1903                goto bad_disk;
1904
1905        atomic_set(&md->pending[0], 0);
1906        atomic_set(&md->pending[1], 0);
1907        init_waitqueue_head(&md->wait);
1908        INIT_WORK(&md->work, dm_wq_work);
1909        init_waitqueue_head(&md->eventq);
1910
1911        md->disk->major = _major;
1912        md->disk->first_minor = minor;
1913        md->disk->fops = &dm_blk_dops;
1914        md->disk->queue = md->queue;
1915        md->disk->private_data = md;
1916        sprintf(md->disk->disk_name, "dm-%d", minor);
1917        add_disk(md->disk);
1918        format_dev_t(md->name, MKDEV(_major, minor));
1919
1920        md->wq = alloc_workqueue("kdmflush",
1921                                 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
1922        if (!md->wq)
1923                goto bad_thread;
1924
1925        md->bdev = bdget_disk(md->disk, 0);
1926        if (!md->bdev)
1927                goto bad_bdev;
1928
1929        bio_init(&md->flush_bio);
1930        md->flush_bio.bi_bdev = md->bdev;
1931        md->flush_bio.bi_rw = WRITE_FLUSH;
1932
1933        /* Populate the mapping, nobody knows we exist yet */
1934        spin_lock(&_minor_lock);
1935        old_md = idr_replace(&_minor_idr, md, minor);
1936        spin_unlock(&_minor_lock);
1937
1938        BUG_ON(old_md != MINOR_ALLOCED);
1939
1940        return md;
1941
1942bad_bdev:
1943        destroy_workqueue(md->wq);
1944bad_thread:
1945        del_gendisk(md->disk);
1946        put_disk(md->disk);
1947bad_disk:
1948        blk_cleanup_queue(md->queue);
1949bad_queue:
1950        free_minor(minor);
1951bad_minor:
1952        module_put(THIS_MODULE);
1953bad_module_get:
1954        kfree(md);
1955        return NULL;
1956}
1957
1958static void unlock_fs(struct mapped_device *md);
1959
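/*
 * Undo alloc_dev(): release the workqueue, mempools, gendisk, queue and
 * minor number.  Called from __dm_destroy() once there are no holders
 * left (or the device is being forcibly removed).
 */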
1960static void free_dev(struct mapped_device *md)
1961{
1962        int minor = MINOR(disk_devt(md->disk));
1963
1964        unlock_fs(md);
1965        bdput(md->bdev);
1966        destroy_workqueue(md->wq);
1967        if (md->tio_pool)
1968                mempool_destroy(md->tio_pool);
1969        if (md->io_pool)
1970                mempool_destroy(md->io_pool);
1971        if (md->bs)
1972                bioset_free(md->bs);
1973        blk_integrity_unregister(md->disk);
1974        del_gendisk(md->disk);
1975        free_minor(minor);
1976
1977        spin_lock(&_minor_lock);
1978        md->disk->private_data = NULL;
1979        spin_unlock(&_minor_lock);
1980
1981        put_disk(md->disk);
1982        blk_cleanup_queue(md->queue);
1983        module_put(THIS_MODULE);
1984        kfree(md);
1985}
1986
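/*
 * Adopt the mempools that were allocated for the table's type, unless
 * this md already has a full set from a previous table.  Either way the
 * table's copies are released afterwards.
 */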
1987static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1988{
1989        struct dm_md_mempools *p;
1990
1991        if (md->io_pool && md->tio_pool && md->bs)
1992                /* the md already has necessary mempools */
1993                goto out;
1994
1995        p = dm_table_get_md_mempools(t);
1996        BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
1997
1998        md->io_pool = p->io_pool;
1999        p->io_pool = NULL;
2000        md->tio_pool = p->tio_pool;
2001        p->tio_pool = NULL;
2002        md->bs = p->bs;
2003        p->bs = NULL;
2004
2005out:
2006        /* mempool bind completed; the table no longer needs its mempools */
2007        dm_table_free_md_mempools(t);
2008}
2009
2010/*
2011 * Table event callback, registered by __bind(): deliver queued uevents.
2012 */
2013static void event_callback(void *context)
2014{
2015        unsigned long flags;
2016        LIST_HEAD(uevents);
2017        struct mapped_device *md = (struct mapped_device *) context;
2018
2019        spin_lock_irqsave(&md->uevent_lock, flags);
2020        list_splice_init(&md->uevent_list, &uevents);
2021        spin_unlock_irqrestore(&md->uevent_lock, flags);
2022
2023        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2024
2025        atomic_inc(&md->event_nr);
2026        wake_up(&md->eventq);
2027}
2028
2029/*
2030 * Protected by md->suspend_lock obtained by dm_swap_table().
2031 */
2032static void __set_size(struct mapped_device *md, sector_t size)
2033{
2034        set_capacity(md->disk, size);
2035
2036        i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2037}
2038
2039/*
2040 * Return 1 if the queue has a compulsory merge_bvec_fn.
2041 *
2042 * If this function returns 0, then the device is either a non-dm
2043 * device without a merge_bvec_fn, or it is a dm device that is
2044 * able to split any bios it receives that are too big.
2045 */
2046int dm_queue_merge_is_compulsory(struct request_queue *q)
2047{
2048        struct mapped_device *dev_md;
2049
2050        if (!q->merge_bvec_fn)
2051                return 0;
2052
2053        if (q->make_request_fn == dm_request) {
2054                dev_md = q->queuedata;
2055                if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2056                        return 0;
2057        }
2058
2059        return 1;
2060}
2061
2062static int dm_device_merge_is_compulsory(struct dm_target *ti,
2063                                         struct dm_dev *dev, sector_t start,
2064                                         sector_t len, void *data)
2065{
2066        struct block_device *bdev = dev->bdev;
2067        struct request_queue *q = bdev_get_queue(bdev);
2068
2069        return dm_queue_merge_is_compulsory(q);
2070}
2071
2072/*
2073 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2074 * on the properties of the underlying devices.
2075 */
2076static int dm_table_merge_is_optional(struct dm_table *table)
2077{
2078        unsigned i = 0;
2079        struct dm_target *ti;
2080
2081        while (i < dm_table_get_num_targets(table)) {
2082                ti = dm_table_get_target(table, i++);
2083
2084                if (ti->type->iterate_devices &&
2085                    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2086                        return 0;
2087        }
2088
2089        return 1;
2090}
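
/*
 * __bind() caches the result of dm_table_merge_is_optional() in the
 * DMF_MERGE_IS_OPTIONAL flag, which dm_queue_merge_is_compulsory() reads
 * when this dm device is itself used underneath another dm device.
 */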
2091
2092/*
2093 * Returns old map, which caller must destroy.
2094 */
2095static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2096                               struct queue_limits *limits)
2097{
2098        struct dm_table *old_map;
2099        struct request_queue *q = md->queue;
2100        sector_t size;
2101        unsigned long flags;
2102        int merge_is_optional;
2103
2104        size = dm_table_get_size(t);
2105
2106        /*
2107         * Wipe any geometry if the size of the table changed.
2108         */
2109        if (size != get_capacity(md->disk))
2110                memset(&md->geometry, 0, sizeof(md->geometry));
2111
2112        __set_size(md, size);
2113
2114        dm_table_event_callback(t, event_callback, md);
2115
2116        /*
2117         * If the old table wasn't request-based, the queue has not been
2118         * stopped during suspension, so stop it now to prevent I/O from
2119         * being mapped before resume.
2120         * This must be done before setting the queue restrictions,
2121         * because request-based dm may start running right after they are set.
2122         */
2123        if (dm_table_request_based(t) && !blk_queue_stopped(q))
2124                stop_queue(q);
2125
2126        __bind_mempools(md, t);
2127
2128        merge_is_optional = dm_table_merge_is_optional(t);
2129
2130        write_lock_irqsave(&md->map_lock, flags);
2131        old_map = md->map;
2132        md->map = t;
2133        md->immutable_target_type = dm_table_get_immutable_target_type(t);
2134
2135        dm_table_set_restrictions(t, q, limits);
2136        if (merge_is_optional)
2137                set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2138        else
2139                clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2140        write_unlock_irqrestore(&md->map_lock, flags);
2141
2142        return old_map;
2143}
2144
2145/*
2146 * Returns unbound table for the caller to free.
2147 */
2148static struct dm_table *__unbind(struct mapped_device *md)
2149{
2150        struct dm_table *map = md->map;
2151        unsigned long flags;
2152
2153        if (!map)
2154                return NULL;
2155
2156        dm_table_event_callback(map, NULL, NULL);
2157        write_lock_irqsave(&md->map_lock, flags);
2158        md->map = NULL;
2159        write_unlock_irqrestore(&md->map_lock, flags);
2160
2161        return map;
2162}
2163
2164/*
2165 * Constructor for a new device.
2166 */
2167int dm_create(int minor, struct mapped_device **result)
2168{
2169        struct mapped_device *md;
2170
2171        md = alloc_dev(minor);
2172        if (!md)
2173                return -ENXIO;
2174
2175        dm_sysfs_init(md);
2176
2177        *result = md;
2178        return 0;
2179}
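
/*
 * The usual lifecycle (driven from dm-ioctl.c, so take this only as a
 * rough sketch): dm_create(), a table load plus dm_setup_md_queue(),
 * dm_swap_table() and dm_resume() to make the device live, and finally
 * dm_destroy() to tear it down again.
 */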
2180
2181/*
2182 * Functions to manage md->type.
2183 * All are required to hold md->type_lock.
2184 */
2185void dm_lock_md_type(struct mapped_device *md)
2186{
2187        mutex_lock(&md->type_lock);
2188}
2189
2190void dm_unlock_md_type(struct mapped_device *md)
2191{
2192        mutex_unlock(&md->type_lock);
2193}
2194
2195void dm_set_md_type(struct mapped_device *md, unsigned type)
2196{
2197        md->type = type;
2198}
2199
2200unsigned dm_get_md_type(struct mapped_device *md)
2201{
2202        return md->type;
2203}
2204
2205struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2206{
2207        return md->immutable_target_type;
2208}
2209
2210/*
2211 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2212 */
2213static int dm_init_request_based_queue(struct mapped_device *md)
2214{
2215        struct request_queue *q = NULL;
2216
2217        if (md->queue->elevator)
2218                return 1;
2219
2220        /* Fully initialize the queue */
2221        q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2222        if (!q)
2223                return 0;
2224
2225        md->queue = q;
2226        dm_init_md_queue(md);
2227        blk_queue_softirq_done(md->queue, dm_softirq_done);
2228        blk_queue_prep_rq(md->queue, dm_prep_fn);
2229        blk_queue_lld_busy(md->queue, dm_lld_busy);
2230
2231        elv_register_queue(md->queue);
2232
2233        return 1;
2234}
2235
2236/*
2237 * Setup the DM device's queue based on md's type
2238 */
2239int dm_setup_md_queue(struct mapped_device *md)
2240{
2241        if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2242            !dm_init_request_based_queue(md)) {
2243                DMWARN("Cannot initialize queue for request-based mapped device");
2244                return -EINVAL;
2245        }
2246
2247        return 0;
2248}
2249
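/*
 * Look up a mapped_device by dev_t under _minor_lock, without taking a
 * reference.  Returns NULL for stale minors and for devices that are
 * being deleted or freed; dm_get_md() is the reference-taking wrapper.
 */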
2250static struct mapped_device *dm_find_md(dev_t dev)
2251{
2252        struct mapped_device *md;
2253        unsigned minor = MINOR(dev);
2254
2255        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2256                return NULL;
2257
2258        spin_lock(&_minor_lock);
2259
2260        md = idr_find(&_minor_idr, minor);
2261        if (md && (md == MINOR_ALLOCED ||
2262                   (MINOR(disk_devt(dm_disk(md))) != minor) ||
2263                   dm_deleting_md(md) ||
2264                   test_bit(DMF_FREEING, &md->flags))) {
2265                md = NULL;
2266                goto out;
2267        }
2268
2269out:
2270        spin_unlock(&_minor_lock);
2271
2272        return md;
2273}
2274
2275struct mapped_device *dm_get_md(dev_t dev)
2276{
2277        struct mapped_device *md = dm_find_md(dev);
2278
2279        if (md)
2280                dm_get(md);
2281
2282        return md;
2283}
2284EXPORT_SYMBOL_GPL(dm_get_md);
2285
2286void *dm_get_mdptr(struct mapped_device *md)
2287{
2288        return md->interface_ptr;
2289}
2290
2291void dm_set_mdptr(struct mapped_device *md, void *ptr)
2292{
2293        md->interface_ptr = ptr;
2294}
2295
2296void dm_get(struct mapped_device *md)
2297{
2298        atomic_inc(&md->holders);
2299        BUG_ON(test_bit(DMF_FREEING, &md->flags));
2300}
2301
2302const char *dm_device_name(struct mapped_device *md)
2303{
2304        return md->name;
2305}
2306EXPORT_SYMBOL_GPL(dm_device_name);
2307
2308static void __dm_destroy(struct mapped_device *md, bool wait)
2309{
2310        struct dm_table *map;
2311
2312        might_sleep();
2313
2314        spin_lock(&_minor_lock);
2315        map = dm_get_live_table(md);
2316        idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2317        set_bit(DMF_FREEING, &md->flags);
2318        spin_unlock(&_minor_lock);
2319
2320        if (!dm_suspended_md(md)) {
2321                dm_table_presuspend_targets(map);
2322                dm_table_postsuspend_targets(map);
2323        }
2324
2325        /*
2326         * Rare, but some I/O requests may still be completing.
2327         * Wait for all references to disappear: no one may take a new
2328         * reference on the mapped_device after its state becomes
2329         * DMF_FREEING.
2330         */
2331        if (wait)
2332                while (atomic_read(&md->holders))
2333                        msleep(1);
2334        else if (atomic_read(&md->holders))
2335                DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2336                       dm_device_name(md), atomic_read(&md->holders));
2337
2338        dm_sysfs_exit(md);
2339        dm_table_put(map);
2340        dm_table_destroy(__unbind(md));
2341        free_dev(md);
2342}
2343
2344void dm_destroy(struct mapped_device *md)
2345{
2346        __dm_destroy(md, true);
2347}
2348
2349void dm_destroy_immediate(struct mapped_device *md)
2350{
2351        __dm_destroy(md, false);
2352}
2353
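/*
 * Drop a reference taken with dm_get().  The final teardown is not done
 * here: __dm_destroy() waits for md->holders to reach zero before
 * freeing the device.
 */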
2354void dm_put(struct mapped_device *md)
2355{
2356        atomic_dec(&md->holders);
2357}
2358EXPORT_SYMBOL_GPL(dm_put);
2359
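/*
 * Wait until there is no I/O in flight on the device (md_in_flight()
 * drops to zero).  With TASK_INTERRUPTIBLE a pending signal aborts the
 * wait and -EINTR is returned; TASK_UNINTERRUPTIBLE waits unconditionally.
 */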
2360static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2361{
2362        int r = 0;
2363        DECLARE_WAITQUEUE(wait, current);
2364
2365        add_wait_queue(&md->wait, &wait);
2366
2367        while (1) {
2368                set_current_state(interruptible);
2369
2370                if (!md_in_flight(md))
2371                        break;
2372
2373                if (interruptible == TASK_INTERRUPTIBLE &&
2374                    signal_pending(current)) {
2375                        r = -EINTR;
2376                        break;
2377                }
2378
2379                io_schedule();
2380        }
2381        set_current_state(TASK_RUNNING);
2382
2383        remove_wait_queue(&md->wait, &wait);
2384
2385        return r;
2386}
2387
2388/*
2389 * Process the deferred bios
2390 */
2391static void dm_wq_work(struct work_struct *work)
2392{
2393        struct mapped_device *md = container_of(work, struct mapped_device,
2394                                                work);
2395        struct bio *c;
2396
2397        down_read(&md->io_lock);
2398
2399        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2400                spin_lock_irq(&md->deferred_lock);
2401                c = bio_list_pop(&md->deferred);
2402                spin_unlock_irq(&md->deferred_lock);
2403
2404                if (!c)
2405                        break;
2406
2407                up_read(&md->io_lock);
2408
2409                if (dm_request_based(md))
2410                        generic_make_request(c);
2411                else
2412                        __split_and_process_bio(md, c);
2413
2414                down_read(&md->io_lock);
2415        }
2416
2417        up_read(&md->io_lock);
2418}
2419
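/*
 * Allow bios to be processed again after a suspend attempt and kick the
 * per-device workqueue so dm_wq_work() drains whatever was deferred
 * while DMF_BLOCK_IO_FOR_SUSPEND was set.
 */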
2420static void dm_queue_flush(struct mapped_device *md)
2421{
2422        clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2423        smp_mb__after_clear_bit();
2424        queue_work(md->wq, &md->work);
2425}
2426
2427/*
2428 * Swap in a new table, returning the old one for the caller to destroy.
2429 */
2430struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2431{
2432        struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
2433        struct queue_limits limits;
2434        int r;
2435
2436        mutex_lock(&md->suspend_lock);
2437
2438        /* device must be suspended */
2439        if (!dm_suspended_md(md))
2440                goto out;
2441
2442        /*
2443         * If the new table has no data devices, retain the existing limits.
2444         * This helps multipath with queue_if_no_path: if all paths disappear,
2445         * new I/O is queued based on these limits, and then some paths
2446         * reappear.
2447         */
2448        if (dm_table_has_no_data_devices(table)) {
2449                live_map = dm_get_live_table(md);
2450                if (live_map)
2451                        limits = md->queue->limits;
2452                dm_table_put(live_map);
2453        }
2454
2455        r = dm_calculate_queue_limits(table, &limits);
2456        if (r) {
2457                map = ERR_PTR(r);
2458                goto out;
2459        }
2460
2461        map = __bind(md, table, &limits);
2462
2463out:
2464        mutex_unlock(&md->suspend_lock);
2465        return map;
2466}
2467
2468/*
2469 * Functions to lock and unlock any filesystem running on the
2470 * device.
2471 */
2472static int lock_fs(struct mapped_device *md)
2473{
2474        int r;
2475
2476        WARN_ON(md->frozen_sb);
2477
2478        md->frozen_sb = freeze_bdev(md->bdev);
2479        if (IS_ERR(md->frozen_sb)) {
2480                r = PTR_ERR(md->frozen_sb);
2481                md->frozen_sb = NULL;
2482                return r;
2483        }
2484
2485        set_bit(DMF_FROZEN, &md->flags);
2486
2487        return 0;
2488}
2489
2490static void unlock_fs(struct mapped_device *md)
2491{
2492        if (!test_bit(DMF_FROZEN, &md->flags))
2493                return;
2494
2495        thaw_bdev(md->bdev, md->frozen_sb);
2496        md->frozen_sb = NULL;
2497        clear_bit(DMF_FROZEN, &md->flags);
2498}
2499
2500/*
2501 * We need to be able to change a mapping table under a mounted
2502 * filesystem.  For example we might want to move some data in
2503 * the background.  Before the table can be swapped with
2504 * dm_swap_table(), dm_suspend() must be called to flush any in-flight
2505 * bios and ensure that any further I/O gets deferred.
2506 */
2507/*
2508 * Suspend mechanism in request-based dm.
2509 *
2510 * 1. Flush all I/Os by lock_fs() if needed.
2511 * 2. Stop dispatching any I/O by stopping the request_queue.
2512 * 3. Wait for all in-flight I/Os to be completed or requeued.
2513 *
2514 * To abort suspend, start the request_queue.
2515 */
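/*
 * Table replacement, as typically driven from dm-ioctl.c (illustrative
 * sketch only, not the authoritative call path):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR_OR_NULL(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */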
2516int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
2517{
2518        struct dm_table *map = NULL;
2519        int r = 0;
2520        int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2521        int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
2522
2523        mutex_lock(&md->suspend_lock);
2524
2525        if (dm_suspended_md(md)) {
2526                r = -EINVAL;
2527                goto out_unlock;
2528        }
2529
2530        map = dm_get_live_table(md);
2531
2532        /*
2533         * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2534         * This flag is cleared before dm_suspend returns.
2535         */
2536        if (noflush)
2537                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2538
2539        /* This does not get reverted if there's an error later. */
2540        dm_table_presuspend_targets(map);
2541
2542        /*
2543         * Flush I/O to the device.
2544         * Any I/O submitted after lock_fs() may not be flushed.
2545         * noflush takes precedence over do_lockfs.
2546         * (lock_fs() flushes I/Os and waits for them to complete.)
2547         */
2548        if (!noflush && do_lockfs) {
2549                r = lock_fs(md);
2550                if (r)
2551                        goto out;
2552        }
2553
2554        /*
2555         * Here we must make sure that no processes are submitting requests
2556         * to target drivers i.e. no one may be executing
2557         * __split_and_process_bio. This is called from dm_request and
2558         * dm_wq_work.
2559         *
2560         * To get all processes out of __split_and_process_bio in dm_request,
2561         * we take the write lock. To prevent any process from reentering
2562         * __split_and_process_bio from dm_request and quiesce the thread
2563         * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2564         * flush_workqueue(md->wq).
2565         */
2566        down_write(&md->io_lock);
2567        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2568        up_write(&md->io_lock);
2569
2570        /*
2571         * Stop md->queue before flushing md->wq in case request-based
2572         * dm defers requests to md->wq from md->queue.
2573         */
2574        if (dm_request_based(md))
2575                stop_queue(md->queue);
2576
2577        flush_workqueue(md->wq);
2578
2579        /*
2580         * At this point no more requests are entering target request routines.
2581         * We call dm_wait_for_completion to wait for all existing requests
2582         * to finish.
2583         */
2584        r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
2585
2586        down_write(&md->io_lock);
2587        if (noflush)
2588                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2589        up_write(&md->io_lock);
2590
2591        /* were we interrupted? */
2592        if (r < 0) {
2593                dm_queue_flush(md);
2594
2595                if (dm_request_based(md))
2596                        start_queue(md->queue);
2597
2598                unlock_fs(md);
2599                goto out; /* pushback list is already flushed, so skip flush */
2600        }
2601
2602        /*
2603         * If dm_wait_for_completion returned 0, the device is completely
2604         * quiescent now. There is no request-processing activity. All new
2605         * requests are being added to md->deferred list.
2606         */
2607
2608        set_bit(DMF_SUSPENDED, &md->flags);
2609
2610        dm_table_postsuspend_targets(map);
2611
2612out:
2613        dm_table_put(map);
2614
2615out_unlock:
2616        mutex_unlock(&md->suspend_lock);
2617        return r;
2618}
2619
2620int dm_resume(struct mapped_device *md)
2621{
2622        int r = -EINVAL;
2623        struct dm_table *map = NULL;
2624
2625        mutex_lock(&md->suspend_lock);
2626        if (!dm_suspended_md(md))
2627                goto out;
2628
2629        map = dm_get_live_table(md);
2630        if (!map || !dm_table_get_size(map))
2631                goto out;
2632
2633        r = dm_table_resume_targets(map);
2634        if (r)
2635                goto out;
2636
2637        dm_queue_flush(md);
2638
2639        /*
2640         * Flushing deferred I/Os must be done after the targets are resumed
2641         * so that their mappings work correctly.
2642         * Request-based dm queues the deferred I/Os in its request_queue.
2643         */
2644        if (dm_request_based(md))
2645                start_queue(md->queue);
2646
2647        unlock_fs(md);
2648
2649        clear_bit(DMF_SUSPENDED, &md->flags);
2650
2651        r = 0;
2652out:
2653        dm_table_put(map);
2654        mutex_unlock(&md->suspend_lock);
2655
2656        return r;
2657}
2658
2659/*-----------------------------------------------------------------
2660 * Event notification.
2661 *---------------------------------------------------------------*/
2662int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
2663                       unsigned cookie)
2664{
2665        char udev_cookie[DM_COOKIE_LENGTH];
2666        char *envp[] = { udev_cookie, NULL };
2667
2668        if (!cookie)
2669                return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
2670        else {
2671                snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2672                         DM_COOKIE_ENV_VAR_NAME, cookie);
2673                return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2674                                          action, envp);
2675        }
2676}
2677
2678uint32_t dm_next_uevent_seq(struct mapped_device *md)
2679{
2680        return atomic_add_return(1, &md->uevent_seq);
2681}
2682
2683uint32_t dm_get_event_nr(struct mapped_device *md)
2684{
2685        return atomic_read(&md->event_nr);
2686}
2687
2688int dm_wait_event(struct mapped_device *md, int event_nr)
2689{
2690        return wait_event_interruptible(md->eventq,
2691                        (event_nr != atomic_read(&md->event_nr)));
2692}
2693
2694void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2695{
2696        unsigned long flags;
2697
2698        spin_lock_irqsave(&md->uevent_lock, flags);
2699        list_add(elist, &md->uevent_list);
2700        spin_unlock_irqrestore(&md->uevent_lock, flags);
2701}
2702
2703/*
2704 * The gendisk is only valid as long as you have a reference
2705 * count on 'md'.
2706 */
2707struct gendisk *dm_disk(struct mapped_device *md)
2708{
2709        return md->disk;
2710}
2711
2712struct kobject *dm_kobject(struct mapped_device *md)
2713{
2714        return &md->kobj;
2715}
2716
2717/*
2718 * struct mapped_device should not be exported outside of dm.c
2719 * so use this check to verify that the kobj is part of the md structure.
2720 */
2721struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2722{
2723        struct mapped_device *md;
2724
2725        md = container_of(kobj, struct mapped_device, kobj);
2726        if (&md->kobj != kobj)
2727                return NULL;
2728
2729        if (test_bit(DMF_FREEING, &md->flags) ||
2730            dm_deleting_md(md))
2731                return NULL;
2732
2733        dm_get(md);
2734        return md;
2735}
2736
2737int dm_suspended_md(struct mapped_device *md)
2738{
2739        return test_bit(DMF_SUSPENDED, &md->flags);
2740}
2741
2742int dm_suspended(struct dm_target *ti)
2743{
2744        return dm_suspended_md(dm_table_get_md(ti->table));
2745}
2746EXPORT_SYMBOL_GPL(dm_suspended);
2747
2748int dm_noflush_suspending(struct dm_target *ti)
2749{
2750        return __noflush_suspending(dm_table_get_md(ti->table));
2751}
2752EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2753
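/*
 * Allocate the io/tio mempools and bioset that a table of the given type
 * needs: bio-based tables draw from _io_cache/_tio_cache, request-based
 * ones from the _rq_bio_info_cache/_rq_tio_cache slabs.  The pools are
 * later handed over to the mapped_device by __bind_mempools().
 */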
2754struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
2755{
2756        struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2757        unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
2758
2759        if (!pools)
2760                return NULL;
2761
2762        pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2763                         mempool_create_slab_pool(MIN_IOS, _io_cache) :
2764                         mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2765        if (!pools->io_pool)
2766                goto free_pools_and_out;
2767
2768        pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2769                          mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2770                          mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2771        if (!pools->tio_pool)
2772                goto free_io_pool_and_out;
2773
2774        pools->bs = bioset_create(pool_size, 0);
2775        if (!pools->bs)
2776                goto free_tio_pool_and_out;
2777
2778        if (integrity && bioset_integrity_create(pools->bs, pool_size))
2779                goto free_bioset_and_out;
2780
2781        return pools;
2782
2783free_bioset_and_out:
2784        bioset_free(pools->bs);
2785
2786free_tio_pool_and_out:
2787        mempool_destroy(pools->tio_pool);
2788
2789free_io_pool_and_out:
2790        mempool_destroy(pools->io_pool);
2791
2792free_pools_and_out:
2793        kfree(pools);
2794
2795        return NULL;
2796}
2797
2798void dm_free_md_mempools(struct dm_md_mempools *pools)
2799{
2800        if (!pools)
2801                return;
2802
2803        if (pools->io_pool)
2804                mempool_destroy(pools->io_pool);
2805
2806        if (pools->tio_pool)
2807                mempool_destroy(pools->tio_pool);
2808
2809        if (pools->bs)
2810                bioset_free(pools->bs);
2811
2812        kfree(pools);
2813}
2814
2815static const struct block_device_operations dm_blk_dops = {
2816        .open = dm_blk_open,
2817        .release = dm_blk_close,
2818        .ioctl = dm_blk_ioctl,
2819        .getgeo = dm_blk_getgeo,
2820        .owner = THIS_MODULE
2821};
2822
2823EXPORT_SYMBOL(dm_get_mapinfo);
2824
2825/*
2826 * module hooks
2827 */
2828module_init(dm_init);
2829module_exit(dm_exit);
2830
2831module_param(major, uint, 0);
2832MODULE_PARM_DESC(major, "The major number of the device mapper");
2833MODULE_DESCRIPTION(DM_NAME " driver");
2834MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2835MODULE_LICENSE("GPL");
2836