linux/drivers/md/dm-thin.c
/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX   "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
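
/*
 * For example, with the usual SECTOR_SHIFT of 9 (512-byte sectors) these
 * bounds work out to 128 and 2097152 sectors respectively.
 */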

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).  When you take an internal snapshot you clone the root node
 * of the origin btree.  After this there is no concept of an origin or a
 * snapshot.  They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic.  If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin.  The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block.  Obviously
 * including all devices that share this block.  (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block.  This step can be
 * skipped if the io covers the whole block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping).  This act of inserting breaks some
 * sharing of btree nodes between the two devices.  Breaking sharing only
 * affects the btree of that specific device.  Btrees for the other
 * devices that share the block never change.  The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues.  We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one).  This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block.  As it would after
 * the commit.
 *
 * The downside of this scheme is that the timestamp magic isn't perfect:
 * it will continue to think that a data block in the snapshot device is
 * shared even after the write to the origin has broken sharing.  I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block.  At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
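
/*
 * Two key namespaces are used: a "virtual" key identifies a block within
 * a thin device's logical address space, while a "data" key identifies a
 * physical block on the pool's data device.  Detaining bios against both
 * lets the pool serialise provisioning and the breaking of sharing.
 */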
static void build_data_key(struct dm_thin_device *td,
                           dm_block_t b, struct dm_cell_key *key)
{
        key->virtual = 0;
        key->dev = dm_thin_dev_id(td);
        key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
                              struct dm_cell_key *key)
{
        key->virtual = 1;
        key->dev = dm_thin_dev_id(td);
        key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device.  It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 3 modes.  The modes are ordered by increasing
 * degradation so that they can be compared.
 */
enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
        PM_READ_ONLY,           /* metadata may not be changed */
        PM_FAIL,                /* all I/O fails */
};
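
/*
 * E.g. bind_control_target() relies on this ordering: a pool that has
 * degraded to PM_READ_ONLY or PM_FAIL must not silently be upgraded back
 * to PM_WRITE by a table reload.
 */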

struct pool_features {
        enum pool_mode mode;

        bool zero_new_blocks:1;
        bool discard_enabled:1;
        bool discard_passdown:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
        struct list_head list;
        struct dm_target *ti;   /* Only set if a pool target is bound */

        struct mapped_device *pool_md;
        struct block_device *md_dev;
        struct dm_pool_metadata *pmd;

        dm_block_t low_water_blocks;
        uint32_t sectors_per_block;
        int sectors_per_block_shift;

        struct pool_features pf;
        unsigned low_water_triggered:1; /* A dm event has been sent */
        unsigned no_free_space:1;       /* A -ENOSPC warning has been issued */

        struct dm_bio_prison *prison;
        struct dm_kcopyd_client *copier;

        struct workqueue_struct *wq;
        struct work_struct worker;
        struct delayed_work waker;

        unsigned long last_commit_jiffies;
        unsigned ref_count;

        spinlock_t lock;
        struct bio_list deferred_bios;
        struct bio_list deferred_flush_bios;
        struct list_head prepared_mappings;
        struct list_head prepared_discards;

        struct bio_list retry_on_resume_list;

        struct dm_deferred_set *shared_read_ds;
        struct dm_deferred_set *all_io_ds;

        struct dm_thin_new_mapping *next_mapping;
        mempool_t *mapping_pool;

        process_bio_fn process_bio;
        process_bio_fn process_discard;

        process_mapping_fn process_prepared_mapping;
        process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void set_pool_mode(struct pool *pool, enum pool_mode mode);

/*
 * Target context for a pool.
 */
struct pool_c {
        struct dm_target *ti;
        struct pool *pool;
        struct dm_dev *data_dev;
        struct dm_dev *metadata_dev;
        struct dm_target_callbacks callbacks;

        dm_block_t low_water_blocks;
        struct pool_features requested_pf; /* Features requested during table load */
        struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
        struct dm_dev *pool_dev;
        struct dm_dev *origin_dev;
        dm_thin_id dev_id;

        struct pool *pool;
        struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
        queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
                      struct dm_bio_prison_cell **cell_result)
{
        int r;
        struct dm_bio_prison_cell *cell_prealloc;

        /*
         * Allocate a cell from the prison's mempool.
         * This might block but it can't fail.
         */
        cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

        r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
        if (r)
                /*
                 * We reused an old cell; we can get rid of
                 * the new one.
                 */
                dm_bio_prison_free_cell(pool->prison, cell_prealloc);

        return r;
}
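
/*
 * A zero return means the bio is now the exclusive holder of a freshly
 * inserted cell; a non-zero return means the key was already detained and
 * the bio has been queued in the existing cell, so the caller should not
 * process it further for now.
 */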

static void cell_release(struct pool *pool,
                         struct dm_bio_prison_cell *cell,
                         struct bio_list *bios)
{
        dm_cell_release(pool->prison, cell, bios);
        dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
                                   struct dm_bio_prison_cell *cell,
                                   struct bio_list *bios)
{
        dm_cell_release_no_holder(pool->prison, cell, bios);
        dm_bio_prison_free_cell(pool->prison, cell);
}

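/*
 * Like cell_release_no_holder(), but defers the released bios to the
 * pool's worker and leaves the cell itself to its owner.  thin_bio_map()
 * uses cells on its stack, which must not be returned to the prison's
 * mempool, hence the "no_free" variant.
 */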
static void cell_defer_no_holder_no_free(struct thin_c *tc,
                                         struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        wake_worker(pool);
}

static void cell_error(struct pool *pool,
                       struct dm_bio_prison_cell *cell)
{
        dm_cell_error(pool->prison, cell);
        dm_bio_prison_free_cell(pool->prison, cell);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
        struct mutex mutex;
        struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
        mutex_init(&dm_thin_pool_table.mutex);
        INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
        list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
        list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
        struct pool *pool = NULL, *tmp;

        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

        list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
                if (tmp->pool_md == md) {
                        pool = tmp;
                        break;
                }
        }

        return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
        struct pool *pool = NULL, *tmp;

        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

        list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
                if (tmp->md_dev == md_dev) {
                        pool = tmp;
                        break;
                }
        }

        return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
        struct thin_c *tc;
        struct dm_deferred_entry *shared_read_entry;
        struct dm_deferred_entry *all_io_entry;
        struct dm_thin_new_mapping *overwrite_mapping;
};

static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
        struct bio *bio;
        struct bio_list bios;

        bio_list_init(&bios);
        bio_list_merge(&bios, master);
        bio_list_init(master);

        while ((bio = bio_list_pop(&bios))) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

                if (h->tc == tc)
                        bio_endio(bio, DM_ENDIO_REQUEUE);
                else
                        bio_list_add(master, bio);
        }
}

static void requeue_io(struct thin_c *tc)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        __requeue_bio_list(tc, &pool->deferred_bios);
        __requeue_bio_list(tc, &pool->retry_on_resume_list);
        spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
        return pool->sectors_per_block_shift >= 0;
}

static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
        struct pool *pool = tc->pool;
        sector_t block_nr = bio->bi_sector;

        if (block_size_is_power_of_two(pool))
                block_nr >>= pool->sectors_per_block_shift;
        else
                (void) sector_div(block_nr, pool->sectors_per_block);

        return block_nr;
}
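
/*
 * For example, with 128 sectors per block (a 64KB, power-of-two block
 * size, so sectors_per_block_shift == 7) a bio starting at sector 300
 * falls in block 300 >> 7 == 2.
 */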

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
        struct pool *pool = tc->pool;
        sector_t bi_sector = bio->bi_sector;

        bio->bi_bdev = tc->pool_dev->bdev;
        if (block_size_is_power_of_two(pool))
                bio->bi_sector = (block << pool->sectors_per_block_shift) |
                                (bi_sector & (pool->sectors_per_block - 1));
        else
                bio->bi_sector = (block * pool->sectors_per_block) +
                                 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
        bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
        return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
                dm_thin_changed_this_transaction(tc->td);
}

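/*
 * Discards are skipped here because process_discard() accounts for them
 * separately, adding the prepared discard to all_io_ds itself.
 */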
static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
        struct dm_thin_endio_hook *h;

        if (bio->bi_rw & REQ_DISCARD)
                return;

        h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        if (!bio_triggers_commit(tc, bio)) {
                generic_make_request(bio);
                return;
        }

        /*
         * Complete bio with an error if earlier I/O caused changes to
         * the metadata that can't be committed, e.g. due to I/O errors
         * on the metadata device.
         */
        if (dm_thin_aborted_changes(tc->td)) {
                bio_io_error(bio);
                return;
        }

        /*
         * Batch together any bios that trigger commits and then issue a
         * single commit for them in process_deferred_bios().
         */
        spin_lock_irqsave(&pool->lock, flags);
        bio_list_add(&pool->deferred_flush_bios, bio);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
        remap_to_origin(tc, bio);
        issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
                            dm_block_t block)
{
        remap(tc, bio, block);
        issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
        struct list_head list;

        unsigned quiesced:1;
        unsigned prepared:1;
        unsigned pass_discard:1;

        struct thin_c *tc;
        dm_block_t virt_block;
        dm_block_t data_block;
        struct dm_bio_prison_cell *cell, *cell2;
        int err;

        /*
         * If the bio covers the whole area of a block then we can avoid
         * zeroing or copying.  Instead this bio is hooked.  The bio will
         * still be in the cell, so care has to be taken to avoid issuing
         * the bio twice.
         */
        struct bio *bio;
        bio_end_io_t *saved_bi_end_io;
};
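
/*
 * A new mapping becomes eligible for processing once it is both quiesced
 * (no in-flight io to the old shared block remains) and prepared (the
 * copy or zero has completed); see __maybe_add_mapping() below.
 */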

static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
        struct pool *pool = m->tc->pool;

        if (m->quiesced && m->prepared) {
                list_add(&m->list, &pool->prepared_mappings);
                wake_worker(pool);
        }
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
        unsigned long flags;
        struct dm_thin_new_mapping *m = context;
        struct pool *pool = m->tc->pool;

        m->err = read_err || write_err ? -EIO : 0;

        spin_lock_irqsave(&pool->lock, flags);
        m->prepared = 1;
        __maybe_add_mapping(m);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
        unsigned long flags;
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct dm_thin_new_mapping *m = h->overwrite_mapping;
        struct pool *pool = m->tc->pool;

        m->err = err;

        spin_lock_irqsave(&pool->lock, flags);
        m->prepared = 1;
        __maybe_add_mapping(m);
        spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        cell_release(pool, cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        wake_worker(pool);
}

/*
 * Same as cell_defer above, except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        cell_release_no_holder(pool, cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
        if (m->bio)
                m->bio->bi_end_io = m->saved_bi_end_io;
        cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
        struct thin_c *tc = m->tc;
        struct pool *pool = tc->pool;
        struct bio *bio;
        int r;

        bio = m->bio;
        if (bio)
                bio->bi_end_io = m->saved_bi_end_io;

        if (m->err) {
                cell_error(pool, m->cell);
                goto out;
        }

        /*
         * Commit the prepared block into the mapping btree.
         * Any I/O for this block arriving after this point will get
         * remapped to it directly.
         */
        r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
        if (r) {
                DMERR_LIMIT("dm_thin_insert_block() failed");
                cell_error(pool, m->cell);
                goto out;
        }

        /*
         * Release any bios held while the block was being provisioned.
         * If we are processing a write bio that completely covers the block,
         * we already processed it so can ignore it now when processing
         * the bios in the cell.
         */
        if (bio) {
                cell_defer_no_holder(tc, m->cell);
                bio_endio(bio, 0);
        } else
                cell_defer(tc, m->cell);

out:
        list_del(&m->list);
        mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
        struct thin_c *tc = m->tc;

        bio_io_error(m->bio);
        cell_defer_no_holder(tc, m->cell);
        cell_defer_no_holder(tc, m->cell2);
        mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
        struct thin_c *tc = m->tc;

        inc_all_io_entry(tc->pool, m->bio);
        cell_defer_no_holder(tc, m->cell);
        cell_defer_no_holder(tc, m->cell2);

        if (m->pass_discard)
                remap_and_issue(tc, m->bio, m->data_block);
        else
                bio_endio(m->bio, 0);

        mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
        int r;
        struct thin_c *tc = m->tc;

        r = dm_thin_remove_block(tc->td, m->virt_block);
        if (r)
                DMERR_LIMIT("dm_thin_remove_block() failed");

        process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
                             process_mapping_fn *fn)
{
        unsigned long flags;
        struct list_head maps;
        struct dm_thin_new_mapping *m, *tmp;

        INIT_LIST_HEAD(&maps);
        spin_lock_irqsave(&pool->lock, flags);
        list_splice_init(head, &maps);
        spin_unlock_irqrestore(&pool->lock, flags);

        list_for_each_entry_safe(m, tmp, &maps, list)
                (*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
        return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
        return (bio_data_dir(bio) == WRITE) &&
                io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
                               bio_end_io_t *fn)
{
        *save = bio->bi_end_io;
        bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
        if (pool->next_mapping)
                return 0;

        pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

        return pool->next_mapping ? 0 : -ENOMEM;
}
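
/*
 * ensure_next_mapping() runs in the worker thread, so it allocates with
 * GFP_ATOMIC rather than blocking.  If the allocation fails,
 * process_deferred_bios() simply re-queues the remaining bios and retries
 * once some prepared mappings have been processed and freed.
 */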

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
        struct dm_thin_new_mapping *r = pool->next_mapping;

        BUG_ON(!pool->next_mapping);

        pool->next_mapping = NULL;

        return r;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
                          struct dm_dev *origin, dm_block_t data_origin,
                          dm_block_t data_dest,
                          struct dm_bio_prison_cell *cell, struct bio *bio)
{
        int r;
        struct pool *pool = tc->pool;
        struct dm_thin_new_mapping *m = get_next_mapping(pool);

        INIT_LIST_HEAD(&m->list);
        m->quiesced = 0;
        m->prepared = 0;
        m->tc = tc;
        m->virt_block = virt_block;
        m->data_block = data_dest;
        m->cell = cell;
        m->err = 0;
        m->bio = NULL;

        if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
                m->quiesced = 1;

        /*
         * IO to pool_dev remaps to the pool target's data_dev.
         *
         * If the whole block of data is being overwritten, we can issue the
         * bio immediately. Otherwise we use kcopyd to clone the data first.
         */
        if (io_overwrites_block(pool, bio)) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
                inc_all_io_entry(pool, bio);
                remap_and_issue(tc, bio, data_dest);
        } else {
                struct dm_io_region from, to;

                from.bdev = origin->bdev;
                from.sector = data_origin * pool->sectors_per_block;
                from.count = pool->sectors_per_block;

                to.bdev = tc->pool_dev->bdev;
                to.sector = data_dest * pool->sectors_per_block;
                to.count = pool->sectors_per_block;

                r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
                                   0, copy_complete, m);
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
                        DMERR_LIMIT("dm_kcopyd_copy() failed");
                        cell_error(pool, cell);
                }
        }
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_origin, dm_block_t data_dest,
                                   struct dm_bio_prison_cell *cell, struct bio *bio)
{
        schedule_copy(tc, virt_block, tc->pool_dev,
                      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
                                   dm_block_t data_dest,
                                   struct dm_bio_prison_cell *cell, struct bio *bio)
{
        schedule_copy(tc, virt_block, tc->origin_dev,
                      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                          dm_block_t data_block, struct dm_bio_prison_cell *cell,
                          struct bio *bio)
{
        struct pool *pool = tc->pool;
        struct dm_thin_new_mapping *m = get_next_mapping(pool);

        INIT_LIST_HEAD(&m->list);
        m->quiesced = 1;
        m->prepared = 0;
        m->tc = tc;
        m->virt_block = virt_block;
        m->data_block = data_block;
        m->cell = cell;
        m->err = 0;
        m->bio = NULL;

        /*
         * If the whole block of data is being overwritten or we are not
         * zeroing pre-existing data, we can issue the bio immediately.
         * Otherwise we use kcopyd to zero the data first.
         */
        if (!pool->pf.zero_new_blocks)
                process_prepared_mapping(m);

        else if (io_overwrites_block(pool, bio)) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
                inc_all_io_entry(pool, bio);
                remap_and_issue(tc, bio, data_block);
        } else {
                int r;
                struct dm_io_region to;

                to.bdev = tc->pool_dev->bdev;
                to.sector = data_block * pool->sectors_per_block;
                to.count = pool->sectors_per_block;

                r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
                        DMERR_LIMIT("dm_kcopyd_zero() failed");
                        cell_error(pool, cell);
                }
        }
}

static int commit(struct pool *pool)
{
        int r;

        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
                DMERR_LIMIT("commit failed: error = %d", r);

        return r;
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit_or_fallback(struct pool *pool)
{
        int r;

        if (get_pool_mode(pool) != PM_WRITE)
                return -EINVAL;

        r = commit(pool);
        if (r)
                set_pool_mode(pool, PM_READ_ONLY);

        return r;
}

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
        int r;
        dm_block_t free_blocks;
        unsigned long flags;
        struct pool *pool = tc->pool;

        r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
        if (r)
                return r;

        if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
                DMWARN("%s: reached low water mark, sending event.",
                       dm_device_name(pool->pool_md));
                spin_lock_irqsave(&pool->lock, flags);
                pool->low_water_triggered = 1;
                spin_unlock_irqrestore(&pool->lock, flags);
                dm_table_event(pool->ti->table);
        }

        if (!free_blocks) {
                if (pool->no_free_space)
                        return -ENOSPC;
                else {
                        /*
                         * Try to commit to see if that will free up some
                         * more space.
                         */
                        (void) commit_or_fallback(pool);

                        r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
                        if (r)
                                return r;

                        /*
                         * If we still have no space we set a flag to avoid
                         * doing all this checking and return -ENOSPC.
                         */
                        if (!free_blocks) {
                                DMWARN("%s: no free space available.",
                                       dm_device_name(pool->pool_md));
                                spin_lock_irqsave(&pool->lock, flags);
                                pool->no_free_space = 1;
                                spin_unlock_irqrestore(&pool->lock, flags);
                                return -ENOSPC;
                        }
                }
        }

        r = dm_pool_alloc_data_block(pool->pmd, result);
        if (r)
                return r;

        return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct thin_c *tc = h->tc;
        struct pool *pool = tc->pool;
        unsigned long flags;

        spin_lock_irqsave(&pool->lock, flags);
        bio_list_add(&pool->retry_on_resume_list, bio);
        spin_unlock_irqrestore(&pool->lock, flags);
}

static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
{
        struct bio *bio;
        struct bio_list bios;

        bio_list_init(&bios);
        cell_release(pool, cell, &bios);

        while ((bio = bio_list_pop(&bios)))
                retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
        int r;
        unsigned long flags;
        struct pool *pool = tc->pool;
        struct dm_bio_prison_cell *cell, *cell2;
        struct dm_cell_key key, key2;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;
        struct dm_thin_new_mapping *m;

        build_virtual_key(tc->td, block, &key);
        if (bio_detain(tc->pool, &key, bio, &cell))
                return;

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
                /*
                 * Check nobody is fiddling with this pool block.  This can
                 * happen if someone's in the process of breaking sharing
                 * on this block.
                 */
                build_data_key(tc->td, lookup_result.block, &key2);
                if (bio_detain(tc->pool, &key2, bio, &cell2)) {
                        cell_defer_no_holder(tc, cell);
                        break;
                }

                if (io_overlaps_block(pool, bio)) {
                        /*
                         * IO may still be going to the destination block.  We must
                         * quiesce before we can do the removal.
                         */
                        m = get_next_mapping(pool);
                        m->tc = tc;
                        m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
                        m->virt_block = block;
                        m->data_block = lookup_result.block;
                        m->cell = cell;
                        m->cell2 = cell2;
                        m->err = 0;
                        m->bio = bio;

                        if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
                                spin_lock_irqsave(&pool->lock, flags);
                                list_add(&m->list, &pool->prepared_discards);
                                spin_unlock_irqrestore(&pool->lock, flags);
                                wake_worker(pool);
                        }
                } else {
                        inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);
                        cell_defer_no_holder(tc, cell2);

                        /*
                         * The DM core makes sure that the discard doesn't
                         * span a block boundary, so a discard that covers
                         * only part of a block can simply be passed down
                         * or completed here.
                         */
                        if ((!lookup_result.shared) && pool->pf.discard_passdown)
                                remap_and_issue(tc, bio, lookup_result.block);
                        else
                                bio_endio(bio, 0);
                }
                break;

        case -ENODATA:
                /*
                 * It isn't provisioned, just forget it.
                 */
                cell_defer_no_holder(tc, cell);
                bio_endio(bio, 0);
                break;

        default:
                DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
                            __func__, r);
                cell_defer_no_holder(tc, cell);
                bio_io_error(bio);
                break;
        }
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                          struct dm_cell_key *key,
                          struct dm_thin_lookup_result *lookup_result,
                          struct dm_bio_prison_cell *cell)
{
        int r;
        dm_block_t data_block;

        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
                schedule_internal_copy(tc, block, lookup_result->block,
                                       data_block, cell, bio);
                break;

        case -ENOSPC:
                no_space(tc->pool, cell);
                break;

        default:
                DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                            __func__, r);
                cell_error(tc->pool, cell);
                break;
        }
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
                               dm_block_t block,
                               struct dm_thin_lookup_result *lookup_result)
{
        struct dm_bio_prison_cell *cell;
        struct pool *pool = tc->pool;
        struct dm_cell_key key;

        /*
         * If cell is already occupied, then sharing is already in the process
         * of being broken so we have nothing further to do here.
         */
        build_data_key(tc->td, lookup_result->block, &key);
        if (bio_detain(pool, &key, bio, &cell))
                return;

        if (bio_data_dir(bio) == WRITE && bio->bi_size)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

                h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
                inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);

                remap_and_issue(tc, bio, lookup_result->block);
        }
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
                            struct dm_bio_prison_cell *cell)
{
        int r;
        dm_block_t data_block;
        struct pool *pool = tc->pool;

        /*
         * Remap empty bios (flushes) immediately, without provisioning.
         */
        if (!bio->bi_size) {
                inc_all_io_entry(pool, bio);
                cell_defer_no_holder(tc, cell);

                remap_and_issue(tc, bio, 0);
                return;
        }

        /*
         * Fill read bios with zeroes and complete them immediately.
         */
        if (bio_data_dir(bio) == READ) {
                zero_fill_bio(bio);
                cell_defer_no_holder(tc, cell);
                bio_endio(bio, 0);
                return;
        }

        r = alloc_data_block(tc, &data_block);
        switch (r) {
        case 0:
                if (tc->origin_dev)
                        schedule_external_copy(tc, block, data_block, cell, bio);
                else
                        schedule_zero(tc, block, data_block, cell, bio);
                break;

        case -ENOSPC:
                no_space(pool, cell);
                break;

        default:
                DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
                            __func__, r);
                set_pool_mode(pool, PM_READ_ONLY);
                cell_error(pool, cell);
                break;
        }
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
        int r;
        struct pool *pool = tc->pool;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_bio_prison_cell *cell;
        struct dm_cell_key key;
        struct dm_thin_lookup_result lookup_result;

        /*
         * If cell is already occupied, then the block is already
         * being provisioned so we have nothing further to do here.
         */
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(pool, &key, bio, &cell))
                return;

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
                if (lookup_result.shared) {
                        process_shared_bio(tc, bio, block, &lookup_result);
                        cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
                } else {
                        inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);

                        remap_and_issue(tc, bio, lookup_result.block);
                }
                break;

        case -ENODATA:
                if (bio_data_dir(bio) == READ && tc->origin_dev) {
                        inc_all_io_entry(pool, bio);
                        cell_defer_no_holder(tc, cell);

                        remap_to_origin_and_issue(tc, bio);
                } else
                        provision_block(tc, bio, block, cell);
                break;

        default:
                DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
                            __func__, r);
                cell_defer_no_holder(tc, cell);
                bio_io_error(bio);
                break;
        }
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
        int r;
        int rw = bio_data_dir(bio);
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
                if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
                        bio_io_error(bio);
                else {
                        inc_all_io_entry(tc->pool, bio);
                        remap_and_issue(tc, bio, lookup_result.block);
                }
                break;

        case -ENODATA:
                if (rw != READ) {
                        bio_io_error(bio);
                        break;
                }

                if (tc->origin_dev) {
                        inc_all_io_entry(tc->pool, bio);
                        remap_to_origin_and_issue(tc, bio);
                        break;
                }

                zero_fill_bio(bio);
                bio_endio(bio, 0);
                break;

        default:
                DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
                            __func__, r);
                bio_io_error(bio);
                break;
        }
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
        bio_io_error(bio);
}

static int need_commit_due_to_time(struct pool *pool)
{
        return jiffies < pool->last_commit_jiffies ||
               jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}
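
/*
 * The first comparison handles jiffies wrap-around: if jiffies has
 * wrapped past last_commit_jiffies we err on the side of committing.
 */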

static void process_deferred_bios(struct pool *pool)
{
        unsigned long flags;
        struct bio *bio;
        struct bio_list bios;

        bio_list_init(&bios);

        spin_lock_irqsave(&pool->lock, flags);
        bio_list_merge(&bios, &pool->deferred_bios);
        bio_list_init(&pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        while ((bio = bio_list_pop(&bios))) {
                struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
                struct thin_c *tc = h->tc;

                /*
                 * If we've got no free new_mapping structs, and processing
                 * this bio might require one, we pause until there are some
                 * prepared mappings to process.
                 */
                if (ensure_next_mapping(pool)) {
                        spin_lock_irqsave(&pool->lock, flags);
                        bio_list_merge(&pool->deferred_bios, &bios);
                        spin_unlock_irqrestore(&pool->lock, flags);

                        break;
                }

                if (bio->bi_rw & REQ_DISCARD)
                        pool->process_discard(tc, bio);
                else
                        pool->process_bio(tc, bio);
        }

        /*
         * If there are any deferred flush bios, we must commit
         * the metadata before issuing them.
         */
        bio_list_init(&bios);
        spin_lock_irqsave(&pool->lock, flags);
        bio_list_merge(&bios, &pool->deferred_flush_bios);
        bio_list_init(&pool->deferred_flush_bios);
        spin_unlock_irqrestore(&pool->lock, flags);

        if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
                return;

        if (commit_or_fallback(pool)) {
                while ((bio = bio_list_pop(&bios)))
                        bio_io_error(bio);
                return;
        }
        pool->last_commit_jiffies = jiffies;

        while ((bio = bio_list_pop(&bios)))
                generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
        struct pool *pool = container_of(ws, struct pool, worker);

        process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
        process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
        process_deferred_bios(pool);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
        struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
        wake_worker(pool);
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static enum pool_mode get_pool_mode(struct pool *pool)
{
        return pool->pf.mode;
}

static void set_pool_mode(struct pool *pool, enum pool_mode mode)
{
        int r;

        pool->pf.mode = mode;

        switch (mode) {
        case PM_FAIL:
                DMERR("switching pool to failure mode");
                pool->process_bio = process_bio_fail;
                pool->process_discard = process_bio_fail;
                pool->process_prepared_mapping = process_prepared_mapping_fail;
                pool->process_prepared_discard = process_prepared_discard_fail;
                break;

        case PM_READ_ONLY:
                DMERR("switching pool to read-only mode");
                r = dm_pool_abort_metadata(pool->pmd);
                if (r) {
                        DMERR("aborting transaction failed");
                        set_pool_mode(pool, PM_FAIL);
                } else {
                        dm_pool_metadata_read_only(pool->pmd);
                        pool->process_bio = process_bio_read_only;
                        pool->process_discard = process_discard;
                        pool->process_prepared_mapping = process_prepared_mapping_fail;
                        pool->process_prepared_discard = process_prepared_discard_passdown;
                }
                break;

        case PM_WRITE:
                pool->process_bio = process_bio;
                pool->process_discard = process_discard;
                pool->process_prepared_mapping = process_prepared_mapping;
                pool->process_prepared_discard = process_prepared_discard;
                break;
        }
}
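
/*
 * Note that in PM_READ_ONLY mode prepared discards are handled by the
 * passdown variant, which never calls dm_thin_remove_block() and so
 * leaves the (read-only) metadata untouched.
 */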

/*----------------------------------------------------------------*/

/*
 * Mapping functions.
 */

/*
 * Called only while mapping a thin bio to hand it over to the workqueue.
 */
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
        unsigned long flags;
        struct pool *pool = tc->pool;

        spin_lock_irqsave(&pool->lock, flags);
        bio_list_add(&pool->deferred_bios, bio);
        spin_unlock_irqrestore(&pool->lock, flags);

        wake_worker(pool);
}

static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

        h->tc = tc;
        h->shared_read_entry = NULL;
        h->all_io_entry = NULL;
        h->overwrite_mapping = NULL;
}

/*
 * Non-blocking function called from the thin target's map function.
 */
static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{
        int r;
        struct thin_c *tc = ti->private;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_device *td = tc->td;
        struct dm_thin_lookup_result result;
        struct dm_bio_prison_cell cell1, cell2;
        struct dm_bio_prison_cell *cell_result;
        struct dm_cell_key key;

        thin_hook_bio(tc, bio);

        if (get_pool_mode(tc->pool) == PM_FAIL) {
                bio_io_error(bio);
                return DM_MAPIO_SUBMITTED;
        }

        if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
                thin_defer_bio(tc, bio);
                return DM_MAPIO_SUBMITTED;
        }

        r = dm_thin_find_block(td, block, 0, &result);

        /*
         * Note that we defer readahead too.
         */
        switch (r) {
        case 0:
                if (unlikely(result.shared)) {
                        /*
                         * We have a race condition here between the
                         * result.shared value returned by the lookup and
                         * snapshot creation, which may cause new
                         * sharing.
                         *
                         * To avoid this always quiesce the origin before
                         * taking the snap.  You want to do this anyway to
                         * ensure a consistent application view
                         * (i.e. lockfs).
                         *
                         * More distant ancestors are irrelevant. The
                         * shared flag will be set in their case.
                         */
                        thin_defer_bio(tc, bio);
                        return DM_MAPIO_SUBMITTED;
                }

                build_virtual_key(tc->td, block, &key);
                if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
                        return DM_MAPIO_SUBMITTED;

                build_data_key(tc->td, result.block, &key);
                if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
                        cell_defer_no_holder_no_free(tc, &cell1);
                        return DM_MAPIO_SUBMITTED;
                }

                inc_all_io_entry(tc->pool, bio);
                cell_defer_no_holder_no_free(tc, &cell2);
                cell_defer_no_holder_no_free(tc, &cell1);

                remap(tc, bio, result.block);
                return DM_MAPIO_REMAPPED;

        case -ENODATA:
                if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
                        /*
                         * This block isn't provisioned, and we have no way
                         * of doing so.  Just error it.
                         */
                        bio_io_error(bio);
                        return DM_MAPIO_SUBMITTED;
                }
                /* fall through */

        case -EWOULDBLOCK:
                /*
                 * In future, the failed dm_thin_find_block above could
                 * provide the hint to load the metadata into cache.
                 */
                thin_defer_bio(tc, bio);
                return DM_MAPIO_SUBMITTED;

        default:
                /*
                 * Must always call bio_io_error on failure.
                 * dm_thin_find_block can fail with -EINVAL if the
                 * pool is switched to fail-io mode.
                 */
                bio_io_error(bio);
                return DM_MAPIO_SUBMITTED;
        }
}
1545
1546static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1547{
1548        int r;
1549        unsigned long flags;
1550        struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1551
1552        spin_lock_irqsave(&pt->pool->lock, flags);
1553        r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1554        spin_unlock_irqrestore(&pt->pool->lock, flags);
1555
1556        if (!r) {
1557                struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1558                r = bdi_congested(&q->backing_dev_info, bdi_bits);
1559        }
1560
1561        return r;
1562}
1563
1564static void __requeue_bios(struct pool *pool)
1565{
1566        bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1567        bio_list_init(&pool->retry_on_resume_list);
1568}
1569
1570/*----------------------------------------------------------------
1571 * Binding of control targets to a pool object
1572 *--------------------------------------------------------------*/
1573static bool data_dev_supports_discard(struct pool_c *pt)
1574{
1575        struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1576
1577        return q && blk_queue_discard(q);
1578}
1579
1580static bool is_factor(sector_t block_size, uint32_t n)
1581{
1582        return !sector_div(block_size, n);
1583}
1584
1585/*
1586 * If discard_passdown was enabled, verify that the data device
1587 * supports discards.  Disable discard_passdown if not.
1588 */
1589static void disable_passdown_if_not_supported(struct pool_c *pt)
1590{
1591        struct pool *pool = pt->pool;
1592        struct block_device *data_bdev = pt->data_dev->bdev;
1593        struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1594        sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1595        const char *reason = NULL;
1596        char buf[BDEVNAME_SIZE];
1597
1598        if (!pt->adjusted_pf.discard_passdown)
1599                return;
1600
1601        if (!data_dev_supports_discard(pt))
1602                reason = "discard unsupported";
1603
1604        else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1605                reason = "max discard sectors smaller than a block";
1606
1607        else if (data_limits->discard_granularity > block_size)
1608                reason = "discard granularity larger than a block";
1609
1610        else if (!is_factor(block_size, data_limits->discard_granularity))
1611                reason = "discard granularity not a factor of block size";
1612
1613        if (reason) {
1614                DMWARN("Data device (%s) %s: Disabling discard passdown.",
                       bdevname(data_bdev, buf), reason);
1615                pt->adjusted_pf.discard_passdown = false;
1616        }
1617}
1618
1619static int bind_control_target(struct pool *pool, struct dm_target *ti)
1620{
1621        struct pool_c *pt = ti->private;
1622
1623        /*
1624         * We want to make sure that degraded pools are never upgraded.
1625         */
1626        enum pool_mode old_mode = pool->pf.mode;
1627        enum pool_mode new_mode = pt->adjusted_pf.mode;
1628
1629        if (old_mode > new_mode)
1630                new_mode = old_mode;
1631
1632        pool->ti = ti;
1633        pool->low_water_blocks = pt->low_water_blocks;
1634        pool->pf = pt->adjusted_pf;
1635
1636        set_pool_mode(pool, new_mode);
1637
1638        return 0;
1639}
1640
1641static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1642{
1643        if (pool->ti == ti)
1644                pool->ti = NULL;
1645}
1646
1647/*----------------------------------------------------------------
1648 * Pool creation
1649 *--------------------------------------------------------------*/
1650/* Initialize pool features. */
1651static void pool_features_init(struct pool_features *pf)
1652{
1653        pf->mode = PM_WRITE;
1654        pf->zero_new_blocks = true;
1655        pf->discard_enabled = true;
1656        pf->discard_passdown = true;
1657}
1658
1659static void __pool_destroy(struct pool *pool)
1660{
1661        __pool_table_remove(pool);
1662
1663        if (dm_pool_metadata_close(pool->pmd) < 0)
1664                DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1665
1666        dm_bio_prison_destroy(pool->prison);
1667        dm_kcopyd_client_destroy(pool->copier);
1668
1669        if (pool->wq)
1670                destroy_workqueue(pool->wq);
1671
1672        if (pool->next_mapping)
1673                mempool_free(pool->next_mapping, pool->mapping_pool);
1674        mempool_destroy(pool->mapping_pool);
1675        dm_deferred_set_destroy(pool->shared_read_ds);
1676        dm_deferred_set_destroy(pool->all_io_ds);
1677        kfree(pool);
1678}
1679
1680static struct kmem_cache *_new_mapping_cache;
1681
1682static struct pool *pool_create(struct mapped_device *pool_md,
1683                                struct block_device *metadata_dev,
1684                                unsigned long block_size,
1685                                int read_only, char **error)
1686{
1687        int r;
1688        void *err_p;
1689        struct pool *pool;
1690        struct dm_pool_metadata *pmd;
1691        bool format_device = !read_only;
1692
1693        pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1694        if (IS_ERR(pmd)) {
1695                *error = "Error creating metadata object";
1696                return (struct pool *)pmd;
1697        }
1698
1699        pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1700        if (!pool) {
1701                *error = "Error allocating memory for pool";
1702                err_p = ERR_PTR(-ENOMEM);
1703                goto bad_pool;
1704        }
1705
1706        pool->pmd = pmd;
1707        pool->sectors_per_block = block_size;
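        /*
         * A power-of-two block size lets block/sector conversions use
         * shifts; a shift of -1 tells the io paths to fall back to
         * sector_div() instead.
         */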
1708        if (block_size & (block_size - 1))
1709                pool->sectors_per_block_shift = -1;
1710        else
1711                pool->sectors_per_block_shift = __ffs(block_size);
1712        pool->low_water_blocks = 0;
1713        pool_features_init(&pool->pf);
1714        pool->prison = dm_bio_prison_create(PRISON_CELLS);
1715        if (!pool->prison) {
1716                *error = "Error creating pool's bio prison";
1717                err_p = ERR_PTR(-ENOMEM);
1718                goto bad_prison;
1719        }
1720
1721        pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
1722        if (IS_ERR(pool->copier)) {
1723                r = PTR_ERR(pool->copier);
1724                *error = "Error creating pool's kcopyd client";
1725                err_p = ERR_PTR(r);
1726                goto bad_kcopyd_client;
1727        }
1728
1729        /*
1730         * Create singlethreaded workqueue that will service all devices
1731         * that use this metadata.
1732         */
1733        pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1734        if (!pool->wq) {
1735                *error = "Error creating pool's workqueue";
1736                err_p = ERR_PTR(-ENOMEM);
1737                goto bad_wq;
1738        }
1739
1740        INIT_WORK(&pool->worker, do_worker);
1741        INIT_DELAYED_WORK(&pool->waker, do_waker);
1742        spin_lock_init(&pool->lock);
1743        bio_list_init(&pool->deferred_bios);
1744        bio_list_init(&pool->deferred_flush_bios);
1745        INIT_LIST_HEAD(&pool->prepared_mappings);
1746        INIT_LIST_HEAD(&pool->prepared_discards);
1747        pool->low_water_triggered = 0;
1748        pool->no_free_space = 0;
1749        bio_list_init(&pool->retry_on_resume_list);
1750
1751        pool->shared_read_ds = dm_deferred_set_create();
1752        if (!pool->shared_read_ds) {
1753                *error = "Error creating pool's shared read deferred set";
1754                err_p = ERR_PTR(-ENOMEM);
1755                goto bad_shared_read_ds;
1756        }
1757
1758        pool->all_io_ds = dm_deferred_set_create();
1759        if (!pool->all_io_ds) {
1760                *error = "Error creating pool's all io deferred set";
1761                err_p = ERR_PTR(-ENOMEM);
1762                goto bad_all_io_ds;
1763        }
1764
1765        pool->next_mapping = NULL;
1766        pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1767                                                      _new_mapping_cache);
1768        if (!pool->mapping_pool) {
1769                *error = "Error creating pool's mapping mempool";
1770                err_p = ERR_PTR(-ENOMEM);
1771                goto bad_mapping_pool;
1772        }
1773
1774        pool->ref_count = 1;
1775        pool->last_commit_jiffies = jiffies;
1776        pool->pool_md = pool_md;
1777        pool->md_dev = metadata_dev;
1778        __pool_table_insert(pool);
1779
1780        return pool;
1781
1782bad_mapping_pool:
1783        dm_deferred_set_destroy(pool->all_io_ds);
1784bad_all_io_ds:
1785        dm_deferred_set_destroy(pool->shared_read_ds);
1786bad_shared_read_ds:
1787        destroy_workqueue(pool->wq);
1788bad_wq:
1789        dm_kcopyd_client_destroy(pool->copier);
1790bad_kcopyd_client:
1791        dm_bio_prison_destroy(pool->prison);
1792bad_prison:
1793        kfree(pool);
1794bad_pool:
1795        if (dm_pool_metadata_close(pmd))
1796                DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1797
1798        return err_p;
1799}
1800
1801static void __pool_inc(struct pool *pool)
1802{
1803        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1804        pool->ref_count++;
1805}
1806
1807static void __pool_dec(struct pool *pool)
1808{
1809        BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1810        BUG_ON(!pool->ref_count);
1811        if (!--pool->ref_count)
1812                __pool_destroy(pool);
1813}
1814
1815static struct pool *__pool_find(struct mapped_device *pool_md,
1816                                struct block_device *metadata_dev,
1817                                unsigned long block_size, int read_only,
1818                                char **error, int *created)
1819{
1820        struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1821
1822        if (pool) {
1823                if (pool->pool_md != pool_md) {
1824                        *error = "metadata device already in use by a pool";
1825                        return ERR_PTR(-EBUSY);
1826                }
1827                __pool_inc(pool);
1828
1829        } else {
1830                pool = __pool_table_lookup(pool_md);
1831                if (pool) {
1832                        if (pool->md_dev != metadata_dev) {
1833                                *error = "different pool cannot replace a pool";
1834                                return ERR_PTR(-EINVAL);
1835                        }
1836                        __pool_inc(pool);
1837
1838                } else {
1839                        pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
1840                        *created = 1;
1841                }
1842        }
1843
1844        return pool;
1845}
1846
1847/*----------------------------------------------------------------
1848 * Pool target methods
1849 *--------------------------------------------------------------*/
1850static void pool_dtr(struct dm_target *ti)
1851{
1852        struct pool_c *pt = ti->private;
1853
1854        mutex_lock(&dm_thin_pool_table.mutex);
1855
1856        unbind_control_target(pt->pool, ti);
1857        __pool_dec(pt->pool);
1858        dm_put_device(ti, pt->metadata_dev);
1859        dm_put_device(ti, pt->data_dev);
1860        kfree(pt);
1861
1862        mutex_unlock(&dm_thin_pool_table.mutex);
1863}
1864
1865static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1866                               struct dm_target *ti)
1867{
1868        int r;
1869        unsigned argc;
1870        const char *arg_name;
1871
1872        static struct dm_arg _args[] = {
1873                {0, 4, "Invalid number of pool feature arguments"},
1874        };
1875
1876        /*
1877         * No feature arguments supplied.
1878         */
1879        if (!as->argc)
1880                return 0;
1881
1882        r = dm_read_arg_group(_args, as, &argc, &ti->error);
1883        if (r)
1884                return -EINVAL;
1885
1886        while (argc && !r) {
1887                arg_name = dm_shift_arg(as);
1888                argc--;
1889
1890                if (!strcasecmp(arg_name, "skip_block_zeroing"))
1891                        pf->zero_new_blocks = false;
1892
1893                else if (!strcasecmp(arg_name, "ignore_discard"))
1894                        pf->discard_enabled = false;
1895
1896                else if (!strcasecmp(arg_name, "no_discard_passdown"))
1897                        pf->discard_passdown = false;
1898
1899                else if (!strcasecmp(arg_name, "read_only"))
1900                        pf->mode = PM_READ_ONLY;
1901
1902                else {
1903                        ti->error = "Unrecognised pool feature requested";
1904                        r = -EINVAL;
1905                        break;
1906                }
1907        }
1908
1909        return r;
1910}
1911
1912/*
1913 * thin-pool <metadata dev> <data dev>
1914 *           <data block size (sectors)>
1915 *           <low water mark (blocks)>
1916 *           [<#feature args> [<arg>]*]
1917 *
1918 * Optional feature arguments are:
1919 *           skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1920 *           ignore_discard: disable discard
1921 *           no_discard_passdown: don't pass discards down to the data device
1922 */
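/*
 * Purely illustrative example (device names and sector counts are
 * hypothetical): a 10GiB pool with 64KB (128-sector) blocks, a low
 * water mark of 32768 blocks and block zeroing disabled could be
 * loaded with:
 *
 *   dmsetup create pool \
 *      --table "0 20971520 thin-pool /dev/mapper/meta /dev/mapper/data \
 *               128 32768 1 skip_block_zeroing"
 */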
1923static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1924{
1925        int r, pool_created = 0;
1926        struct pool_c *pt;
1927        struct pool *pool;
1928        struct pool_features pf;
1929        struct dm_arg_set as;
1930        struct dm_dev *data_dev;
1931        unsigned long block_size;
1932        dm_block_t low_water_blocks;
1933        struct dm_dev *metadata_dev;
1934        sector_t metadata_dev_size;
1935        char b[BDEVNAME_SIZE];
1936
1937        /*
1938         * FIXME Remove validation from scope of lock.
1939         */
1940        mutex_lock(&dm_thin_pool_table.mutex);
1941
1942        if (argc < 4) {
1943                ti->error = "Invalid argument count";
1944                r = -EINVAL;
1945                goto out_unlock;
1946        }
1947        as.argc = argc;
1948        as.argv = argv;
1949
1950        r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
1951        if (r) {
1952                ti->error = "Error opening metadata block device";
1953                goto out_unlock;
1954        }
1955
1956        metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
1957        if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
1958                DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1959                       bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
1960
1961        r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1962        if (r) {
1963                ti->error = "Error getting data device";
1964                goto out_metadata;
1965        }
1966
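        /*
         * The data block size is given in 512-byte sectors and must be
         * a multiple of 64KB between 64KB and 1GB; the mask test below
         * rejects anything that isn't a multiple of the 128-sector
         * minimum.
         */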
1967        if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
1968            block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1969            block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1970            block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1971                ti->error = "Invalid block size";
1972                r = -EINVAL;
1973                goto out;
1974        }
1975
1976        if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
1977                ti->error = "Invalid low water mark";
1978                r = -EINVAL;
1979                goto out;
1980        }
1981
1982        /*
1983         * Set default pool features.
1984         */
1985        pool_features_init(&pf);
1986
1987        dm_consume_args(&as, 4);
1988        r = parse_pool_features(&as, &pf, ti);
1989        if (r)
1990                goto out;
1991
1992        pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1993        if (!pt) {
1994                r = -ENOMEM;
1995                goto out;
1996        }
1997
1998        pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
1999                           block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
2000        if (IS_ERR(pool)) {
2001                r = PTR_ERR(pool);
2002                goto out_free_pt;
2003        }
2004
2005        /*
2006         * 'pool_created' reflects whether this is the first table load.
2007         * Top level discard support is not allowed to be changed after
2008         * initial load.  This would require a pool reload to trigger thin
2009         * device changes.
2010         */
2011        if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2012                ti->error = "Discard support cannot be changed after first table load";
2013                r = -EINVAL;
2014                goto out_flags_changed;
2015        }
2016
2017        pt->pool = pool;
2018        pt->ti = ti;
2019        pt->metadata_dev = metadata_dev;
2020        pt->data_dev = data_dev;
2021        pt->low_water_blocks = low_water_blocks;
2022        pt->adjusted_pf = pt->requested_pf = pf;
2023        ti->num_flush_bios = 1;
2024
2025        /*
2026         * Only need to enable discards if the pool should pass
2027         * them down to the data device.  The thin device's discard
2028         * processing will cause mappings to be removed from the btree.
2029         */
2030        if (pf.discard_enabled && pf.discard_passdown) {
2031                ti->num_discard_bios = 1;
2032
2033                /*
2034                 * Setting 'discards_supported' circumvents the normal
2035                 * stacking of discard limits (this keeps the pool and
2036                 * thin devices' discard limits consistent).
2037                 */
2038                ti->discards_supported = true;
2039                ti->discard_zeroes_data_unsupported = true;
2040        }
2041        ti->private = pt;
2042
2043        pt->callbacks.congested_fn = pool_is_congested;
2044        dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2045
2046        mutex_unlock(&dm_thin_pool_table.mutex);
2047
2048        return 0;
2049
2050out_flags_changed:
2051        __pool_dec(pool);
2052out_free_pt:
2053        kfree(pt);
2054out:
2055        dm_put_device(ti, data_dev);
2056out_metadata:
2057        dm_put_device(ti, metadata_dev);
2058out_unlock:
2059        mutex_unlock(&dm_thin_pool_table.mutex);
2060
2061        return r;
2062}
2063
2064static int pool_map(struct dm_target *ti, struct bio *bio)
2065{
2066        int r;
2067        struct pool_c *pt = ti->private;
2068        struct pool *pool = pt->pool;
2069        unsigned long flags;
2070
2071        /*
2072         * As this is a singleton target, ti->begin is always zero.
2073         */
2074        spin_lock_irqsave(&pool->lock, flags);
2075        bio->bi_bdev = pt->data_dev->bdev;
2076        r = DM_MAPIO_REMAPPED;
2077        spin_unlock_irqrestore(&pool->lock, flags);
2078
2079        return r;
2080}
2081
2082/*
2083 * Retrieves the number of blocks of the data device from
2084 * the superblock and compares it to the actual device size,
2085 * resizing the data device if it has grown.
2086 *
2087 * This copes both with a preallocated data device being opened by the
2088 * ctr and then resumed,
2089 * -and-
2090 * with the resume method being called on its own after userspace has
2091 * grown the data device in reaction to a table event.
2092 */
2093static int pool_preresume(struct dm_target *ti)
2094{
2095        int r;
2096        struct pool_c *pt = ti->private;
2097        struct pool *pool = pt->pool;
2098        sector_t data_size = ti->len;
2099        dm_block_t sb_data_size;
2100
2101        /*
2102         * Take control of the pool object.
2103         */
2104        r = bind_control_target(pool, ti);
2105        if (r)
2106                return r;
2107
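        /*
         * Convert the target length from sectors to whole data blocks,
         * ignoring any partial block at the end.
         */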
2108        (void) sector_div(data_size, pool->sectors_per_block);
2109
2110        r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2111        if (r) {
2112                DMERR("failed to retrieve data device size");
2113                return r;
2114        }
2115
2116        if (data_size < sb_data_size) {
2117                DMERR("pool target too small, is %llu blocks (expected %llu)",
2118                      (unsigned long long)data_size, sb_data_size);
2119                return -EINVAL;
2120
2121        } else if (data_size > sb_data_size) {
2122                r = dm_pool_resize_data_dev(pool->pmd, data_size);
2123                if (r) {
2124                        DMERR("failed to resize data device");
2125                        /* FIXME Stricter than necessary: Rollback transaction instead here */
2126                        set_pool_mode(pool, PM_READ_ONLY);
2127                        return r;
2128                }
2129
2130                (void) commit_or_fallback(pool);
2131        }
2132
2133        return 0;
2134}
2135
2136static void pool_resume(struct dm_target *ti)
2137{
2138        struct pool_c *pt = ti->private;
2139        struct pool *pool = pt->pool;
2140        unsigned long flags;
2141
2142        spin_lock_irqsave(&pool->lock, flags);
2143        pool->low_water_triggered = 0;
2144        pool->no_free_space = 0;
2145        __requeue_bios(pool);
2146        spin_unlock_irqrestore(&pool->lock, flags);
2147
2148        do_waker(&pool->waker.work);
2149}
2150
2151static void pool_postsuspend(struct dm_target *ti)
2152{
2153        struct pool_c *pt = ti->private;
2154        struct pool *pool = pt->pool;
2155
2156        cancel_delayed_work(&pool->waker);
2157        flush_workqueue(pool->wq);
2158        (void) commit_or_fallback(pool);
2159}
2160
2161static int check_arg_count(unsigned argc, unsigned args_required)
2162{
2163        if (argc != args_required) {
2164                DMWARN("Message received with %u arguments instead of %u.",
2165                       argc, args_required);
2166                return -EINVAL;
2167        }
2168
2169        return 0;
2170}
2171
2172static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2173{
2174        if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2175            *dev_id <= MAX_DEV_ID)
2176                return 0;
2177
2178        if (warning)
2179                DMWARN("Message received with invalid device id: %s", arg);
2180
2181        return -EINVAL;
2182}
2183
2184static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2185{
2186        dm_thin_id dev_id;
2187        int r;
2188
2189        r = check_arg_count(argc, 2);
2190        if (r)
2191                return r;
2192
2193        r = read_dev_id(argv[1], &dev_id, 1);
2194        if (r)
2195                return r;
2196
2197        r = dm_pool_create_thin(pool->pmd, dev_id);
2198        if (r) {
2199                DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2200                       argv[1]);
2201                return r;
2202        }
2203
2204        return 0;
2205}
2206
2207static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2208{
2209        dm_thin_id dev_id;
2210        dm_thin_id origin_dev_id;
2211        int r;
2212
2213        r = check_arg_count(argc, 3);
2214        if (r)
2215                return r;
2216
2217        r = read_dev_id(argv[1], &dev_id, 1);
2218        if (r)
2219                return r;
2220
2221        r = read_dev_id(argv[2], &origin_dev_id, 1);
2222        if (r)
2223                return r;
2224
2225        r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2226        if (r) {
2227                DMWARN("Creation of new snapshot %s of device %s failed.",
2228                       argv[1], argv[2]);
2229                return r;
2230        }
2231
2232        return 0;
2233}
2234
2235static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2236{
2237        dm_thin_id dev_id;
2238        int r;
2239
2240        r = check_arg_count(argc, 2);
2241        if (r)
2242                return r;
2243
2244        r = read_dev_id(argv[1], &dev_id, 1);
2245        if (r)
2246                return r;
2247
2248        r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2249        if (r)
2250                DMWARN("Deletion of thin device %s failed.", argv[1]);
2251
2252        return r;
2253}
2254
2255static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2256{
2257        dm_thin_id old_id, new_id;
2258        int r;
2259
2260        r = check_arg_count(argc, 3);
2261        if (r)
2262                return r;
2263
2264        if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2265                DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2266                return -EINVAL;
2267        }
2268
2269        if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2270                DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2271                return -EINVAL;
2272        }
2273
2274        r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2275        if (r) {
2276                DMWARN("Failed to change transaction id from %s to %s.",
2277                       argv[1], argv[2]);
2278                return r;
2279        }
2280
2281        return 0;
2282}
2283
2284static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2285{
2286        int r;
2287
2288        r = check_arg_count(argc, 1);
2289        if (r)
2290                return r;
2291
2292        (void) commit_or_fallback(pool);
2293
2294        r = dm_pool_reserve_metadata_snap(pool->pmd);
2295        if (r)
2296                DMWARN("reserve_metadata_snap message failed.");
2297
2298        return r;
2299}
2300
2301static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2302{
2303        int r;
2304
2305        r = check_arg_count(argc, 1);
2306        if (r)
2307                return r;
2308
2309        r = dm_pool_release_metadata_snap(pool->pmd);
2310        if (r)
2311                DMWARN("release_metadata_snap message failed.");
2312
2313        return r;
2314}
2315
2316/*
2317 * Messages supported:
2318 *   create_thin        <dev_id>
2319 *   create_snap        <dev_id> <origin_id>
2320 *   delete             <dev_id>
2322 *   set_transaction_id <current_trans_id> <new_trans_id>
2323 *   reserve_metadata_snap
2324 *   release_metadata_snap
2325 */
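/*
 * Illustrative usage only (the pool device name is hypothetical);
 * messages are sent through the normal dm message interface, e.g.:
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 */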
2326static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2327{
2328        int r = -EINVAL;
2329        struct pool_c *pt = ti->private;
2330        struct pool *pool = pt->pool;
2331
2332        if (!strcasecmp(argv[0], "create_thin"))
2333                r = process_create_thin_mesg(argc, argv, pool);
2334
2335        else if (!strcasecmp(argv[0], "create_snap"))
2336                r = process_create_snap_mesg(argc, argv, pool);
2337
2338        else if (!strcasecmp(argv[0], "delete"))
2339                r = process_delete_mesg(argc, argv, pool);
2340
2341        else if (!strcasecmp(argv[0], "set_transaction_id"))
2342                r = process_set_transaction_id_mesg(argc, argv, pool);
2343
2344        else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2345                r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2346
2347        else if (!strcasecmp(argv[0], "release_metadata_snap"))
2348                r = process_release_metadata_snap_mesg(argc, argv, pool);
2349
2350        else
2351                DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2352
2353        if (!r)
2354                (void) commit_or_fallback(pool);
2355
2356        return r;
2357}
2358
2359static void emit_flags(struct pool_features *pf, char *result,
2360                       unsigned sz, unsigned maxlen)
2361{
2362        unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2363                !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
2364        DMEMIT("%u ", count);
2365
2366        if (!pf->zero_new_blocks)
2367                DMEMIT("skip_block_zeroing ");
2368
2369        if (!pf->discard_enabled)
2370                DMEMIT("ignore_discard ");
2371
2372        if (!pf->discard_passdown)
2373                DMEMIT("no_discard_passdown ");
2374
2375        if (pf->mode == PM_READ_ONLY)
2376                DMEMIT("read_only ");
2377}
2378
2379/*
2380 * Status line is:
2381 *    <transaction id> <used metadata blocks>/<total metadata blocks>
2382 *    <used data blocks>/<total data blocks> <held metadata root>
 *    ro|rw (ignore_discard|discard_passdown|no_discard_passdown)
2383 */
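/*
 * e.g. (hypothetical values):
 *    1 141/13107 4204/1048576 - rw discard_passdown
 */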
2384static void pool_status(struct dm_target *ti, status_type_t type,
2385                        unsigned status_flags, char *result, unsigned maxlen)
2386{
2387        int r;
2388        unsigned sz = 0;
2389        uint64_t transaction_id;
2390        dm_block_t nr_free_blocks_data;
2391        dm_block_t nr_free_blocks_metadata;
2392        dm_block_t nr_blocks_data;
2393        dm_block_t nr_blocks_metadata;
2394        dm_block_t held_root;
2395        char buf[BDEVNAME_SIZE];
2396        char buf2[BDEVNAME_SIZE];
2397        struct pool_c *pt = ti->private;
2398        struct pool *pool = pt->pool;
2399
2400        switch (type) {
2401        case STATUSTYPE_INFO:
2402                if (get_pool_mode(pool) == PM_FAIL) {
2403                        DMEMIT("Fail");
2404                        break;
2405                }
2406
2407                /* Commit to ensure statistics aren't out-of-date */
2408                if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2409                        (void) commit_or_fallback(pool);
2410
2411                r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2412                if (r) {
2413                        DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
2414                        goto err;
2415                }
2416
2417                r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2418                if (r) {
2419                        DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
2420                        goto err;
2421                }
2422
2423                r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2424                if (r) {
2425                        DMERR("dm_pool_get_metadata_dev_size returned %d", r);
2426                        goto err;
2427                }
2428
2429                r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2430                if (r) {
2431                        DMERR("dm_pool_get_free_block_count returned %d", r);
2432                        goto err;
2433                }
2434
2435                r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2436                if (r) {
2437                        DMERR("dm_pool_get_data_dev_size returned %d", r);
2438                        goto err;
2439                }
2440
2441                r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2442                if (r) {
2443                        DMERR("dm_pool_get_metadata_snap returned %d", r);
2444                        goto err;
2445                }
2446
2447                DMEMIT("%llu %llu/%llu %llu/%llu ",
2448                       (unsigned long long)transaction_id,
2449                       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2450                       (unsigned long long)nr_blocks_metadata,
2451                       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2452                       (unsigned long long)nr_blocks_data);
2453
2454                if (held_root)
2455                        DMEMIT("%llu ", held_root);
2456                else
2457                        DMEMIT("- ");
2458
2459                if (pool->pf.mode == PM_READ_ONLY)
2460                        DMEMIT("ro ");
2461                else
2462                        DMEMIT("rw ");
2463
2464                if (!pool->pf.discard_enabled)
2465                        DMEMIT("ignore_discard");
2466                else if (pool->pf.discard_passdown)
2467                        DMEMIT("discard_passdown");
2468                else
2469                        DMEMIT("no_discard_passdown");
2470
2471                break;
2472
2473        case STATUSTYPE_TABLE:
2474                DMEMIT("%s %s %lu %llu ",
2475                       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2476                       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2477                       (unsigned long)pool->sectors_per_block,
2478                       (unsigned long long)pt->low_water_blocks);
2479                emit_flags(&pt->requested_pf, result, sz, maxlen);
2480                break;
2481        }
2482        return;
2483
2484err:
2485        DMEMIT("Error");
2486}
2487
2488static int pool_iterate_devices(struct dm_target *ti,
2489                                iterate_devices_callout_fn fn, void *data)
2490{
2491        struct pool_c *pt = ti->private;
2492
2493        return fn(ti, pt->data_dev, 0, ti->len, data);
2494}
2495
2496static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2497                      struct bio_vec *biovec, int max_size)
2498{
2499        struct pool_c *pt = ti->private;
2500        struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2501
2502        if (!q->merge_bvec_fn)
2503                return max_size;
2504
2505        bvm->bi_bdev = pt->data_dev->bdev;
2506
2507        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2508}
2509
2510static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2511{
2512        struct pool *pool = pt->pool;
2513        struct queue_limits *data_limits;
2514
2515        limits->max_discard_sectors = pool->sectors_per_block;
2516
2517        /*
2518         * discard_granularity is just a hint, and not enforced.
2519         */
2520        if (pt->adjusted_pf.discard_passdown) {
2521                data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2522                limits->discard_granularity = data_limits->discard_granularity;
2523        } else
2524                limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2525}
2526
2527static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2528{
2529        struct pool_c *pt = ti->private;
2530        struct pool *pool = pt->pool;
2531
2532        blk_limits_io_min(limits, 0);
2533        blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2534
2535        /*
2536         * pt->adjusted_pf is a staging area for the actual features to use.
2537         * They get transferred to the live pool in bind_control_target()
2538         * called from pool_preresume().
2539         */
2540        if (!pt->adjusted_pf.discard_enabled)
2541                return;
2542
2543        disable_passdown_if_not_supported(pt);
2544
2545        set_discard_limits(pt, limits);
2546}
2547
2548static struct target_type pool_target = {
2549        .name = "thin-pool",
2550        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2551                    DM_TARGET_IMMUTABLE,
2552        .version = {1, 7, 0},
2553        .module = THIS_MODULE,
2554        .ctr = pool_ctr,
2555        .dtr = pool_dtr,
2556        .map = pool_map,
2557        .postsuspend = pool_postsuspend,
2558        .preresume = pool_preresume,
2559        .resume = pool_resume,
2560        .message = pool_message,
2561        .status = pool_status,
2562        .merge = pool_merge,
2563        .iterate_devices = pool_iterate_devices,
2564        .io_hints = pool_io_hints,
2565};
2566
2567/*----------------------------------------------------------------
2568 * Thin target methods
2569 *--------------------------------------------------------------*/
2570static void thin_dtr(struct dm_target *ti)
2571{
2572        struct thin_c *tc = ti->private;
2573
2574        mutex_lock(&dm_thin_pool_table.mutex);
2575
2576        __pool_dec(tc->pool);
2577        dm_pool_close_thin_device(tc->td);
2578        dm_put_device(ti, tc->pool_dev);
2579        if (tc->origin_dev)
2580                dm_put_device(ti, tc->origin_dev);
2581        kfree(tc);
2582
2583        mutex_unlock(&dm_thin_pool_table.mutex);
2584}
2585
2586/*
2587 * Thin target parameters:
2588 *
2589 * <pool_dev> <dev_id> [origin_dev]
2590 *
2591 * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2592 * dev_id: the internal device identifier
2593 * origin_dev: a device external to the pool that should act as the origin
2594 *
2595 * If the pool device has discards disabled, they get disabled for the thin
2596 * device as well.
2597 */
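/*
 * Illustrative example (names and sizes are hypothetical): a 1GiB thin
 * volume backed by device id 0 of the pool:
 *
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 */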
2598static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2599{
2600        int r;
2601        struct thin_c *tc;
2602        struct dm_dev *pool_dev, *origin_dev;
2603        struct mapped_device *pool_md;
2604
2605        mutex_lock(&dm_thin_pool_table.mutex);
2606
2607        if (argc != 2 && argc != 3) {
2608                ti->error = "Invalid argument count";
2609                r = -EINVAL;
2610                goto out_unlock;
2611        }
2612
2613        tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2614        if (!tc) {
2615                ti->error = "Out of memory";
2616                r = -ENOMEM;
2617                goto out_unlock;
2618        }
2619
2620        if (argc == 3) {
2621                r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2622                if (r) {
2623                        ti->error = "Error opening origin device";
2624                        goto bad_origin_dev;
2625                }
2626                tc->origin_dev = origin_dev;
2627        }
2628
2629        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2630        if (r) {
2631                ti->error = "Error opening pool device";
2632                goto bad_pool_dev;
2633        }
2634        tc->pool_dev = pool_dev;
2635
2636        if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2637                ti->error = "Invalid device id";
2638                r = -EINVAL;
2639                goto bad_common;
2640        }
2641
2642        pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2643        if (!pool_md) {
2644                ti->error = "Couldn't get pool mapped device";
2645                r = -EINVAL;
2646                goto bad_common;
2647        }
2648
2649        tc->pool = __pool_table_lookup(pool_md);
2650        if (!tc->pool) {
2651                ti->error = "Couldn't find pool object";
2652                r = -EINVAL;
2653                goto bad_pool_lookup;
2654        }
2655        __pool_inc(tc->pool);
2656
2657        if (get_pool_mode(tc->pool) == PM_FAIL) {
2658                ti->error = "Couldn't open thin device; pool is in fail mode";
                r = -EINVAL;
2659                goto bad_thin_open;
2660        }
2661
2662        r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2663        if (r) {
2664                ti->error = "Couldn't open thin internal device";
2665                goto bad_thin_open;
2666        }
2667
2668        r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2669        if (r)
2670                goto bad_thin_open;
2671
2672        ti->num_flush_bios = 1;
2673        ti->flush_supported = true;
2674        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2675
2676        /* In case the pool supports discards, pass them on. */
2677        if (tc->pool->pf.discard_enabled) {
2678                ti->discards_supported = true;
2679                ti->num_discard_bios = 1;
2680                ti->discard_zeroes_data_unsupported = true;
2681                /* Discard bios must be split on a block boundary */
2682                ti->split_discard_bios = true;
2683        }
2684
2685        dm_put(pool_md);
2686
2687        mutex_unlock(&dm_thin_pool_table.mutex);
2688
2689        return 0;
2690
2691bad_thin_open:
2692        __pool_dec(tc->pool);
2693bad_pool_lookup:
2694        dm_put(pool_md);
2695bad_common:
2696        dm_put_device(ti, tc->pool_dev);
2697bad_pool_dev:
2698        if (tc->origin_dev)
2699                dm_put_device(ti, tc->origin_dev);
2700bad_origin_dev:
2701        kfree(tc);
2702out_unlock:
2703        mutex_unlock(&dm_thin_pool_table.mutex);
2704
2705        return r;
2706}
2707
2708static int thin_map(struct dm_target *ti, struct bio *bio)
2709{
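        /* Rebase the bio's sector onto the start of this target. */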
2710        bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
2711
2712        return thin_bio_map(ti, bio);
2713}
2714
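/*
 * On completion, release the deferred-set entries taken out in
 * thin_bio_map: mappings whose quiescing finishes here are queued for
 * the worker, as are any discards that were waiting on this io.
 */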
2715static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
2716{
2717        unsigned long flags;
2718        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2719        struct list_head work;
2720        struct dm_thin_new_mapping *m, *tmp;
2721        struct pool *pool = h->tc->pool;
2722
2723        if (h->shared_read_entry) {
2724                INIT_LIST_HEAD(&work);
2725                dm_deferred_entry_dec(h->shared_read_entry, &work);
2726
2727                spin_lock_irqsave(&pool->lock, flags);
2728                list_for_each_entry_safe(m, tmp, &work, list) {
2729                        list_del(&m->list);
2730                        m->quiesced = 1;
2731                        __maybe_add_mapping(m);
2732                }
2733                spin_unlock_irqrestore(&pool->lock, flags);
2734        }
2735
2736        if (h->all_io_entry) {
2737                INIT_LIST_HEAD(&work);
2738                dm_deferred_entry_dec(h->all_io_entry, &work);
2739                if (!list_empty(&work)) {
2740                        spin_lock_irqsave(&pool->lock, flags);
2741                        list_for_each_entry_safe(m, tmp, &work, list)
2742                                list_add(&m->list, &pool->prepared_discards);
2743                        spin_unlock_irqrestore(&pool->lock, flags);
2744                        wake_worker(pool);
2745                }
2746        }
2747
2748        return 0;
2749}
2750
2751static void thin_postsuspend(struct dm_target *ti)
2752{
2753        if (dm_noflush_suspending(ti))
2754                requeue_io((struct thin_c *)ti->private);
2755}
2756
2757/*
2758 * <nr mapped sectors> <highest mapped sector>
2759 */
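/*
 * For example (hypothetical figures), a thin device with 100 mapped
 * 128-sector blocks whose highest mapped block is 511 would report
 * "12800 65535".
 */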
2760static void thin_status(struct dm_target *ti, status_type_t type,
2761                        unsigned status_flags, char *result, unsigned maxlen)
2762{
2763        int r;
2764        ssize_t sz = 0;
2765        dm_block_t mapped, highest;
2766        char buf[BDEVNAME_SIZE];
2767        struct thin_c *tc = ti->private;
2768
2769        if (get_pool_mode(tc->pool) == PM_FAIL) {
2770                DMEMIT("Fail");
2771                return;
2772        }
2773
2774        if (!tc->td)
2775                DMEMIT("-");
2776        else {
2777                switch (type) {
2778                case STATUSTYPE_INFO:
2779                        r = dm_thin_get_mapped_count(tc->td, &mapped);
2780                        if (r) {
2781                                DMERR("dm_thin_get_mapped_count returned %d", r);
2782                                goto err;
2783                        }
2784
2785                        r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2786                        if (r < 0) {
2787                                DMERR("dm_thin_get_highest_mapped_block returned %d", r);
2788                                goto err;
2789                        }
2790
2791                        DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
2792                        if (r)
2793                                DMEMIT("%llu", ((highest + 1) *
2794                                                tc->pool->sectors_per_block) - 1);
2795                        else
2796                                DMEMIT("-");
2797                        break;
2798
2799                case STATUSTYPE_TABLE:
2800                        DMEMIT("%s %lu",
2801                               format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2802                               (unsigned long) tc->dev_id);
2803                        if (tc->origin_dev)
2804                                DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
2805                        break;
2806                }
2807        }
2808
2809        return;
2810
2811err:
2812        DMEMIT("Error");
2813}
2814
2815static int thin_iterate_devices(struct dm_target *ti,
2816                                iterate_devices_callout_fn fn, void *data)
2817{
2818        sector_t blocks;
2819        struct thin_c *tc = ti->private;
2820        struct pool *pool = tc->pool;
2821
2822        /*
2823         * We can't call dm_pool_get_data_dev_size() since that blocks.  So
2824         * we follow a more convoluted path through to the pool's target.
2825         */
2826        if (!pool->ti)
2827                return 0;       /* nothing is bound */
2828
2829        blocks = pool->ti->len;
2830        (void) sector_div(blocks, pool->sectors_per_block);
2831        if (blocks)
2832                return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
2833
2834        return 0;
2835}
2836
2837static struct target_type thin_target = {
2838        .name = "thin",
2839        .version = {1, 8, 0},
2840        .module = THIS_MODULE,
2841        .ctr = thin_ctr,
2842        .dtr = thin_dtr,
2843        .map = thin_map,
2844        .end_io = thin_endio,
2845        .postsuspend = thin_postsuspend,
2846        .status = thin_status,
2847        .iterate_devices = thin_iterate_devices,
2848};
2849
2850/*----------------------------------------------------------------*/
2851
2852static int __init dm_thin_init(void)
2853{
2854        int r;
2855
2856        pool_table_init();
2857
2858        r = dm_register_target(&thin_target);
2859        if (r)
2860                return r;
2861
2862        r = dm_register_target(&pool_target);
2863        if (r)
2864                goto bad_pool_target;
2865
2866        r = -ENOMEM;
2867
2868        _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
2869        if (!_new_mapping_cache)
2870                goto bad_new_mapping_cache;
2871
2872        return 0;
2873
2874bad_new_mapping_cache:
2875        dm_unregister_target(&pool_target);
2876bad_pool_target:
2877        dm_unregister_target(&thin_target);
2878
2879        return r;
2880}
2881
2882static void dm_thin_exit(void)
2883{
2884        dm_unregister_target(&thin_target);
2885        dm_unregister_target(&pool_target);
2886
2887        kmem_cache_destroy(_new_mapping_cache);
2888}
2889
2890module_init(dm_thin_init);
2891module_exit(dm_thin_exit);
2892
2893MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2894MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2895MODULE_LICENSE("GPL");
2896