linux/drivers/md/dm-snap.c
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))
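
/*
 * Illustrative note (editor's sketch, not driver logic): the tracked-chunk
 * hash simply masks the low four bits of the chunk number, so with 16
 * buckets chunks 3, 19 and 35 all land in bucket 3:
 *
 *      DM_TRACKED_CHUNK_HASH(19) == (19 & 15) == 3
 */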

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *      => don't load the target; abort.
         * - Header does not have "valid" flag set
         *      => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *      => don't load the target; abort.
         *         (We can't use the intermediate origin state.)
         * - I/O error while merging
         *      => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}
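
/*
 * Worked example (editor's sketch): with a chunk size of 16 sectors,
 * chunk_shift is 4, so chunk 5 starts at sector 5 << 4 == 80.
 */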

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;

        /*
         * For writing a complete chunk, bypassing the copy.
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
        void *full_bio_private;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        c->chunk = chunk;

        spin_lock_irq(&s->tracked_chunk_lock);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}
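
/*
 * Worked example (editor's sketch): with snapshots of chunk sizes
 * 64, 16 and 8 already on the list, inserting one with chunk size 32
 * breaks at the 16-entry and lands before it: 64, 32, 16, 8.
 */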

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}
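
/*
 * Worked example (editor's sketch): with hash_shift == 3 and a
 * 64-bucket table (hash_mask == 63), chunks 0-7 all hash to bucket 0
 * and chunks 8-15 to bucket 1, so runs of consecutive chunks share a
 * bucket and can be coalesced into a single exception entry.
 */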

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}
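
/*
 * Worked example (editor's sketch): an exception with old_chunk == 10
 * and a consecutive count of 3 covers chunks 10 through 13 inclusive,
 * so a lookup of chunk 12 returns that single entry.
 */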

static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
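
/*
 * Worked example (editor's sketch): if the table holds an exception
 * mapping old chunks 10-12 to new chunks 20-22 (count == 2), inserting
 * old 13 -> new 23 extends it in place to 10-13 -> 20-23 and the new
 * entry is freed; a subsequent insert of old 9 -> new 19 extends it
 * downwards to 9-13 -> 19-23.
 */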

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return (uint32_t) chunk_size;
}
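
/*
 * Worked example (editor's sketch): for an origin with snapshots of
 * chunk sizes 64, 16 and 8 sectors, min_not_zero() walks the list and
 * returns 8; with no snapshots the accumulator stays 0.
 */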

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}
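
/*
 * Worked example (editor's sketch): on a 64-bit build a list_head is
 * two pointers (16 bytes), so 2MB / 16 caps the table at 131072
 * buckets.
 */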

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}
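
/*
 * Worked example (editor's sketch): a 1GB COW device (2097152 sectors)
 * with 16-sector chunks gives 131072 chunks; that already equals the
 * 2MB bucket cap, so the completed table gets 131072 buckets and the
 * pending table gets 131072 >> 3 == 16384.
 */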

static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}
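
/*
 * Worked example (editor's sketch): for an entry covering old chunks
 * 10-13 (count == 3), removing chunk 13 just decrements the count so
 * the entry covers 10-12; removing chunk 10 bumps old_chunk/new_chunk
 * first, so the entry then covers 11-13.
 */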

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
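
/*
 * Usage example (editor's sketch; the table syntax is
 * "<start> <len> snapshot <origin> <cow> <p|n> <chunk-size>"):
 *
 *      echo "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 16" | \
 *              dmsetup create snap
 *
 * creates a persistent snapshot of a 1GB origin with 16-sector chunks.
 */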
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_requests = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_requests = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create();
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }
        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_requests = num_flush_requests;
        ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                r = -EINVAL;
                goto bad_read_metadata;
        }

        r = dm_set_target_max_io_len(ti, s->store->chunk_size);
        if (r)
                goto bad_read_metadata;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        struct bio *full_bio = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        full_bio = pe->full_bio;
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
        }
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error) {
                if (full_bio)
                        bio_io_error(full_bio);
                error_bios(snapshot_bios);
        } else {
                if (full_bio)
                        bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
        }

        retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio, int error)
{
        void *callback_data = bio->bi_private;

        dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

static void start_full_bio(struct dm_snap_pending_exception *pe,
                           struct bio *bio)
{
        struct dm_snapshot *s = pe->snap;
        void *callback_data;

        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
        pe->full_bio_private = bio->bi_private;

        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);

        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;

        generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;
        pe->full_bio = NULL;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}
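
/*
 * Worked example (editor's sketch): with 16-sector chunks, a bio at
 * origin sector 85 (chunk 5, offset 5) hitting an exception that maps
 * old chunk 5 to new chunk 9 is redirected to the COW device at
 * sector (9 << 4) + 5 == 149.
 */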
1569
1570static int snapshot_map(struct dm_target *ti, struct bio *bio)
1571{
1572        struct dm_exception *e;
1573        struct dm_snapshot *s = ti->private;
1574        int r = DM_MAPIO_REMAPPED;
1575        chunk_t chunk;
1576        struct dm_snap_pending_exception *pe = NULL;
1577
1578        init_tracked_chunk(bio);
1579
1580        if (bio->bi_rw & REQ_FLUSH) {
1581                bio->bi_bdev = s->cow->bdev;
1582                return DM_MAPIO_REMAPPED;
1583        }
1584
1585        chunk = sector_to_chunk(s->store, bio->bi_sector);
1586
1587        /* Full snapshots are not usable */
1588        /* To get here the table must be live so s->active is always set. */
1589        if (!s->valid)
1590                return -EIO;
1591
1592        /* FIXME: should only take write lock if we need
1593         * to copy an exception */
1594        down_write(&s->lock);
1595
1596        if (!s->valid) {
1597                r = -EIO;
1598                goto out_unlock;
1599        }
1600
1601        /* If the block is already remapped - use that, else remap it */
1602        e = dm_lookup_exception(&s->complete, chunk);
1603        if (e) {
1604                remap_exception(s, e, bio, chunk);
1605                goto out_unlock;
1606        }
1607
1608        /*
1609         * Write to snapshot - higher level takes care of RW/RO
1610         * flags so we should only get this if we are
1611         * writeable.
1612         */
1613        if (bio_rw(bio) == WRITE) {
1614                pe = __lookup_pending_exception(s, chunk);
1615                if (!pe) {
1616                        up_write(&s->lock);
1617                        pe = alloc_pending_exception(s);
1618                        down_write(&s->lock);
1619
1620                        if (!s->valid) {
1621                                free_pending_exception(pe);
1622                                r = -EIO;
1623                                goto out_unlock;
1624                        }
1625
1626                        e = dm_lookup_exception(&s->complete, chunk);
1627                        if (e) {
1628                                free_pending_exception(pe);
1629                                remap_exception(s, e, bio, chunk);
1630                                goto out_unlock;
1631                        }
1632
1633                        pe = __find_pending_exception(s, pe, chunk);
1634                        if (!pe) {
1635                                __invalidate_snapshot(s, -ENOMEM);
1636                                r = -EIO;
1637                                goto out_unlock;
1638                        }
1639                }
1640
1641                remap_exception(s, &pe->e, bio, chunk);
1642
1643                r = DM_MAPIO_SUBMITTED;
1644
1645                if (!pe->started &&
1646                    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
1647                        pe->started = 1;
1648                        up_write(&s->lock);
1649                        start_full_bio(pe, bio);
1650                        goto out;
1651                }
1652
1653                bio_list_add(&pe->snapshot_bios, bio);
1654
1655                if (!pe->started) {
1656                        /* this is protected by snap->lock */
1657                        pe->started = 1;
1658                        up_write(&s->lock);
1659                        start_copy(pe);
1660                        goto out;
1661                }
1662        } else {
1663                bio->bi_bdev = s->origin->bdev;
1664                track_chunk(s, bio, chunk);
1665        }
1666
1667out_unlock:
1668        up_write(&s->lock);
1669out:
1670        return r;
1671}

/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	init_tracked_chunk(bio);

	if (bio->bi_rw & REQ_FLUSH) {
		if (!dm_bio_get_target_request_nr(bio))
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			track_chunk(s, bio, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}
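
/*
 * Worked example (added commentary, values assumed): with
 * first_merging_chunk == 100 and num_merging_chunks == 8, a WRITE to an
 * already-remapped chunk in [100, 108) is parked on
 * bios_queued_during_merge until that range finishes merging; I/O to
 * remapped chunks outside the window is redirected to the COW device
 * immediately.
 */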

static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct dm_snapshot *s = ti->private;

	if (is_bio_tracked(bio))
		stop_tracking_chunk(s, bio);

	return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}

static int snapshot_preresume(struct dm_target *ti)
{
	int r = 0;
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_read(&snap_src->lock);
		if (s == snap_src) {
			DMERR("Unable to resume snapshot source until "
			      "handover completes.");
			r = -EINVAL;
		} else if (!dm_suspended(snap_src->ti)) {
			DMERR("Unable to perform snapshot handover until "
			      "source is suspended.");
			r = -EINVAL;
		}
		up_read(&snap_src->lock);
	}
	up_read(&_origins_lock);

	return r;
}
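
/*
 * Illustrative userspace flow that satisfies the check above (device
 * name and table values are assumptions; this mirrors the usual way a
 * merge is started by reloading the snapshot device with a
 * snapshot-merge table):
 *
 *	dmsetup load vol --table "0 <size> snapshot-merge <origin> <cow> P 16"
 *	dmsetup suspend vol
 *	dmsetup resume vol
 *
 * The suspend leaves the old snapshot target suspended, so when the new
 * table's preresume runs, dm_suspended(snap_src->ti) is true and the
 * exception handover in snapshot_resume() is allowed to proceed.
 */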

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	uint32_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->max_io_len
	 */
	ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}

static void snapshot_status(struct dm_target *ti, status_type_t type,
			    unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}
}
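
/*
 * Example STATUSTYPE_INFO output (added commentary, numbers invented):
 * a valid snapshot that has allocated 2048 of 409600 sectors, with 8
 * sectors of exception-store metadata, reports
 *
 *	2048/409600 8
 *
 * while "Invalid" or "Merge failed" replaces the figures in the error
 * states handled above.
 */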

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;
	int r;

	r = fn(ti, snap->origin, 0, ti->len, data);

	if (!r)
		r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

	return r;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}
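
/*
 * Added note restating the flow above: the origin bio is queued on the
 * first snapshot's pending exception that needs a copy (bio is then set
 * to NULL), and that exception is remembered in pe_to_start_last.
 * Exceptions needed by later snapshots in the list are started
 * immediately via pe_to_start_now, so by the time the copy the bio is
 * actually waiting on is kicked off, the others already have a head
 * start.
 */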

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in max_io_len
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->max_io_len)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}
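
/*
 * Worked example (added commentary, sizes assumed): if the merging
 * snapshot uses 16-sector chunks and another snapshot of the same
 * origin uses 8-sector chunks, max_io_len holds the minimum, 8.
 * Writing back one 16-sector merge chunk therefore calls
 * __origin_write() at offsets 0 and 8, reallocating both of the smaller
 * snapshot's chunks that overlap the extent before the merge may
 * proceed.
 */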

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}
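
/*
 * Example table line (added commentary; device and size are invented):
 *
 *	0 2097152 snapshot-origin /dev/sdb1
 *
 * e.g. "dmsetup create base --table '0 2097152 snapshot-origin
 * /dev/sdb1'" maps the first 2097152 sectors of /dev/sdb1 and hooks
 * writes so that snapshots of it stay consistent.
 */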

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (bio->bi_rw & REQ_FLUSH)
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}

static void origin_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}
}

static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct dm_dev *dev = ti->private;
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = dev->bdev;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 8, 1},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.merge   = origin_merge,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 11, 1},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 2, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
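
/*
 * Example table lines for the three targets registered below (added
 * commentary; devices and sizes are invented, the snapshot arguments
 * are <origin> <COW device> <persistent?> <chunksize>):
 *
 *	0 2097152 snapshot-origin /dev/sdb1
 *	0 2097152 snapshot        /dev/sdb1 /dev/sdc1 P 16
 *	0 2097152 snapshot-merge  /dev/sdb1 /dev/sdc1 P 16
 *
 * "P" selects a persistent exception store; 16 is the chunk size in
 * 512-byte sectors.
 */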

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	return 0;

bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");