linux/drivers/md/dm-snap.c
/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

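/*
 * Note: this compares the name pointer, not the string contents; it
 * relies on the snapshot-merge target type being registered with this
 * exact string object, so pointer identity is sufficient.
 */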
#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

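/*
 * Example: chunk 0x1234 hashes to bucket 0x1234 & 15 = 4.  Only chunks
 * with reads currently in flight live in this table, so 16 buckets are
 * plenty.
 */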
#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))

struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *      => don't load the target; abort.
         * - Header does not have "valid" flag set
         *      => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *      => don't load the target; abort.
         *         (We can't use the intermediate origin state.)
         * - I/O error while merging
         *      => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};

/*
 * state_bits:
 *   RUNNING_MERGE  - Merge operation is in progress.
 *   SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                    cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
                "A percentage of time allocated for copy on write");

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

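/*
 * Example: with an 8KiB chunk size (16 512-byte sectors), chunk_shift
 * is 4, so chunk 3 starts at sector 3 << 4 = 48.
 */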
static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;

        /*
         * For writing a complete chunk, bypassing the copy.
         */
        struct bio *full_bio;
        bio_end_io_t *full_bio_end_io;
        void *full_bio_private;
};

/*
 * Slab caches backing the completed (dm_exception) and pending
 * (dm_snap_pending_exception) exception structures.
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static void init_tracked_chunk(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        INIT_HLIST_NODE(&c->node);
}

static bool is_bio_tracked(struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        return !hlist_unhashed(&c->node);
}

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

        c->chunk = chunk;

        spin_lock_irq(&s->tracked_chunk_lock);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irq(&s->tracked_chunk_lock);
}

static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
{
        struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

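/*
 * Example: a device with dev_t MKDEV(253, 3) lands in bucket 3, the
 * low 8 bits of its minor number, matching the sizing note above.
 */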
static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}

/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

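/*
 * Example: snapshots with 64KiB, 16KiB and 8KiB chunks are kept on the
 * origin's list in that (descending) order; the new snapshot is added
 * just before the first entry with a smaller chunk size.
 */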
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);

        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
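/*
 * Example: with a non-zero hash_shift, exception_hash() is
 * (chunk >> hash_shift) & hash_mask, so chunks that differ only in
 * their low hash_shift bits share a bucket and a coalesced run of
 * consecutive chunks stays in one list.
 */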
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
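        /*
         * The barrier pairs with the smp_mb() in snapshot_dtr(): the
         * structure must be back in the mempool before
         * pending_exceptions_count can reach zero, because the
         * destructor busy-waits on that counter and then destroys the
         * pool.
         */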
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}

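/*
 * Example of the coalescing below: if e maps old chunks 10-12 to new
 * chunks 20-22 (consecutive count 2), inserting 13 -> 23 just bumps
 * the count, and inserting 9 -> 19 bumps the count and moves the
 * start back to 9 -> 19.  Anything else becomes a separate entry.
 */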
static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static uint32_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return (uint32_t) chunk_size;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        max_buckets = calc_max_buckets();

        hash_size = cow_dev_size >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}
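
/*
 * Example sizing: calc_max_buckets() allows 2MiB / 16 bytes = 131072
 * list heads on a 64-bit machine.  A 16GiB COW device with 8KiB
 * chunks has 2^21 chunks, so the completed table is capped at 131072
 * buckets and the pending table gets 131072 >> 3 = 16384.
 */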

static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
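/*
 * Example: for an exception covering old chunks 10-12, merging removes
 * chunk 12 first (decrementing the consecutive count), then 11, then
 * 10, at which point the count is zero and the exception is freed.
 */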
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}

static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;
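        /*
         * Example: prepare_merge() returns the last chunk of the linear
         * run, so with linear_chunks = 3 and old_chunk = 102 the copy
         * below starts at old_chunk 100.
         */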

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}

static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
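/*
 * For example, from userspace (device names are illustrative):
 *
 *   dmsetup create snap --table \
 *     "0 $(blockdev --getsz /dev/vg/base) snapshot \
 *      /dev/vg/base /dev/vg/cow P 16"
 *
 * creates a persistent (P) snapshot of /dev/vg/base with 16-sector
 * (8KiB) chunks.
 */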
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_bios = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_bios = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate private snapshot structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                r = -ENOMEM;
                goto bad_pending_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_bios = num_flush_bios;
        ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                goto bad_read_metadata;
        }

        r = dm_set_target_max_io_len(ti, s->store->chunk_size);
        if (r)
                goto bad_read_metadata;

        return 0;

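        /*
         * The labels below unwind in strict reverse order of
         * construction, so each error path frees only what was set up
         * before the failure.
         */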
bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

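/*
 * Called with both snapshots' locks held (see snapshot_resume()), so
 * no I/O can observe a half-completed handover.
 */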
static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->max_io_len = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Retry a list of deferred origin bios, resubmitting each one that
 * do_origin() says no longer needs to wait.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        struct bio *full_bio = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        full_bio = pe->full_bio;
        if (full_bio) {
                full_bio->bi_end_io = pe->full_bio_end_io;
                full_bio->bi_private = pe->full_bio_private;
        }
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error) {
                if (full_bio)
                        bio_io_error(full_bio);
                error_bios(snapshot_bios);
        } else {
                if (full_bio)
                        bio_endio(full_bio, 0);
                flush_bios(snapshot_bios);
        }

        retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}

static void full_bio_end_io(struct bio *bio, int error)
{
        void *callback_data = bio->bi_private;

        dm_kcopyd_do_callback(callback_data, 0, error ? 1 : 0);
}

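/*
 * Whole-chunk write optimisation: the bio itself (already remapped to
 * the COW device) carries the data, so no kcopyd copy is needed.
 * kcopyd is only used to route the completion through the usual
 * copy_callback() path.
 */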
static void start_full_bio(struct dm_snap_pending_exception *pe,
                           struct bio *bio)
{
        struct dm_snapshot *s = pe->snap;
        void *callback_data;

        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
        pe->full_bio_private = bio->bi_private;

        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);

        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;

        generic_make_request(bio);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;
        pe->full_bio = NULL;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}

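/*
 * Example: if e maps old chunks 10-12 to new chunks 20-22 and the bio
 * falls in chunk 11, it is redirected to new chunk 21; the low bits
 * kept by chunk_mask preserve the bio's offset within the chunk.
 */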
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}

static int snapshot_map(struct dm_target *ti, struct bio *bio)
{
        struct dm_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;

        init_tracked_chunk(bio);

        if (bio->bi_rw & REQ_FLUSH) {
                bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
                return -EIO;

        /* FIXME: should only take write lock if we need
         * to copy an exception */
        down_write(&s->lock);

        if (!s->valid) {
                r = -EIO;
                goto out_unlock;
        }

        /* If the block is already remapped - use that, else remap it */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
                remap_exception(s, e, bio, chunk);
                goto out_unlock;
        }

        /*
         * Write to snapshot - higher level takes care of RW/RO
         * flags so we should only get this if we are
         * writeable.
         */
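        /*
         * alloc_pending_exception() can sleep in mempool_alloc(), so
         * s->lock is dropped around it; both exception tables must then
         * be rechecked, because another thread may have completed or
         * started this chunk's exception in the meantime.
         */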
1615        if (bio_rw(bio) == WRITE) {
1616                pe = __lookup_pending_exception(s, chunk);
1617                if (!pe) {
1618                        up_write(&s->lock);
1619                        pe = alloc_pending_exception(s);
1620                        down_write(&s->lock);
1621
1622                        if (!s->valid) {
1623                                free_pending_exception(pe);
1624                                r = -EIO;
1625                                goto out_unlock;
1626                        }
1627
1628                        e = dm_lookup_exception(&s->complete, chunk);
1629                        if (e) {
1630                                free_pending_exception(pe);
1631                                remap_exception(s, e, bio, chunk);
1632                                goto out_unlock;
1633                        }
1634
1635                        pe = __find_pending_exception(s, pe, chunk);
1636                        if (!pe) {
1637                                __invalidate_snapshot(s, -ENOMEM);
1638                                r = -EIO;
1639                                goto out_unlock;
1640                        }
1641                }
1642
1643                remap_exception(s, &pe->e, bio, chunk);
1644
1645                r = DM_MAPIO_SUBMITTED;
1646
1647                if (!pe->started &&
1648                    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
1649                        pe->started = 1;
1650                        up_write(&s->lock);
1651                        start_full_bio(pe, bio);
1652                        goto out;
1653                }
1654
1655                bio_list_add(&pe->snapshot_bios, bio);
1656
1657                if (!pe->started) {
1658                        /* this is protected by snap->lock */
1659                        pe->started = 1;
1660                        up_write(&s->lock);
1661                        start_copy(pe);
1662                        goto out;
1663                }
1664        } else {
1665                bio->bi_bdev = s->origin->bdev;
1666                track_chunk(s, bio, chunk);
1667        }
1668
1669out_unlock:
1670        up_write(&s->lock);
1671out:
1672        return r;
1673}
1674
1675/*
1676 * A snapshot-merge target behaves like a combination of a snapshot
1677 * target and a snapshot-origin target.  It only generates new
1678 * exceptions in other snapshots and not in the one that is being
1679 * merged.
1680 *
1681 * For each chunk, if there is an existing exception, it is used to
1682 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
1683 * which in turn might generate exceptions in other snapshots.
1684 * If merging is currently taking place on the chunk in question, the
1685 * I/O is deferred by adding it to s->bios_queued_during_merge.
1686 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
{
        struct dm_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;

        init_tracked_chunk(bio);

        if (bio->bi_rw & REQ_FLUSH) {
                if (!dm_bio_get_target_bio_nr(bio))
                        bio->bi_bdev = s->origin->bdev;
                else
                        bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        down_write(&s->lock);

        /* Full merging snapshots are redirected to the origin */
        if (!s->valid)
                goto redirect_to_origin;

        /* If the block is already remapped - use that */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
                /* Queue writes overlapping with chunks being merged */
                if (bio_rw(bio) == WRITE &&
                    chunk >= s->first_merging_chunk &&
                    chunk < (s->first_merging_chunk +
                             s->num_merging_chunks)) {
                        bio->bi_bdev = s->origin->bdev;
                        bio_list_add(&s->bios_queued_during_merge, bio);
                        r = DM_MAPIO_SUBMITTED;
                        goto out_unlock;
                }

                remap_exception(s, e, bio, chunk);

                if (bio_rw(bio) == WRITE)
                        track_chunk(s, bio, chunk);
                goto out_unlock;
        }

redirect_to_origin:
        bio->bi_bdev = s->origin->bdev;

        if (bio_rw(bio) == WRITE) {
                up_write(&s->lock);
                return do_origin(s->origin, bio);
        }

out_unlock:
        up_write(&s->lock);

        return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error)
{
        struct dm_snapshot *s = ti->private;

        if (is_bio_tracked(bio))
                stop_tracking_chunk(s, bio);

        return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        stop_merge(s);
}

static int snapshot_preresume(struct dm_target *ti)
{
        int r = 0;
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_read(&snap_src->lock);
                if (s == snap_src) {
                        DMERR("Unable to resume snapshot source until "
                              "handover completes.");
                        r = -EINVAL;
                } else if (!dm_suspended(snap_src->ti)) {
                        DMERR("Unable to perform snapshot handover until "
                              "source is suspended.");
                        r = -EINVAL;
                }
                up_read(&snap_src->lock);
        }
        up_read(&_origins_lock);

        return r;
}
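
/*
 * Illustrative handover sequence (assuming userspace drives it in this
 * order, as lvm2 does when merging a snapshot):
 *  1. Load a new target (e.g. snapshot-merge) that shares the COW
 *     device of the existing snapshot.
 *  2. Suspend the old snapshot - snapshot_preresume() above refuses to
 *     resume the new target until this has happened.
 *  3. Resume the new target: snapshot_resume() below transfers the
 *     in-core exception tables via __handover_exceptions().
 */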

static void snapshot_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_write(&snap_src->lock);
                down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
                __handover_exceptions(snap_src, snap_dest);
                up_write(&snap_dest->lock);
                up_write(&snap_src->lock);
        }
        up_read(&_origins_lock);

        /* Now we have correct chunk size, reregister */
        reregister_snapshot(s);

        down_write(&s->lock);
        s->active = 1;
        up_write(&s->lock);
}

static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
{
        uint32_t min_chunksize;

        down_read(&_origins_lock);
        min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
        up_read(&_origins_lock);

        return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        /*
         * Handover exceptions from existing snapshot.
         */
        snapshot_resume(ti);

        /*
         * snapshot-merge acts as an origin, so set ti->max_io_len
         */
        ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev);

        start_merge(s);
}

static void snapshot_status(struct dm_target *ti, status_type_t type,
                            unsigned status_flags, char *result, unsigned maxlen)
{
        unsigned sz = 0;
        struct dm_snapshot *snap = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:

                down_write(&snap->lock);

                if (!snap->valid)
                        DMEMIT("Invalid");
                else if (snap->merge_failed)
                        DMEMIT("Merge failed");
                else {
                        if (snap->store->type->usage) {
                                sector_t total_sectors, sectors_allocated,
                                         metadata_sectors;
                                snap->store->type->usage(snap->store,
                                                         &total_sectors,
                                                         &sectors_allocated,
                                                         &metadata_sectors);
                                DMEMIT("%llu/%llu %llu",
                                       (unsigned long long)sectors_allocated,
                                       (unsigned long long)total_sectors,
                                       (unsigned long long)metadata_sectors);
                        } else
                                DMEMIT("Unknown");
                }

                up_write(&snap->lock);

                break;

        case STATUSTYPE_TABLE:
                /*
                 * Emit the table line: the origin and COW device names,
                 * followed by the exception store's own arguments.
                 */
                DMEMIT("%s %s", snap->origin->name, snap->cow->name);
                snap->store->type->status(snap->store, type, result + sz,
                                          maxlen - sz);
                break;
        }
}

static int snapshot_iterate_devices(struct dm_target *ti,
                                    iterate_devices_callout_fn fn, void *data)
{
        struct dm_snapshot *snap = ti->private;
        int r;

        r = fn(ti, snap->origin, 0, ti->len, data);

        if (!r)
                r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

        return r;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio is ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
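/*
 * Usage sketch (illustrative, not a verbatim excerpt): for a write,
 * origin_map() below effectively does
 *
 *      return do_origin(dev, bio);
 *
 * and the device-mapper core submits the bio itself when it sees
 * DM_MAPIO_REMAPPED.  On DM_MAPIO_SUBMITTED the bio must not be
 * touched again: it has been queued on a pending exception and is
 * reissued once the necessary copies complete.
 */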
static int __origin_write(struct list_head *snapshots, sector_t sector,
                          struct bio *bio)
{
        int r = DM_MAPIO_REMAPPED;
        struct dm_snapshot *snap;
        struct dm_exception *e;
        struct dm_snap_pending_exception *pe;
        struct dm_snap_pending_exception *pe_to_start_now = NULL;
        struct dm_snap_pending_exception *pe_to_start_last = NULL;
        chunk_t chunk;

        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {
                /*
                 * Don't make new exceptions in a merging snapshot
                 * because it has effectively been deleted
                 */
                if (dm_target_is_snapshot_merge(snap->ti))
                        continue;

                down_write(&snap->lock);

                /* Only deal with valid and active snapshots */
                if (!snap->valid || !snap->active)
                        goto next_snapshot;

                /* Nothing to do if writing beyond end of snapshot */
                if (sector >= dm_table_get_size(snap->ti->table))
                        goto next_snapshot;

                /*
                 * Remember, different snapshots can have
                 * different chunk sizes.
                 */
                chunk = sector_to_chunk(snap->store, sector);

                /*
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
                 */
                e = dm_lookup_exception(&snap->complete, chunk);
                if (e)
                        goto next_snapshot;

                pe = __lookup_pending_exception(snap, chunk);
                if (!pe) {
                        up_write(&snap->lock);
                        pe = alloc_pending_exception(snap);
                        down_write(&snap->lock);

                        if (!snap->valid) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        e = dm_lookup_exception(&snap->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        pe = __find_pending_exception(snap, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(snap, -ENOMEM);
                                goto next_snapshot;
                        }
                }

                r = DM_MAPIO_SUBMITTED;

                /*
                 * If an origin bio was supplied, queue it to wait for the
                 * completion of this exception, and start this one last,
                 * at the end of the function.
                 */
                if (bio) {
                        bio_list_add(&pe->origin_bios, bio);
                        bio = NULL;

                        if (!pe->started) {
                                pe->started = 1;
                                pe_to_start_last = pe;
                        }
                }

                if (!pe->started) {
                        pe->started = 1;
                        pe_to_start_now = pe;
                }

next_snapshot:
                up_write(&snap->lock);

                if (pe_to_start_now) {
                        start_copy(pe_to_start_now);
                        pe_to_start_now = NULL;
                }
        }

        /*
         * Submit the exception against which the bio is queued last,
         * to give the other exceptions a head start.
         */
        if (pe_to_start_last)
                start_copy(pe_to_start_last);

        return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
                r = __origin_write(&o->snapshots, bio->bi_sector, bio);
        up_read(&_origins_lock);

        return r;
}

/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
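/*
 * Worked example (hypothetical sizes): if the merging snapshot uses
 * 16-sector chunks and another snapshot of the same origin uses
 * 8-sector chunks, ti->max_io_len is 8, so a single 16-sector extent
 * is scanned in two steps and may reallocate two 8-sector chunks in
 * the other snapshot.
 */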
static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned size)
{
        int must_wait = 0;
        sector_t n;
        struct origin *o;

        /*
         * The origin's __minimum_chunk_size() got stored in max_io_len
         * by snapshot_merge_resume().
         */
        down_read(&_origins_lock);
        o = __lookup_origin(merging_snap->origin->bdev);
        for (n = 0; n < size; n += merging_snap->ti->max_io_len)
                if (__origin_write(&o->snapshots, sector + n, NULL) ==
                    DM_MAPIO_SUBMITTED)
                        must_wait = 1;
        up_read(&_origins_lock);

        return must_wait;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
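/*
 * Illustrative table line (hypothetical device and size):
 *
 *      0 2097152 snapshot-origin /dev/vg/origin
 */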
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc != 1) {
                ti->error = "origin: incorrect number of arguments";
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
        }

        ti->private = dev;
        ti->num_flush_bios = 1;

        return 0;
}

static void origin_dtr(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio)
{
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;

        if (bio->bi_rw & REQ_FLUSH)
                return DM_MAPIO_REMAPPED;

        /* Only tell snapshots if this is a write */
        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

/*
 * Set the target "max_io_len" field to the minimum of all the snapshots'
 * chunk sizes.
 */
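/*
 * Illustrative numbers: with two snapshots of this origin using 8- and
 * 16-sector chunks, max_io_len becomes 8, so no bio reaching
 * origin_map() straddles a chunk boundary of either snapshot (chunk
 * sizes are powers of two, so the smaller always divides the larger).
 */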
static void origin_resume(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;

        ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}

static void origin_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
{
        struct dm_dev *dev = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }
}

static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                        struct bio_vec *biovec, int max_size)
{
        struct dm_dev *dev = ti->private;
        struct request_queue *q = bdev_get_queue(dev->bdev);

        if (!q->merge_bvec_fn)
                return max_size;

        bvm->bi_bdev = dev->bdev;

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int origin_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct dm_dev *dev = ti->private;

        return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
        .name    = "snapshot-origin",
        .version = {1, 8, 1},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
        .merge   = origin_merge,
        .iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
        .name    = "snapshot",
        .version = {1, 11, 1},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .end_io  = snapshot_end_io,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};
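
/*
 * For reference, the "snapshot" target's table syntax, parsed by
 * snapshot_ctr() earlier in this file, is
 *
 *      <origin> <COW device> <persistent?> <chunksize>
 *
 * e.g. (hypothetical devices) "0 2097152 snapshot /dev/vg/origin
 * /dev/vg/cow P 8", where P selects a persistent exception store and
 * N a transient one.
 */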

static struct target_type merge_target = {
        .name    = dm_snapshot_merge_target_name,
        .version = {1, 2, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_merge_map,
        .end_io  = snapshot_end_io,
        .presuspend = snapshot_merge_presuspend,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_merge_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
        int r;

        r = dm_exception_store_init();
        if (r) {
                DMERR("Failed to initialize exception stores");
                return r;
        }

        r = dm_register_target(&snapshot_target);
        if (r < 0) {
                DMERR("snapshot target register failed %d", r);
                goto bad_register_snapshot_target;
        }

        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
                goto bad_register_origin_target;
        }

        r = dm_register_target(&merge_target);
        if (r < 0) {
                DMERR("Merge target register failed %d", r);
                goto bad_register_merge_target;
        }

        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
                goto bad_origin_hash;
        }

        exception_cache = KMEM_CACHE(dm_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
                goto bad_exception_cache;
        }

        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
                goto bad_pending_cache;
        }

        return 0;

bad_pending_cache:
        kmem_cache_destroy(exception_cache);
bad_exception_cache:
        exit_origin_hash();
bad_origin_hash:
        dm_unregister_target(&merge_target);
bad_register_merge_target:
        dm_unregister_target(&origin_target);
bad_register_origin_target:
        dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
        dm_exception_store_exit();

        return r;
}

static void __exit dm_snapshot_exit(void)
{
        dm_unregister_target(&snapshot_target);
        dm_unregister_target(&origin_target);
        dm_unregister_target(&merge_target);

        exit_origin_hash();
        kmem_cache_destroy(pending_cache);
        kmem_cache_destroy(exception_cache);

        dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
MODULE_ALIAS("dm-snapshot-origin");
MODULE_ALIAS("dm-snapshot-merge");