linux/drivers/md/dm-snap-persistent.c
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32        /* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots: by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented:
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
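
/*
 * A worked example of the layout, assuming the default 16KB
 * (32-sector) chunk size: each struct disk_exception is 16 bytes, so
 * one metadata area holds 16384 / 16 = 1024 exceptions.  Chunk 0 is
 * the header, chunk 1 the first metadata area, chunks 2-1025 the data
 * chunks it describes, chunk 1026 the next metadata area, and so on.
 */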

/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
        __le32 magic;

        /*
         * Is this snapshot valid?  There is no way of recovering
         * an invalid snapshot.
         */
        __le32 valid;

        /*
         * Simple, incrementing version.  No backward
         * compatibility.
         */
        __le32 version;

        /* In sectors */
        __le32 chunk_size;
} __packed;

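/*
 * An on-disk exception maps one chunk of the origin (old_chunk) to
 * the chunk of the COW device (new_chunk) that holds its saved data.
 */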
struct disk_exception {
        __le64 old_chunk;
        __le64 new_chunk;
} __packed;

/*
 * The in-core copy of an exception, in CPU byte order.
 */
struct core_exception {
        uint64_t old_chunk;
        uint64_t new_chunk;
};

struct commit_callback {
        void (*callback)(void *, int success);
        void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
        struct dm_exception_store *store;
        int version;
        int valid;
        uint32_t exceptions_per_area;

        /*
         * Now that we have an asynchronous kcopyd there is no
         * need for large chunk sizes, so it won't hurt to have a
         * whole chunk's worth of metadata in memory at once.
         */
        void *area;

        /*
         * An area of zeros used to clear the next area.
         */
        void *zero_area;

        /*
         * An area used for the header.  The header can be written
         * concurrently with metadata (when invalidating the snapshot),
         * so it needs a separate buffer.
         */
        void *header_area;

        /*
         * Used to keep track of which metadata area the data in
         * 'chunk' refers to.
         */
        chunk_t current_area;

        /*
         * The next free chunk for an exception.
         *
         * When creating exceptions, all the chunks here and above are
         * free.  It holds the next chunk to be allocated.  On rare
         * occasions (e.g. after a system crash) holes can be left in
         * the exception store because chunks can be committed out of
         * order.
         *
         * When merging exceptions, it does not necessarily mean all the
         * chunks here and above are free.  It holds the value it would
         * have held if all chunks had been committed in order of
         * allocation.  Consequently the value may occasionally be
         * slightly too low, but since it's only used for 'status' and
         * it can never reach its minimum value too early this doesn't
         * matter.
         */
        chunk_t next_free;

        /*
         * The index of the next free exception in the current
         * metadata area.
         */
        uint32_t current_committed;

        atomic_t pending_count;
        uint32_t callback_count;
        struct commit_callback *callbacks;
        struct dm_io_client *io_client;

        struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
        int r = -ENOMEM;
        size_t len;

        len = ps->store->chunk_size << SECTOR_SHIFT;

        /*
         * Allocate the chunk_size block of memory that will hold
         * a single metadata area.
         */
        ps->area = vmalloc(len);
        if (!ps->area)
                goto err_area;

        ps->zero_area = vzalloc(len);
        if (!ps->zero_area)
                goto err_zero_area;

        ps->header_area = vmalloc(len);
        if (!ps->header_area)
                goto err_header_area;

        return 0;

err_header_area:
        vfree(ps->zero_area);

err_zero_area:
        vfree(ps->area);

err_area:
        return r;
}

static void free_area(struct pstore *ps)
{
        /* vfree(NULL) is a no-op, so the areas can be freed unconditionally */
        vfree(ps->area);
        ps->area = NULL;

        vfree(ps->zero_area);
        ps->zero_area = NULL;

        vfree(ps->header_area);
        ps->header_area = NULL;
}

struct mdata_req {
        struct dm_io_region *where;
        struct dm_io_request *io_req;
        struct work_struct work;
        int result;
};

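/*
 * Worker function for chunk_io(): issues the synchronous metadata I/O
 * from workqueue context (see the comment in chunk_io() below).
 */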
static void do_metadata(struct work_struct *work)
{
        struct mdata_req *req = container_of(work, struct mdata_req, work);

        req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
                    int metadata)
{
        struct dm_io_region where = {
                .bdev = dm_snap_cow(ps->store->snap)->bdev,
                .sector = ps->store->chunk_size * chunk,
                .count = ps->store->chunk_size,
        };
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .mem.type = DM_IO_VMA,
                .mem.ptr.vma = area,
                .client = ps->io_client,
                .notify.fn = NULL,
        };
        struct mdata_req req;

        if (!metadata)
                return dm_io(&io_req, 1, &where, NULL);

        req.where = &where;
        req.io_req = &io_req;

        /*
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
        INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_workqueue(ps->metadata_wq);

        return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
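/*
 * For example, with 1024 exceptions per area the stride between
 * metadata chunks is 1025: area 0 begins at chunk 1, area 1 at
 * chunk 1026, and so on.
 */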
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
        return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

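/*
 * If ps->next_free has landed on a chunk reserved for metadata (i.e.
 * its offset within the stride of exceptions_per_area + 1 chunks
 * equals NUM_SNAPSHOT_HDR_CHUNKS), step over it.  Note that
 * sector_div() divides next_free in place and returns the remainder.
 */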
static void skip_metadata(struct pstore *ps)
{
        uint32_t stride = ps->exceptions_per_area + 1;
        chunk_t next_free = ps->next_free;

        if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
                ps->next_free++;
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
        chunk_t chunk = area_location(ps, ps->current_area);

        return chunk_io(ps, ps->area, chunk, rw, 0);
}

static void zero_memory_area(struct pstore *ps)
{
        memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
        return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

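/*
 * Read and validate the on-disk header.  An all-zero magic field
 * indicates a freshly created COW device, which is reported through
 * *new_snapshot.  If the header records a different chunk size from
 * the one in the table, the on-disk value wins and the areas are
 * reallocated to match.
 */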
static int read_header(struct pstore *ps, int *new_snapshot)
{
        int r;
        struct disk_header *dh;
        unsigned chunk_size;
        int chunk_size_supplied = 1;
        char *chunk_err;

        /*
         * Use the default chunk size (or logical_block_size, if
         * larger) if none was supplied.
         */
        if (!ps->store->chunk_size) {
                ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
                    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
                                            bdev) >> 9);
                ps->store->chunk_mask = ps->store->chunk_size - 1;
                ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
                chunk_size_supplied = 0;
        }

        ps->io_client = dm_io_client_create();
        if (IS_ERR(ps->io_client))
                return PTR_ERR(ps->io_client);

        r = alloc_area(ps);
        if (r)
                return r;

        r = chunk_io(ps, ps->header_area, 0, READ, 1);
        if (r)
                goto bad;

        dh = ps->header_area;

        if (le32_to_cpu(dh->magic) == 0) {
                *new_snapshot = 1;
                return 0;
        }

        if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
                DMWARN("Invalid or corrupt snapshot");
                r = -ENXIO;
                goto bad;
        }

        *new_snapshot = 0;
        ps->valid = le32_to_cpu(dh->valid);
        ps->version = le32_to_cpu(dh->version);
        chunk_size = le32_to_cpu(dh->chunk_size);

        if (ps->store->chunk_size == chunk_size)
                return 0;

        if (chunk_size_supplied)
                DMWARN("chunk size %u in device metadata overrides "
                       "table chunk size of %u.",
                       chunk_size, ps->store->chunk_size);

        /* We had a bogus chunk_size. Fix stuff up. */
        free_area(ps);

        r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
                                              &chunk_err);
        if (r) {
                DMERR("invalid on-disk chunk size %u: %s.",
                      chunk_size, chunk_err);
                return r;
        }

        r = alloc_area(ps);
        return r;

bad:
        free_area(ps);
        return r;
}

static int write_header(struct pstore *ps)
{
        struct disk_header *dh;

        memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

        dh = ps->header_area;
        dh->magic = cpu_to_le32(SNAP_MAGIC);
        dh->valid = cpu_to_le32(ps->valid);
        dh->version = cpu_to_le32(ps->version);
        dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

        return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
        BUG_ON(index >= ps->exceptions_per_area);

        return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
                           uint32_t index, struct core_exception *result)
{
        struct disk_exception *de = get_exception(ps, index);

        /* copy it */
        result->old_chunk = le64_to_cpu(de->old_chunk);
        result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
                            uint32_t index, struct core_exception *e)
{
        struct disk_exception *de = get_exception(ps, index);

        /* copy it */
        de->old_chunk = cpu_to_le64(e->old_chunk);
        de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
        struct disk_exception *de = get_exception(ps, index);

        /* clear it */
        de->old_chunk = 0;
        de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
                             int (*callback)(void *callback_context,
                                             chunk_t old, chunk_t new),
                             void *callback_context,
                             int *full)
{
        int r;
        unsigned int i;
        struct core_exception e;

        /* presume the area is full */
        *full = 1;

        for (i = 0; i < ps->exceptions_per_area; i++) {
                read_exception(ps, i, &e);

                /*
                 * If the new_chunk is pointing at the start of
                 * the COW device, where the header chunk is, we
                 * know that we've hit the end of the exceptions.
                 * Therefore the area is not full.
                 */
                if (e.new_chunk == 0LL) {
                        ps->current_committed = i;
                        *full = 0;
                        break;
                }

                /*
                 * Keep track of the start of the free chunks.
                 */
                if (ps->next_free <= e.new_chunk)
                        ps->next_free = e.new_chunk + 1;

                /*
                 * Otherwise we add the exception to the snapshot.
                 */
                r = callback(callback_context, e.old_chunk, e.new_chunk);
                if (r)
                        return r;
        }

        return 0;
}

static int read_exceptions(struct pstore *ps,
                           int (*callback)(void *callback_context, chunk_t old,
                                           chunk_t new),
                           void *callback_context)
{
        int r, full = 1;

        /*
         * Keep reading chunks and inserting exceptions until
         * we find a partially full area.
         */
        for (ps->current_area = 0; full; ps->current_area++) {
                r = area_io(ps, READ);
                if (r)
                        return r;

                r = insert_exceptions(ps, callback, callback_context, &full);
                if (r)
                        return r;
        }

        ps->current_area--;

        skip_metadata(ps);

        return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
        return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
                             sector_t *total_sectors,
                             sector_t *sectors_allocated,
                             sector_t *metadata_sectors)
{
        struct pstore *ps = get_info(store);

        *sectors_allocated = ps->next_free * store->chunk_size;
        *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

        /*
         * First chunk is the fixed header.
         * Then there are (ps->current_area + 1) metadata chunks, each one
         * separated from the next by ps->exceptions_per_area data chunks.
         */
        *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
                            store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        destroy_workqueue(ps->metadata_wq);

        /* Created in read_header */
        if (ps->io_client)
                dm_io_client_destroy(ps->io_client);
        free_area(ps);

        /* Allocated in persistent_read_metadata; vfree(NULL) is safe */
        vfree(ps->callbacks);

        kfree(ps);
}

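/*
 * Called when the snapshot is loaded: reads the header, sizes the
 * metadata areas, and then either sets up a brand-new store (writing
 * the header and zeroing the first metadata area) or replays every
 * committed exception into the snapshot via the supplied callback.
 */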
static int persistent_read_metadata(struct dm_exception_store *store,
                                    int (*callback)(void *callback_context,
                                                    chunk_t old, chunk_t new),
                                    void *callback_context)
{
        int r, uninitialized_var(new_snapshot);
        struct pstore *ps = get_info(store);

        /*
         * Read the snapshot header.
         */
        r = read_header(ps, &new_snapshot);
        if (r)
                return r;

        /*
         * Now we know the correct chunk_size, complete the initialisation.
         */
        ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
                                  sizeof(struct disk_exception);
        ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
                                   sizeof(*ps->callbacks));
        if (!ps->callbacks)
                return -ENOMEM;

        /*
         * Do we need to set up a new snapshot?
         */
        if (new_snapshot) {
                r = write_header(ps);
                if (r) {
                        DMWARN("write_header failed");
                        return r;
                }

                ps->current_area = 0;
                zero_memory_area(ps);
                r = zero_disk_area(ps, 0);
                if (r)
                        DMWARN("zero_disk_area(0) failed");
                return r;
        }

        /*
         * Sanity checks.
         */
        if (ps->version != SNAPSHOT_DISK_VERSION) {
                DMWARN("unable to handle snapshot disk version %d",
                       ps->version);
                return -EINVAL;
        }

        /*
         * The metadata is valid, but the snapshot itself has been
         * invalidated.
         */
        if (!ps->valid)
                return 1;

        /*
         * Read the metadata.
         */
        r = read_exceptions(ps, callback, callback_context);

        return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_exception *e)
{
        struct pstore *ps = get_info(store);
        sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

        /* Is there enough room? */
        if (size < ((ps->next_free + 1) * store->chunk_size))
                return -ENOSPC;

        e->new_chunk = ps->next_free;

        /*
         * Move on to the next free pending chunk, making sure to take
         * into account the location of the metadata chunks.
         */
        ps->next_free++;
        skip_metadata(ps);

        atomic_inc(&ps->pending_count);
        return 0;
}

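/*
 * Commit an exception into the in-core metadata area and queue its
 * completion callback.  The area is only written to disk (with flush
 * and FUA) once no exceptions remain in flight or the area has been
 * filled, so commits are effectively batched; all queued callbacks
 * are then run with the store's validity as their status.
 */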
static void persistent_commit_exception(struct dm_exception_store *store,
                                        struct dm_exception *e,
                                        void (*callback) (void *, int success),
                                        void *callback_context)
{
        unsigned int i;
        struct pstore *ps = get_info(store);
        struct core_exception ce;
        struct commit_callback *cb;

        ce.old_chunk = e->old_chunk;
        ce.new_chunk = e->new_chunk;
        write_exception(ps, ps->current_committed++, &ce);

        /*
         * Add the callback to the back of the array.  This code
         * is the only place where the callback array is
         * manipulated, and we know that it will never be called
         * multiple times concurrently.
         */
        cb = ps->callbacks + ps->callback_count++;
        cb->callback = callback;
        cb->context = callback_context;

        /*
         * If there are exceptions in flight and we have not yet
         * filled this metadata area there's nothing more to do.
         */
        if (!atomic_dec_and_test(&ps->pending_count) &&
            (ps->current_committed != ps->exceptions_per_area))
                return;

        /*
         * If we completely filled the current area, then wipe the next one.
         */
        if ((ps->current_committed == ps->exceptions_per_area) &&
            zero_disk_area(ps, ps->current_area + 1))
                ps->valid = 0;

        /*
         * Commit exceptions to disk.
         */
        if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
                ps->valid = 0;

        /*
         * Advance to the next area if this one is full.
         */
        if (ps->current_committed == ps->exceptions_per_area) {
                ps->current_committed = 0;
                ps->current_area++;
                zero_memory_area(ps);
        }

        for (i = 0; i < ps->callback_count; i++) {
                cb = ps->callbacks + i;
                cb->callback(cb->context, ps->valid);
        }

        ps->callback_count = 0;
}

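/*
 * Work out how many of the most recently committed exceptions can be
 * merged in one go, walking backwards from the last committed entry
 * and counting consecutive old/new chunk pairs.  Returns the count,
 * or 0 once every area has been consumed.
 */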
static int persistent_prepare_merge(struct dm_exception_store *store,
                                    chunk_t *last_old_chunk,
                                    chunk_t *last_new_chunk)
{
        struct pstore *ps = get_info(store);
        struct core_exception ce;
        int nr_consecutive;
        int r;

        /*
         * When the current area is empty, move back to the preceding area.
         */
        if (!ps->current_committed) {
                /*
                 * Have we finished?
                 */
                if (!ps->current_area)
                        return 0;

                ps->current_area--;
                r = area_io(ps, READ);
                if (r < 0)
                        return r;
                ps->current_committed = ps->exceptions_per_area;
        }

        read_exception(ps, ps->current_committed - 1, &ce);
        *last_old_chunk = ce.old_chunk;
        *last_new_chunk = ce.new_chunk;

        /*
         * Find the number of consecutive chunks within the current area,
         * working backwards.
         */
        for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
             nr_consecutive++) {
                read_exception(ps, ps->current_committed - 1 - nr_consecutive,
                               &ce);
                if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
                    ce.new_chunk != *last_new_chunk - nr_consecutive)
                        break;
        }

        return nr_consecutive;
}

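/*
 * Remove the nr_merged most recent exceptions from the on-disk
 * metadata (clearing them and rewriting the area with flush and FUA)
 * and wind the in-core counters back to match.
 */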
static int persistent_commit_merge(struct dm_exception_store *store,
                                   int nr_merged)
{
        int r, i;
        struct pstore *ps = get_info(store);

        BUG_ON(nr_merged > ps->current_committed);

        for (i = 0; i < nr_merged; i++)
                clear_exception(ps, ps->current_committed - 1 - i);

        r = area_io(ps, WRITE_FLUSH_FUA);
        if (r < 0)
                return r;

        ps->current_committed -= nr_merged;

        /*
         * At this stage, only persistent_usage() uses ps->next_free, so
         * we make no attempt to keep ps->next_free strictly accurate
         * as exceptions may have been committed out-of-order originally.
         * Once a snapshot has become merging, we set it to the value it
         * would have held had all the exceptions been committed in order.
         *
         * ps->current_area does not get reduced by prepare_merge() until
         * after commit_merge() has removed the nr_merged previous exceptions.
         */
        ps->next_free = area_location(ps, ps->current_area) +
                        ps->current_committed + 1;

        return 0;
}

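/*
 * Mark the snapshot invalid in the on-disk header.  As noted above,
 * there is no way of recovering an invalidated snapshot.
 */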
static void persistent_drop_snapshot(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        ps->valid = 0;
        if (write_header(ps))
                DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
                          unsigned argc, char **argv)
{
        struct pstore *ps;

        /* allocate the pstore */
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                return -ENOMEM;

        ps->store = store;
        ps->valid = 1;
        ps->version = SNAPSHOT_DISK_VERSION;
        ps->area = NULL;
        ps->zero_area = NULL;
        ps->header_area = NULL;
        ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
        ps->current_committed = 0;

        ps->callback_count = 0;
        atomic_set(&ps->pending_count, 0);
        ps->callbacks = NULL;

        ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
        if (!ps->metadata_wq) {
                kfree(ps);
                DMERR("couldn't start header metadata update thread");
                return -ENOMEM;
        }

        store->context = ps;

        return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
                                  status_type_t status, char *result,
                                  unsigned maxlen)
{
        unsigned sz = 0;

        switch (status) {
        case STATUSTYPE_INFO:
                break;
        case STATUSTYPE_TABLE:
                DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
        }

        return sz;
}

static struct dm_exception_store_type _persistent_type = {
        .name = "persistent",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .prepare_merge = persistent_prepare_merge,
        .commit_merge = persistent_commit_merge,
        .drop_snapshot = persistent_drop_snapshot,
        .usage = persistent_usage,
        .status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
        .name = "P",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .prepare_merge = persistent_prepare_merge,
        .commit_merge = persistent_commit_merge,
        .drop_snapshot = persistent_drop_snapshot,
        .usage = persistent_usage,
        .status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
        int r;

        r = dm_exception_store_type_register(&_persistent_type);
        if (r) {
                DMERR("Unable to register persistent exception store type");
                return r;
        }

        r = dm_exception_store_type_register(&_persistent_compat_type);
        if (r) {
                DMERR("Unable to register old-style persistent exception "
                      "store type");
                dm_exception_store_type_unregister(&_persistent_type);
                return r;
        }

        return r;
}

void dm_persistent_snapshot_exit(void)
{
        dm_exception_store_type_unregister(&_persistent_type);
        dm_exception_store_type_unregister(&_persistent_compat_type);
}