linux/drivers/md/dm-table.c
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
        struct mapped_device *md;
        unsigned type;

        /* btree table */
        unsigned int depth;
        unsigned int counts[MAX_DEPTH]; /* in nodes */
        sector_t *index[MAX_DEPTH];

        unsigned int num_targets;
        unsigned int num_allocated;
        sector_t *highs;
        struct dm_target *targets;

        struct target_type *immutable_target_type;
        unsigned integrity_supported:1;
        unsigned singleton:1;

        /*
         * Indicates the rw permissions for the new logical
         * device.  This should be a combination of FMODE_READ
         * and FMODE_WRITE.
         */
        fmode_t mode;

        /* a list of devices used by this table */
        struct list_head devices;

        /* events get handed up using this callback */
        void (*event_fn)(void *);
        void *event_context;

        struct dm_md_mempools *mempools;

        struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_base(n)): the number of times n must be
 * divided by base, rounding up, before it reaches 1.
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}
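
/*
 * Worked example (illustrative, assuming a 64-byte L1 cache line and an
 * 8-byte sector_t): NODE_SIZE = 64, so KEYS_PER_NODE = 8 and
 * CHILDREN_PER_NODE = 9.  A table of 1000 targets then needs
 * dm_div_up(1000, 8) = 125 leaf nodes, and int_log(125, 9) computes
 * 125 -> 14 -> 2 -> 1 in three rounded-up divisions, returning 3, so
 * dm_table_build_index() below ends up with a btree 1 + 3 = 4 levels deep.
 */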

/*
 * Calculate the index of the child node for the k'th key of the n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could look up from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) - 1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
        unsigned long size;
        void *addr;

        /*
         * Check that we're not going to overflow.
         */
        if (nmemb > (ULONG_MAX / elem_size))
                return NULL;

        size = nmemb * elem_size;
        addr = vzalloc(size);

        return addr;
}
EXPORT_SYMBOL(dm_vcalloc);
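
/*
 * Illustrative note on the overflow check above: on a 32-bit machine
 * (ULONG_MAX == 2^32 - 1), dm_vcalloc(0x20000000, 16) would wrap the
 * byte count to zero; because 0x20000000 > ULONG_MAX / 16, the request
 * is refused with NULL rather than silently allocating too little.
 */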

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;
        int n = t->num_targets;

        /*
         * Allocate both the target array and offset array at once.
         * Append an empty entry to catch sectors beyond the end of
         * the device.
         */
        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        if (n) {
                memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
                memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
        }

        memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
        vfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}

int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);
        INIT_LIST_HEAD(&t->target_callbacks);

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                return -ENOMEM;
        }

        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}
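
/*
 * Typical lifecycle, as driven by the table_load ioctl path in
 * dm-ioctl.c (illustrative sketch only, error handling elided):
 *
 *        struct dm_table *t;
 *
 *        dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *        dm_table_add_target(t, "linear", 0, len, "/dev/sdb 0");
 *        dm_table_complete(t);  (builds index, sets type, allocates mempools)
 *        ...
 *        dm_table_destroy(t);
 */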

static void free_devices(struct list_head *devices)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);
                DMWARN("dm_table_destroy: dm_put_device call missing for %s",
                       dd->dm_dev.name);
                kfree(dd);
        }
}

void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        if (!t)
                return;

        /* free the indexes */
        if (t->depth >= 2)
                vfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        vfree(t->highs);

        /* free the device list */
        free_devices(&t->devices);

        dm_free_md_mempools(t->mempools);

        kfree(t);
}

/*
 * Checks to see if we need to extend highs or targets.
 */
static inline int check_space(struct dm_table *t)
{
        if (t->num_targets >= t->num_allocated)
                return alloc_targets(t, t->num_allocated * 2);

        return 0;
}
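
/*
 * Illustrative note: together with dm_table_create() above, this gives
 * geometric growth.  With an 8-slot default (KEYS_PER_NODE on a 64-byte
 * cache line), capacity doubles to 16, 32, ... as targets are added, so
 * loading N targets performs only O(log N) reallocations.
 */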

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry (dd, l, list)
                if (dd->dm_dev.bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * Open a device so we can use it as a map destination.
 */
static int open_dev(struct dm_dev_internal *d, dev_t dev,
                    struct mapped_device *md)
{
        static char *_claim_ptr = "I belong to device-mapper";
        struct block_device *bdev;

        int r;

        BUG_ON(d->dm_dev.bdev);

        bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        r = bd_link_disk_holder(bdev, dm_disk(md));
        if (r) {
                blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
                return r;
        }

        d->dm_dev.bdev = bdev;
        return 0;
}

/*
 * Close a device that we've been using.
 */
static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
{
        if (!d->dm_dev.bdev)
                return;

        bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
        blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
        d->dm_dev.bdev = NULL;
}

/*
 * If possible, this checks whether an area of a destination device
 * is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q;
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size =
                i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
        unsigned short logical_block_size_sectors =
                limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];

        /*
         * Some devices exist without request functions,
         * such as loop devices not yet bound to backing files.
         * Forbid the use of such devices.
         */
        q = bdev_get_queue(bdev);
        if (!q || !q->make_request_fn) {
                DMWARN("%s: %s is not yet initialised: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        if (!dev_size)
                return 0;

        if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %s too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        if (logical_block_size_sectors <= 1)
                return 0;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev_internal dd_new, dd_old;

        dd_new = dd_old = *dd;

        dd_new.dm_dev.mode |= new_mode;
        dd_new.dm_dev.bdev = NULL;

        r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
        if (r)
                return r;

        dd->dm_dev.mode |= new_mode;
        close_dev(&dd_old, md);

        return 0;
}

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result)
{
        int r;
        dev_t uninitialized_var(dev);
        struct dm_dev_internal *dd;
        unsigned int major, minor;
        struct dm_table *t = ti->table;
        char dummy;

        BUG_ON(!t);

        if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
                /* Extract the major/minor numbers */
                dev = MKDEV(major, minor);
                if (MAJOR(dev) != major || MINOR(dev) != minor)
                        return -EOVERFLOW;
        } else {
                /* convert the path to a device */
                struct block_device *bdev = lookup_bdev(path);

                if (IS_ERR(bdev))
                        return PTR_ERR(bdev);
                dev = bdev->bd_dev;
                bdput(bdev);
        }

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                dd->dm_dev.mode = mode;
                dd->dm_dev.bdev = NULL;

                if ((r = open_dev(dd, dev, t->md))) {
                        kfree(dd);
                        return r;
                }

                format_dev_t(dd->dm_dev.name, dev);

                atomic_set(&dd->count, 0);
                list_add(&dd->list, &t->devices);

        } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        atomic_inc(&dd->count);

        *result = &dd->dm_dev;
        return 0;
}
EXPORT_SYMBOL(dm_get_device);
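
/*
 * Illustrative use from a hypothetical target constructor (example_ctr
 * and the ti->private stashing are illustrative, not from this file):
 * each dm_get_device() must be balanced by dm_put_device() in the dtr:
 *
 *        static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *        {
 *                struct dm_dev *dev;
 *                int r;
 *
 *                r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
 *                if (r) {
 *                        ti->error = "Device lookup failed";
 *                        return r;
 *                }
 *                ti->private = dev;
 *                return 0;
 *        }
 */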

int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                         sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char b[BDEVNAME_SIZE];

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %s",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (bdev_stack_limits(limits, bdev, start) < 0)
                DMWARN("%s: adding target device %s caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);

        /*
         * Check if merge fn is supported.
         * If not we'll force DM to use PAGE_SIZE or
         * smaller I/O, just to be safe.
         */
        if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
                blk_limits_max_hw_sectors(limits,
                                          (unsigned int) (PAGE_SIZE >> 9));
        return 0;
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
                                                  dm_dev);

        if (atomic_dec_and_test(&dd->count)) {
                close_dev(dd, ti->table->md);
                list_del(&dd->list);
                kfree(dd);
        }
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}

/*
 * Used to dynamically allocate the arg array.
 */
static char **realloc_argv(unsigned *array_size, char **old_argv)
{
        char **argv;
        unsigned new_size;

        new_size = *array_size ? *array_size * 2 : 64;
        argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
        if (argv) {
                memcpy(argv, old_argv, *array_size * sizeof(*argv));
                *array_size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                /* Skip whitespace */
                start = skip_spaces(end);

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to remove any back-quotes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}
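
/*
 * Example (illustrative): dm_split_args() rewrites its input in place,
 * so a writable buffer holding
 *
 *        "0 409600 mirror core 2 64 nosync"
 *
 * yields argc = 7 and argv = { "0", "409600", "mirror", "core", "2",
 * "64", "nosync" }.  A backslash quotes the following character, so
 * "a\ b" parses as the single argument "a b".
 */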

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                 struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        /*
         * Check each entry in the table in turn.
         */
        while (i < dm_table_get_num_targets(table)) {
                ti = dm_table_get_target(table, i++);

                blk_set_stacking_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry, are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}
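
/*
 * Worked example (illustrative): with a 4096-byte logical block size,
 * device_logical_block_size_sects is 8.  A first target of 1000 sectors
 * ends on an 8-sector boundary (1000 & 7 == 0), so remaining stays 0.
 * Were it 1001 sectors instead, remaining would become 7, and unless
 * the second target's own logical_block_size can accept a 7-sector
 * piece (i.e. it is 512 bytes), the loop breaks and the table load
 * fails with -EINVAL.
 */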

int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if (t->singleton) {
                DMERR("%s: target type %s must appear alone in table",
                      dm_device_name(t->md), t->targets->type->name);
                return -EINVAL;
        }

        if ((r = check_space(t)))
                return r;

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md),
                      type);
                return -EINVAL;
        }

        if (dm_target_needs_singleton(tgt->type)) {
                if (t->num_targets) {
                        DMERR("%s: target type %s must appear alone in table",
                              dm_device_name(t->md), type);
                        return -EINVAL;
                }
                t->singleton = 1;
        }

        if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
                DMERR("%s: target type %s may not be included in read-only tables",
                      dm_device_name(t->md), type);
                return -EINVAL;
        }

        if (t->immutable_target_type) {
                if (t->immutable_target_type != tgt->type) {
                        DMERR("%s: immutable target type %s cannot be mixed with other target types",
                              dm_device_name(t->md), t->immutable_target_type->name);
                        return -EINVAL;
                }
        } else if (dm_target_is_immutable(tgt->type)) {
                if (t->num_targets) {
                        DMERR("%s: immutable target type %s cannot be mixed with other target types",
                              dm_device_name(t->md), tgt->type->name);
                        return -EINVAL;
                }
                t->immutable_target_type = tgt->type;
        }

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                r = -EINVAL;
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters (insufficient memory)";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        if (!tgt->num_discard_bios && tgt->discards_supported)
                DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                       dm_device_name(t->md), type);

        return 0;

 bad:
        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
}
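
/*
 * Example (illustrative): the dmsetup table line
 *
 *        0 1024 linear /dev/sda 0
 *
 * arrives here as type = "linear", start = 0, len = 1024 and
 * params = "/dev/sda 0".  The params string is split and passed to the
 * linear target's ctr, and t->highs[] records 1023, the last sector
 * the new target covers.
 */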

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
                             unsigned *value, char **error, unsigned grouped)
{
        const char *arg_str = dm_shift_arg(arg_set);
        char dummy;

        if (!arg_str ||
            (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
            (*value < arg->min) ||
            (*value > arg->max) ||
            (grouped && arg_set->argc < *value)) {
                *error = arg->error;
                return -EINVAL;
        }

        return 0;
}

int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
        char *r;

        if (as->argc) {
                as->argc--;
                r = *as->argv;
                as->argv++;
                return r;
        }

        return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
        BUG_ON(as->argc < num_args);
        as->argc -= num_args;
        as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
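
/*
 * Illustrative sketch of the helpers above, parsing a
 * "<count> <feature>..." group the way a hypothetical target might
 * (_args, num_features, handle_feature and the bounds are illustrative
 * names, not from this file):
 *
 *        static struct dm_arg _args[] = {
 *                {0, 4, "Invalid number of feature arguments"},
 *        };
 *        unsigned num_features;
 *        struct dm_arg_set as = { .argc = argc, .argv = argv };
 *
 *        r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *        while (!r && num_features--)
 *                handle_feature(dm_shift_arg(&as));
 */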

static int dm_table_set_type(struct dm_table *t)
{
        unsigned i;
        unsigned bio_based = 0, request_based = 0, hybrid = 0;
        struct dm_target *tgt;
        struct dm_dev_internal *dd;
        struct list_head *devices;
        unsigned live_md_type;

        for (i = 0; i < t->num_targets; i++) {
                tgt = t->targets + i;
                if (dm_target_hybrid(tgt))
                        hybrid = 1;
                else if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                if (bio_based && request_based) {
                        DMWARN("Inconsistent table: different target types"
                               " can't be mixed up");
                        return -EINVAL;
                }
        }

        if (hybrid && !bio_based && !request_based) {
                /*
                 * The targets can work either way.
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
                live_md_type = dm_get_md_type(t->md);
                if (live_md_type == DM_TYPE_REQUEST_BASED)
                        request_based = 1;
                else
                        bio_based = 1;
        }

        if (bio_based) {
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                return 0;
        }

        BUG_ON(!request_based); /* No targets in this table */

        /* Non-request-stackable devices can't be used for request-based dm */
        devices = dm_table_get_devices(t);
        list_for_each_entry(dd, devices, list) {
                if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
                        DMWARN("table load rejected: including"
                               " non-request-stackable devices");
                        return -EINVAL;
                }
        }

        /*
         * Request-based dm currently supports only tables with a single
         * target.  Supporting multiple targets would require request
         * splitting, which needs substantial block-layer changes
         * (e.g. a request completion process for partial completion).
         */
        if (t->num_targets > 1) {
                DMWARN("Request-based dm doesn't support multiple targets yet");
                return -EINVAL;
        }

        t->type = DM_TYPE_REQUEST_BASED;

        return 0;
}

unsigned dm_table_get_type(struct dm_table *t)
{
        return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
        return t->immutable_target_type;
}

bool dm_table_request_based(struct dm_table *t)
{
        return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
}

int dm_table_alloc_md_mempools(struct dm_table *t)
{
        unsigned type = dm_table_get_type(t);
        unsigned per_bio_data_size = 0;
        struct dm_target *tgt;
        unsigned i;

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        if (type == DM_TYPE_BIO_BASED)
                for (i = 0; i < t->num_targets; i++) {
                        tgt = t->targets + i;
                        per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
                }

        t->mempools = dm_alloc_md_mempools(type, t->integrity_supported, per_bio_data_size);
        if (!t->mempools)
                return -ENOMEM;

        return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
        dm_free_md_mempools(t->mempools);
        t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
        return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * If %match_all is true, all devices' profiles must match.
 * If %match_all is false, all devices must at least have an
 * allocated integrity profile; but uninitialized is ok.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t,
                                                   bool match_all)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
        struct gendisk *prev_disk = NULL, *template_disk = NULL;

        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev.bdev->bd_disk;
                if (!blk_get_integrity(template_disk))
                        goto no_integrity;
                if (!match_all && !blk_integrity_is_initialized(template_disk))
                        continue; /* skip uninitialized profiles */
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
                prev_disk = template_disk;
        }

        return template_disk;

no_integrity:
        if (prev_disk)
                DMWARN("%s: integrity not set: %s and %s profile mismatch",
                       dm_device_name(t->md),
                       prev_disk->disk_name,
                       template_disk->disk_name);
        return NULL;
}

/*
 * Register the mapped device for blk_integrity support if
 * the underlying devices have an integrity profile.  But the devices
 * may not all have matching profiles (checking all devices isn't
 * reliable during table load because this table may use other DM
 * device(s) which must be resumed before they will have an
 * initialized integrity profile).
 * Stacked DM devices force a two-stage integrity profile validation:
 * 1 - during load, validate all initialized integrity profiles match
 * 2 - during resume, validate all integrity profiles match
 */
static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
{
        struct gendisk *template_disk = NULL;

        template_disk = dm_table_get_integrity_disk(t, false);
        if (!template_disk)
                return 0;

        if (!blk_integrity_is_initialized(dm_disk(md))) {
                t->integrity_supported = 1;
                return blk_integrity_register(dm_disk(md), NULL);
        }

        /*
         * If the DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
        if (blk_integrity_is_initialized(template_disk) &&
            blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
                       template_disk->disk_name);
                return 1;
        }

        /* Preserve existing initialized integrity profile */
        t->integrity_supported = 1;
        return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
        int r;

        r = dm_table_set_type(t);
        if (r) {
                DMERR("unable to set table type");
                return r;
        }

        r = dm_table_build_index(t);
        if (r) {
                DMERR("unable to build btrees");
                return r;
        }

        r = dm_table_prealloc_integrity(t, t->md);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
        }

        r = dm_table_alloc_md_mempools(t);
        if (r)
                DMERR("unable to allocate mempools");

        return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        /*
         * You can no longer call dm_table_event() from interrupt
         * context, use a bottom half instead.
         */
        BUG_ON(in_interrupt());

        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}
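
/*
 * Worked example (illustrative, assuming KEYS_PER_NODE == 8): with two
 * targets covering sectors 0-999 and 1000-1999, depth is 1 and the leaf
 * holds highs { 999, 1999, -1, ... }.  A lookup of sector 1000 scans
 * until node[k] >= 1000, stops at k = 1 and returns the second target.
 * A sector past 1999 matches one of the all-ones padding entries and
 * returns an out-of-range slot, which dm_target_is_valid() rejects.
 */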

static int count_device(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
{
        unsigned *num_devices = data;

        (*num_devices)++;

        return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
        struct dm_target *uninitialized_var(ti);
        unsigned i = 0, num_devices = 0;

        while (i < dm_table_get_num_targets(table)) {
                ti = dm_table_get_target(table, i++);

                if (!ti->type->iterate_devices)
                        return false;

                ti->type->iterate_devices(ti, count_device, &num_devices);
                if (num_devices)
                        return false;
        }

        return true;
}

/*
 * Establish the new table's queue_limits and validate them.
 */
int dm_calculate_queue_limits(struct dm_table *table,
                              struct queue_limits *limits)
{
        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i = 0;

        blk_set_stacking_limits(limits);

        while (i < dm_table_get_num_targets(table)) {
                blk_set_stacking_limits(&ti_limits);

                ti = dm_table_get_target(table, i++);

                if (!ti->type->iterate_devices)
                        goto combine_limits;

                /*
                 * Combine queue limits of all the devices this target uses.
                 */
                ti->type->iterate_devices(ti, dm_set_device_limits,
                                          &ti_limits);

                /* Set I/O hints portion of queue limits */
                if (ti->type->io_hints)
                        ti->type->io_hints(ti, &ti_limits);

                /*
                 * Check each device area is consistent with the target's
                 * overall queue limits.
                 */
                if (ti->type->iterate_devices(ti, device_area_is_invalid,
                                              &ti_limits))
                        return -EINVAL;

combine_limits:
                /*
                 * Merge this target's queue limits into the overall limits
                 * for the table.
                 */
                if (blk_stack_limits(limits, &ti_limits, 0) < 0)
                        DMWARN("%s: adding target device "
                               "(start sect %llu len %llu) "
                               "caused an alignment inconsistency",
                               dm_device_name(table->md),
                               (unsigned long long) ti->begin,
                               (unsigned long long) ti->len);
        }

        return validate_hardware_logical_block_alignment(table, limits);
}

/*
 * Set the integrity profile for this device if all devices used have
 * matching profiles.  We're quite deep in the resume path but still
 * don't know if all devices (particularly DM devices this device
 * may be stacked on) have matching profiles.  Even if the profiles
 * don't match we have no way to fail (to resume) at this point.
 */
static void dm_table_set_integrity(struct dm_table *t)
{
        struct gendisk *template_disk = NULL;

        if (!blk_get_integrity(dm_disk(t->md)))
                return;

        template_disk = dm_table_get_integrity_disk(t, true);
        if (template_disk)
                blk_integrity_register(dm_disk(t->md),
                                       blk_get_integrity(template_disk));
        else if (blk_integrity_is_initialized(dm_disk(t->md)))
                DMWARN("%s: device no longer has a valid integrity profile",
                       dm_device_name(t->md));
        else
                DMWARN("%s: unable to establish an integrity profile",
                       dm_device_name(t->md));
}

static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        unsigned flush = (*(unsigned *)data);
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && (q->flush_flags & flush);
}

static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
{
        struct dm_target *ti;
        unsigned i = 0;

        /*
         * Require at least one underlying device to support flushes.
         * t->devices includes internal dm devices such as mirror logs
         * so we need to use iterate_devices here, which targets
         * supporting flushes must provide.
         */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->num_flush_bios)
                        continue;

                if (ti->flush_supported)
                        return 1;

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_flush_capable, &flush))
                        return 1;
        }

        return 0;
}

static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        /* Ensure that all targets support discard_zeroes_data. */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (ti->discard_zeroes_data_unsupported)
                        return 0;
        }

        return 1;
}

static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
                            sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_nonrot(q);
}

static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && !blk_queue_add_random(q);
}

static bool dm_table_all_devices_attribute(struct dm_table *t,
                                           iterate_devices_callout_fn func)
{
        struct dm_target *ti;
        unsigned i = 0;

        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, func, NULL))
                        return 0;
        }

        return 1;
}

static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
                                         sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && !q->limits.max_write_same_sectors;
}

static bool dm_table_supports_write_same(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->num_write_same_bios)
                        return false;

                if (!ti->type->iterate_devices ||
                    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
                        return false;
        }

        return true;
}

void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                               struct queue_limits *limits)
{
        unsigned flush = 0;

        /*
         * Copy table's limits to the DM device's request_queue
         */
        q->limits = *limits;

        if (!dm_table_supports_discards(t))
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

        if (dm_table_supports_flush(t, REQ_FLUSH)) {
                flush |= REQ_FLUSH;
                if (dm_table_supports_flush(t, REQ_FUA))
                        flush |= REQ_FUA;
        }
        blk_queue_flush(q, flush);

        if (!dm_table_discard_zeroes_data(t))
                q->limits.discard_zeroes_data = 0;

        /* Ensure that all underlying devices are non-rotational. */
        if (dm_table_all_devices_attribute(t, device_is_nonrot))
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        else
                queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);

        if (!dm_table_supports_write_same(t))
                q->limits.max_write_same_sectors = 0;

        dm_table_set_integrity(t);

        /*
         * Determine whether or not this queue's I/O timings contribute
         * to the entropy pool.  Only request-based targets use this.
         * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
         * have it set.
         */
        if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

        /*
         * QUEUE_FLAG_STACKABLE must be set after all queue settings are
         * visible to other CPUs because, once the flag is set, incoming bios
         * are processed by request-based dm, which refers to the queue
         * settings.
         * Until the flag is set, bios are passed to bio-based dm and queued
         * to md->deferred, where queue settings are not needed yet.
         * Those bios are passed to request-based dm at resume time.
         */
        smp_mb();
        if (dm_table_request_based(t))
                queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
}

unsigned int dm_table_get_num_targets(struct dm_table *t)
{
        return t->num_targets;
}

struct list_head *dm_table_get_devices(struct dm_table *t)
{
        return &t->devices;
}

fmode_t dm_table_get_mode(struct dm_table *t)
{
        return t->mode;
}
EXPORT_SYMBOL(dm_table_get_mode);

static void suspend_targets(struct dm_table *t, unsigned postsuspend)
{
        int i = t->num_targets;
        struct dm_target *ti = t->targets;

        while (i--) {
                if (postsuspend) {
                        if (ti->type->postsuspend)
                                ti->type->postsuspend(ti);
                } else if (ti->type->presuspend)
                        ti->type->presuspend(ti);

                ti++;
        }
}

void dm_table_presuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, 0);
}

void dm_table_postsuspend_targets(struct dm_table *t)
{
        if (!t)
                return;

        suspend_targets(t, 1);
}

int dm_table_resume_targets(struct dm_table *t)
{
        int i, r = 0;

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (!ti->type->preresume)
                        continue;

                r = ti->type->preresume(ti);
                if (r)
                        return r;
        }

        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *ti = t->targets + i;

                if (ti->type->resume)
                        ti->type->resume(ti);
        }

        return 0;
}

void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
{
        list_add(&cb->list, &t->target_callbacks);
}
EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);

int dm_table_any_congested(struct dm_table *t, int bdi_bits)
{
        struct dm_dev_internal *dd;
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_target_callbacks *cb;
        int r = 0;

        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
                char b[BDEVNAME_SIZE];

                if (likely(q))
                        r |= bdi_congested(&q->backing_dev_info, bdi_bits);
                else
                        DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
                                     dm_device_name(t->md),
                                     bdevname(dd->dm_dev.bdev, b));
        }

        list_for_each_entry(cb, &t->target_callbacks, list)
                if (cb->congested_fn)
                        r |= cb->congested_fn(cb, bdi_bits);

        return r;
}

int dm_table_any_busy_target(struct dm_table *t)
{
        unsigned i;
        struct dm_target *ti;

        for (i = 0; i < t->num_targets; i++) {
                ti = t->targets + i;
                if (ti->type->busy && ti->type->busy(ti))
                        return 1;
        }

        return 0;
}

struct mapped_device *dm_table_get_md(struct dm_table *t)
{
        return t->md;
}
EXPORT_SYMBOL(dm_table_get_md);

static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);

        return q && blk_queue_discard(q);
}

bool dm_table_supports_discards(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i = 0;

        /*
         * Unless a target used by the table sets discards_supported,
         * require at least one underlying device to support discards.
         * t->devices includes internal dm devices such as mirror logs
         * so we need to use iterate_devices here, which targets
         * supporting discard selectively must provide.
         */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);

                if (!ti->num_discard_bios)
                        continue;

                if (ti->discards_supported)
                        return 1;

                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, device_discard_capable, NULL))
                        return 1;
        }

        return 0;
}