linux/drivers/md/dm-table.c
   1/*
   2 * Copyright (C) 2001 Sistina Software (UK) Limited.
   3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
   4 *
   5 * This file is released under the GPL.
   6 */
   7
   8#include "dm.h"
   9
  10#include <linux/module.h>
  11#include <linux/vmalloc.h>
  12#include <linux/blkdev.h>
  13#include <linux/namei.h>
  14#include <linux/ctype.h>
  15#include <linux/string.h>
  16#include <linux/slab.h>
  17#include <linux/interrupt.h>
  18#include <linux/mutex.h>
  19#include <linux/delay.h>
  20#include <linux/atomic.h>
  21
  22#define DM_MSG_PREFIX "table"
  23
  24#define MAX_DEPTH 16
  25#define NODE_SIZE L1_CACHE_BYTES
  26#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
  27#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
  28
  29/*
  30 * The table always has exactly one reference from either mapped_device->map
  31 * or hash_cell->new_map. This reference is not counted in table->holders.
  32 * A pair of dm_create_table/dm_destroy_table functions is used for table
  33 * creation/destruction.
  34 *
  35 * Temporary references from the other code increase table->holders. A pair
  36 * of dm_table_get/dm_table_put functions is used to manipulate it.
  37 *
  38 * When the table is about to be destroyed, we wait for table->holders to
  39 * drop to zero.
  40 */
  41
  42struct dm_table {
  43        struct mapped_device *md;
  44        atomic_t holders;
  45        unsigned type;
  46
  47        /* btree table */
  48        unsigned int depth;
  49        unsigned int counts[MAX_DEPTH]; /* in nodes */
  50        sector_t *index[MAX_DEPTH];
  51
  52        unsigned int num_targets;
  53        unsigned int num_allocated;
  54        sector_t *highs;
  55        struct dm_target *targets;
  56
  57        struct target_type *immutable_target_type;
  58        unsigned integrity_supported:1;
  59        unsigned singleton:1;
  60
  61        /*
  62         * Indicates the rw permissions for the new logical
  63         * device.  This should be a combination of FMODE_READ
  64         * and FMODE_WRITE.
  65         */
  66        fmode_t mode;
  67
  68        /* a list of devices used by this table */
  69        struct list_head devices;
  70
  71        /* events get handed up using this callback */
  72        void (*event_fn)(void *);
  73        void *event_context;
  74
  75        struct dm_md_mempools *mempools;
  76
  77        struct list_head target_callbacks;
  78};
  79
  80/*
  81 * Similar to ceiling(log_base(n))
  82 */
  83static unsigned int int_log(unsigned int n, unsigned int base)
  84{
  85        int result = 0;
  86
  87        while (n > 1) {
  88                n = dm_div_up(n, base);
  89                result++;
  90        }
  91
  92        return result;
  93}
  94
  95/*
  96 * Calculate the index of the child node for the k'th key of the n'th node.
  97 */
  98static inline unsigned int get_child(unsigned int n, unsigned int k)
  99{
 100        return (n * CHILDREN_PER_NODE) + k;
 101}
 102
 103/*
 104 * Return the n'th node of level l from table t.
 105 */
 106static inline sector_t *get_node(struct dm_table *t,
 107                                 unsigned int l, unsigned int n)
 108{
 109        return t->index[l] + (n * KEYS_PER_NODE);
 110}
 111
 112/*
 113 * Return the highest key that you could lookup from the n'th
 114 * node on level l of the btree.
 115 */
 116static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
 117{
 118        for (; l < t->depth - 1; l++)
 119                n = get_child(n, CHILDREN_PER_NODE - 1);
 120
 121        if (n >= t->counts[l])
 122                return (sector_t) - 1;
 123
 124        return get_node(t, l, n)[KEYS_PER_NODE - 1];
 125}
 126
 127/*
 128 * Fills in a level of the btree based on the highs of the level
 129 * below it.
 130 */
 131static int setup_btree_index(unsigned int l, struct dm_table *t)
 132{
 133        unsigned int n, k;
 134        sector_t *node;
 135
 136        for (n = 0U; n < t->counts[l]; n++) {
 137                node = get_node(t, l, n);
 138
 139                for (k = 0U; k < KEYS_PER_NODE; k++)
 140                        node[k] = high(t, l + 1, get_child(n, k));
 141        }
 142
 143        return 0;
 144}
 145
 146void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
 147{
 148        unsigned long size;
 149        void *addr;
 150
 151        /*
 152         * Check that we're not going to overflow.
 153         */
 154        if (nmemb > (ULONG_MAX / elem_size))
 155                return NULL;
 156
 157        size = nmemb * elem_size;
 158        addr = vzalloc(size);
 159
 160        return addr;
 161}
 162EXPORT_SYMBOL(dm_vcalloc);
 163
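/*
 * Illustrative sketch (not part of the original file): a caller needing a
 * zeroed array of sector_t values might pair dm_vcalloc() with vfree(),
 * where "nr_regions" is a hypothetical element count:
 *
 *	sector_t *offsets;
 *
 *	offsets = dm_vcalloc(nr_regions, sizeof(sector_t));
 *	if (!offsets)
 *		return -ENOMEM;
 *	...
 *	vfree(offsets);
 *
 * dm_vcalloc() returns NULL both when vzalloc() fails and when
 * nmemb * elem_size would overflow an unsigned long.
 */
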
 164/*
 165 * highs and targets are managed as dynamic arrays during a
 166 * table load.
 167 */
 168static int alloc_targets(struct dm_table *t, unsigned int num)
 169{
 170        sector_t *n_highs;
 171        struct dm_target *n_targets;
 172        int n = t->num_targets;
 173
 174        /*
 175         * Allocate both the target array and offset array at once.
 176         * Append an empty entry to catch sectors beyond the end of
 177         * the device.
 178         */
 179        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
 180                                          sizeof(sector_t));
 181        if (!n_highs)
 182                return -ENOMEM;
 183
 184        n_targets = (struct dm_target *) (n_highs + num);
 185
 186        if (n) {
 187                memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
 188                memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
 189        }
 190
 191        memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
 192        vfree(t->highs);
 193
 194        t->num_allocated = num;
 195        t->highs = n_highs;
 196        t->targets = n_targets;
 197
 198        return 0;
 199}
 200
 201int dm_table_create(struct dm_table **result, fmode_t mode,
 202                    unsigned num_targets, struct mapped_device *md)
 203{
 204        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
 205
 206        if (!t)
 207                return -ENOMEM;
 208
 209        INIT_LIST_HEAD(&t->devices);
 210        INIT_LIST_HEAD(&t->target_callbacks);
 211        atomic_set(&t->holders, 0);
 212
 213        if (!num_targets)
 214                num_targets = KEYS_PER_NODE;
 215
 216        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
 217
 218        if (alloc_targets(t, num_targets)) {
 219                kfree(t);
 220                t = NULL;
 221                return -ENOMEM;
 222        }
 223
 224        t->mode = mode;
 225        t->md = md;
 226        *result = t;
 227        return 0;
 228}
 229
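/*
 * Illustrative sketch (not part of the original file) of the table lifecycle
 * as driven by the ioctl layer (dm-ioctl.c); "md" and "dev_size" stand in
 * for the mapped device and its length in 512-byte sectors, and the linear
 * target parameters are only an example:
 *
 *	struct dm_table *t;
 *	char params[] = "8:16 0";	/* params are split in place */
 *	int r;
 *
 *	r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *	if (r)
 *		return r;
 *
 *	r = dm_table_add_target(t, "linear", 0, dev_size, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r) {
 *		dm_table_destroy(t);
 *		return r;
 *	}
 *
 * On success the table is later bound to the device and, when replaced or
 * torn down, released again with dm_table_destroy().
 */
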
 230static void free_devices(struct list_head *devices)
 231{
 232        struct list_head *tmp, *next;
 233
 234        list_for_each_safe(tmp, next, devices) {
 235                struct dm_dev_internal *dd =
 236                    list_entry(tmp, struct dm_dev_internal, list);
 237                DMWARN("dm_table_destroy: dm_put_device call missing for %s",
 238                       dd->dm_dev.name);
 239                kfree(dd);
 240        }
 241}
 242
 243void dm_table_destroy(struct dm_table *t)
 244{
 245        unsigned int i;
 246
 247        if (!t)
 248                return;
 249
 250        while (atomic_read(&t->holders))
 251                msleep(1);
 252        smp_mb();
 253
 254        /* free the indexes */
 255        if (t->depth >= 2)
 256                vfree(t->index[t->depth - 2]);
 257
 258        /* free the targets */
 259        for (i = 0; i < t->num_targets; i++) {
 260                struct dm_target *tgt = t->targets + i;
 261
 262                if (tgt->type->dtr)
 263                        tgt->type->dtr(tgt);
 264
 265                dm_put_target_type(tgt->type);
 266        }
 267
 268        vfree(t->highs);
 269
 270        /* free the device list */
 271        free_devices(&t->devices);
 272
 273        dm_free_md_mempools(t->mempools);
 274
 275        kfree(t);
 276}
 277
 278void dm_table_get(struct dm_table *t)
 279{
 280        atomic_inc(&t->holders);
 281}
 282EXPORT_SYMBOL(dm_table_get);
 283
 284void dm_table_put(struct dm_table *t)
 285{
 286        if (!t)
 287                return;
 288
 289        smp_mb__before_atomic_dec();
 290        atomic_dec(&t->holders);
 291}
 292EXPORT_SYMBOL(dm_table_put);
 293
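/*
 * Illustrative sketch (not part of the original file): code that needs a
 * temporary reference to the live table follows the holder pattern
 * described at the top of this file:
 *
 *	struct dm_table *map = dm_get_live_table(md);
 *
 *	if (map) {
 *		... inspect or use the table ...
 *		dm_table_put(map);
 *	}
 *
 * dm_get_live_table() (in dm.c) takes the holder reference on behalf of
 * the caller, so only the matching dm_table_put() appears here.
 */
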
 294/*
 295 * Checks to see if we need to extend highs or targets.
 296 */
 297static inline int check_space(struct dm_table *t)
 298{
 299        if (t->num_targets >= t->num_allocated)
 300                return alloc_targets(t, t->num_allocated * 2);
 301
 302        return 0;
 303}
 304
 305/*
 306 * See if we've already got a device in the list.
 307 */
 308static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
 309{
 310        struct dm_dev_internal *dd;
 311
 312        list_for_each_entry (dd, l, list)
 313                if (dd->dm_dev.bdev->bd_dev == dev)
 314                        return dd;
 315
 316        return NULL;
 317}
 318
 319/*
 320 * Open a device so we can use it as a map destination.
 321 */
 322static int open_dev(struct dm_dev_internal *d, dev_t dev,
 323                    struct mapped_device *md)
 324{
 325        static char *_claim_ptr = "I belong to device-mapper";
 326        struct block_device *bdev;
 327
 328        int r;
 329
 330        BUG_ON(d->dm_dev.bdev);
 331
 332        bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
 333        if (IS_ERR(bdev))
 334                return PTR_ERR(bdev);
 335
 336        r = bd_link_disk_holder(bdev, dm_disk(md));
 337        if (r) {
 338                blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
 339                return r;
 340        }
 341
 342        d->dm_dev.bdev = bdev;
 343        return 0;
 344}
 345
 346/*
 347 * Close a device that we've been using.
 348 */
 349static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 350{
 351        if (!d->dm_dev.bdev)
 352                return;
 353
 354        bd_unlink_disk_holder(d->dm_dev.bdev, dm_disk(md));
 355        blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
 356        d->dm_dev.bdev = NULL;
 357}
 358
 359/*
 360 * If possible, this checks whether an area of the destination device is invalid.
 361 */
 362static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 363                                  sector_t start, sector_t len, void *data)
 364{
 365        struct request_queue *q;
 366        struct queue_limits *limits = data;
 367        struct block_device *bdev = dev->bdev;
 368        sector_t dev_size =
 369                i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
 370        unsigned short logical_block_size_sectors =
 371                limits->logical_block_size >> SECTOR_SHIFT;
 372        char b[BDEVNAME_SIZE];
 373
 374        /*
 375         * Some devices exist without request functions,
 376         * such as loop devices not yet bound to backing files.
 377         * Forbid the use of such devices.
 378         */
 379        q = bdev_get_queue(bdev);
 380        if (!q || !q->make_request_fn) {
 381                DMWARN("%s: %s is not yet initialised: "
 382                       "start=%llu, len=%llu, dev_size=%llu",
 383                       dm_device_name(ti->table->md), bdevname(bdev, b),
 384                       (unsigned long long)start,
 385                       (unsigned long long)len,
 386                       (unsigned long long)dev_size);
 387                return 1;
 388        }
 389
 390        if (!dev_size)
 391                return 0;
 392
 393        if ((start >= dev_size) || (start + len > dev_size)) {
 394                DMWARN("%s: %s too small for target: "
 395                       "start=%llu, len=%llu, dev_size=%llu",
 396                       dm_device_name(ti->table->md), bdevname(bdev, b),
 397                       (unsigned long long)start,
 398                       (unsigned long long)len,
 399                       (unsigned long long)dev_size);
 400                return 1;
 401        }
 402
 403        if (logical_block_size_sectors <= 1)
 404                return 0;
 405
 406        if (start & (logical_block_size_sectors - 1)) {
 407                DMWARN("%s: start=%llu not aligned to h/w "
 408                       "logical block size %u of %s",
 409                       dm_device_name(ti->table->md),
 410                       (unsigned long long)start,
 411                       limits->logical_block_size, bdevname(bdev, b));
 412                return 1;
 413        }
 414
 415        if (len & (logical_block_size_sectors - 1)) {
 416                DMWARN("%s: len=%llu not aligned to h/w "
 417                       "logical block size %u of %s",
 418                       dm_device_name(ti->table->md),
 419                       (unsigned long long)len,
 420                       limits->logical_block_size, bdevname(bdev, b));
 421                return 1;
 422        }
 423
 424        return 0;
 425}
 426
 427/*
 428 * This upgrades the mode on an already open dm_dev, being
 429 * careful to leave things as they were if we fail to reopen the
 430 * device and not to touch the existing bdev field in case
 431 * it is accessed concurrently inside dm_table_any_congested().
 432 */
 433static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
 434                        struct mapped_device *md)
 435{
 436        int r;
 437        struct dm_dev_internal dd_new, dd_old;
 438
 439        dd_new = dd_old = *dd;
 440
 441        dd_new.dm_dev.mode |= new_mode;
 442        dd_new.dm_dev.bdev = NULL;
 443
 444        r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
 445        if (r)
 446                return r;
 447
 448        dd->dm_dev.mode |= new_mode;
 449        close_dev(&dd_old, md);
 450
 451        return 0;
 452}
 453
 454/*
 455 * Add a device to the list, or just increment the usage count if
 456 * it's already present.
 457 */
 458int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 459                  struct dm_dev **result)
 460{
 461        int r;
 462        dev_t uninitialized_var(dev);
 463        struct dm_dev_internal *dd;
 464        unsigned int major, minor;
 465        struct dm_table *t = ti->table;
 466        char dummy;
 467
 468        BUG_ON(!t);
 469
 470        if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
 471                /* Extract the major/minor numbers */
 472                dev = MKDEV(major, minor);
 473                if (MAJOR(dev) != major || MINOR(dev) != minor)
 474                        return -EOVERFLOW;
 475        } else {
 476                /* convert the path to a device */
 477                struct block_device *bdev = lookup_bdev(path);
 478
 479                if (IS_ERR(bdev))
 480                        return PTR_ERR(bdev);
 481                dev = bdev->bd_dev;
 482                bdput(bdev);
 483        }
 484
 485        dd = find_device(&t->devices, dev);
 486        if (!dd) {
 487                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
 488                if (!dd)
 489                        return -ENOMEM;
 490
 491                dd->dm_dev.mode = mode;
 492                dd->dm_dev.bdev = NULL;
 493
 494                if ((r = open_dev(dd, dev, t->md))) {
 495                        kfree(dd);
 496                        return r;
 497                }
 498
 499                format_dev_t(dd->dm_dev.name, dev);
 500
 501                atomic_set(&dd->count, 0);
 502                list_add(&dd->list, &t->devices);
 503
 504        } else if (dd->dm_dev.mode != (mode | dd->dm_dev.mode)) {
 505                r = upgrade_mode(dd, mode, t->md);
 506                if (r)
 507                        return r;
 508        }
 509        atomic_inc(&dd->count);
 510
 511        *result = &dd->dm_dev;
 512        return 0;
 513}
 514EXPORT_SYMBOL(dm_get_device);
 515
 516int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 517                         sector_t start, sector_t len, void *data)
 518{
 519        struct queue_limits *limits = data;
 520        struct block_device *bdev = dev->bdev;
 521        struct request_queue *q = bdev_get_queue(bdev);
 522        char b[BDEVNAME_SIZE];
 523
 524        if (unlikely(!q)) {
 525                DMWARN("%s: Cannot set limits for nonexistent device %s",
 526                       dm_device_name(ti->table->md), bdevname(bdev, b));
 527                return 0;
 528        }
 529
 530        if (bdev_stack_limits(limits, bdev, start) < 0)
 531                DMWARN("%s: adding target device %s caused an alignment inconsistency: "
 532                       "physical_block_size=%u, logical_block_size=%u, "
 533                       "alignment_offset=%u, start=%llu",
 534                       dm_device_name(ti->table->md), bdevname(bdev, b),
 535                       q->limits.physical_block_size,
 536                       q->limits.logical_block_size,
 537                       q->limits.alignment_offset,
 538                       (unsigned long long) start << SECTOR_SHIFT);
 539
 540        /*
 541         * Check if merge fn is supported.
 542         * If not we'll force DM to use PAGE_SIZE or
 543         * smaller I/O, just to be safe.
 544         */
 545        if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
 546                blk_limits_max_hw_sectors(limits,
 547                                          (unsigned int) (PAGE_SIZE >> 9));
 548        return 0;
 549}
 550EXPORT_SYMBOL_GPL(dm_set_device_limits);
 551
 552/*
 553 * Decrement a device's use count and remove it if necessary.
 554 */
 555void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 556{
 557        struct dm_dev_internal *dd = container_of(d, struct dm_dev_internal,
 558                                                  dm_dev);
 559
 560        if (atomic_dec_and_test(&dd->count)) {
 561                close_dev(dd, ti->table->md);
 562                list_del(&dd->list);
 563                kfree(dd);
 564        }
 565}
 566EXPORT_SYMBOL(dm_put_device);
 567
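/*
 * Illustrative sketch (not part of the original file): a target constructor
 * typically acquires its underlying device with dm_get_device() and releases
 * it in the destructor, roughly as dm-linear does.  The example_* names are
 * hypothetical:
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct dm_dev *dev;
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &dev)) {
 *			ti->error = "Device lookup failed";
 *			return -EINVAL;
 *		}
 *		ti->private = dev;
 *		return 0;
 *	}
 *
 *	static void example_dtr(struct dm_target *ti)
 *	{
 *		dm_put_device(ti, ti->private);
 *	}
 *
 * argv[0] may be either a path or a "major:minor" pair, as parsed above.
 */
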
 568/*
 569 * Checks to see if the target joins onto the end of the table.
 570 */
 571static int adjoin(struct dm_table *table, struct dm_target *ti)
 572{
 573        struct dm_target *prev;
 574
 575        if (!table->num_targets)
 576                return !ti->begin;
 577
 578        prev = &table->targets[table->num_targets - 1];
 579        return (ti->begin == (prev->begin + prev->len));
 580}
 581
 582/*
 583 * Used to dynamically allocate the arg array.
 584 */
 585static char **realloc_argv(unsigned *array_size, char **old_argv)
 586{
 587        char **argv;
 588        unsigned new_size;
 589
 590        new_size = *array_size ? *array_size * 2 : 64;
 591        argv = kmalloc(new_size * sizeof(*argv), GFP_KERNEL);
 592        if (argv) {
 593                memcpy(argv, old_argv, *array_size * sizeof(*argv));
 594                *array_size = new_size;
 595        }
 596
 597        kfree(old_argv);
 598        return argv;
 599}
 600
 601/*
 602 * Destructively splits up the argument list to pass to ctr.
 603 */
 604int dm_split_args(int *argc, char ***argvp, char *input)
 605{
 606        char *start, *end = input, *out, **argv = NULL;
 607        unsigned array_size = 0;
 608
 609        *argc = 0;
 610
 611        if (!input) {
 612                *argvp = NULL;
 613                return 0;
 614        }
 615
 616        argv = realloc_argv(&array_size, argv);
 617        if (!argv)
 618                return -ENOMEM;
 619
 620        while (1) {
 621                /* Skip whitespace */
 622                start = skip_spaces(end);
 623
 624                if (!*start)
 625                        break;  /* success, we hit the end */
 626
 627                /* 'out' is used to remove any backslash escapes */
 628                end = out = start;
 629                while (*end) {
 630                        /* Everything apart from '\0' can be quoted */
 631                        if (*end == '\\' && *(end + 1)) {
 632                                *out++ = *(end + 1);
 633                                end += 2;
 634                                continue;
 635                        }
 636
 637                        if (isspace(*end))
 638                                break;  /* end of token */
 639
 640                        *out++ = *end++;
 641                }
 642
 643                /* have we already filled the array ? */
 644                if ((*argc + 1) > array_size) {
 645                        argv = realloc_argv(&array_size, argv);
 646                        if (!argv)
 647                                return -ENOMEM;
 648                }
 649
 650                /* we know this is whitespace */
 651                if (*end)
 652                        end++;
 653
 654                /* terminate the string and put it in the array */
 655                *out = '\0';
 656                argv[*argc] = start;
 657                (*argc)++;
 658        }
 659
 660        *argvp = argv;
 661        return 0;
 662}
 663
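/*
 * Illustrative sketch (not part of the original file): given a writable
 * parameter buffer such as
 *
 *	char input[] = "/dev/sdb1 0 extra\\ arg";
 *
 * dm_split_args() yields argc == 3 with argv[2] == "extra arg" (the
 * backslash escape is collapsed in place).  The caller owns the argv array
 * and frees it with kfree(argv); the strings themselves still point into
 * the original buffer.
 */
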
 664/*
 665 * Impose necessary and sufficient conditions on a device's table such
 666 * that any incoming bio which respects its logical_block_size can be
 667 * processed successfully.  If it falls across the boundary between
 668 * two or more targets, the size of each piece it gets split into must
 669 * be compatible with the logical_block_size of the target processing it.
 670 */
 671static int validate_hardware_logical_block_alignment(struct dm_table *table,
 672                                                 struct queue_limits *limits)
 673{
 674        /*
 675         * This function uses arithmetic modulo the logical_block_size
 676         * (in units of 512-byte sectors).
 677         */
 678        unsigned short device_logical_block_size_sects =
 679                limits->logical_block_size >> SECTOR_SHIFT;
 680
 681        /*
 682         * Offset of the start of the next table entry, mod logical_block_size.
 683         */
 684        unsigned short next_target_start = 0;
 685
 686        /*
 687         * Given an aligned bio that extends beyond the end of a
 688         * target, how many sectors must the next target handle?
 689         */
 690        unsigned short remaining = 0;
 691
 692        struct dm_target *uninitialized_var(ti);
 693        struct queue_limits ti_limits;
 694        unsigned i = 0;
 695
 696        /*
 697         * Check each entry in the table in turn.
 698         */
 699        while (i < dm_table_get_num_targets(table)) {
 700                ti = dm_table_get_target(table, i++);
 701
 702                blk_set_stacking_limits(&ti_limits);
 703
 704                /* combine all target devices' limits */
 705                if (ti->type->iterate_devices)
 706                        ti->type->iterate_devices(ti, dm_set_device_limits,
 707                                                  &ti_limits);
 708
 709                /*
 710                 * If the remaining sectors fall entirely within this
 711                 * table entry, are they compatible with its logical_block_size?
 712                 */
 713                if (remaining < ti->len &&
 714                    remaining & ((ti_limits.logical_block_size >>
 715                                  SECTOR_SHIFT) - 1))
 716                        break;  /* Error */
 717
 718                next_target_start =
 719                    (unsigned short) ((next_target_start + ti->len) &
 720                                      (device_logical_block_size_sects - 1));
 721                remaining = next_target_start ?
 722                    device_logical_block_size_sects - next_target_start : 0;
 723        }
 724
 725        if (remaining) {
 726                DMWARN("%s: table line %u (start sect %llu len %llu) "
 727                       "not aligned to h/w logical block size %u",
 728                       dm_device_name(table->md), i,
 729                       (unsigned long long) ti->begin,
 730                       (unsigned long long) ti->len,
 731                       limits->logical_block_size);
 732                return -EINVAL;
 733        }
 734
 735        return 0;
 736}
 737
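/*
 * Worked example (not part of the original file), assuming both table
 * entries stack devices with a 4096-byte logical_block_size (8 sectors):
 *
 *	target 0: begin 0, len 100
 *	target 1: begin 100, len 156
 *
 * After target 0, next_target_start = 100 & 7 = 4 and remaining = 8 - 4 = 4.
 * When target 1 is examined, remaining (4) is not a multiple of its
 * logical_block_size in sectors (4 & 7 != 0), so the loop breaks and the
 * misaligned table line is reported.  Making each target's length a
 * multiple of 8 sectors avoids the error.
 */
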
 738int dm_table_add_target(struct dm_table *t, const char *type,
 739                        sector_t start, sector_t len, char *params)
 740{
 741        int r = -EINVAL, argc;
 742        char **argv;
 743        struct dm_target *tgt;
 744
 745        if (t->singleton) {
 746                DMERR("%s: target type %s must appear alone in table",
 747                      dm_device_name(t->md), t->targets->type->name);
 748                return -EINVAL;
 749        }
 750
 751        if ((r = check_space(t)))
 752                return r;
 753
 754        tgt = t->targets + t->num_targets;
 755        memset(tgt, 0, sizeof(*tgt));
 756
 757        if (!len) {
 758                DMERR("%s: zero-length target", dm_device_name(t->md));
 759                return -EINVAL;
 760        }
 761
 762        tgt->type = dm_get_target_type(type);
 763        if (!tgt->type) {
 764                DMERR("%s: %s: unknown target type", dm_device_name(t->md),
 765                      type);
 766                return -EINVAL;
 767        }
 768
 769        if (dm_target_needs_singleton(tgt->type)) {
 770                if (t->num_targets) {
 771                        DMERR("%s: target type %s must appear alone in table",
 772                              dm_device_name(t->md), type);
 773                        return -EINVAL;
 774                }
 775                t->singleton = 1;
 776        }
 777
 778        if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
 779                DMERR("%s: target type %s may not be included in read-only tables",
 780                      dm_device_name(t->md), type);
 781                return -EINVAL;
 782        }
 783
 784        if (t->immutable_target_type) {
 785                if (t->immutable_target_type != tgt->type) {
 786                        DMERR("%s: immutable target type %s cannot be mixed with other target types",
 787                              dm_device_name(t->md), t->immutable_target_type->name);
 788                        return -EINVAL;
 789                }
 790        } else if (dm_target_is_immutable(tgt->type)) {
 791                if (t->num_targets) {
 792                        DMERR("%s: immutable target type %s cannot be mixed with other target types",
 793                              dm_device_name(t->md), tgt->type->name);
 794                        return -EINVAL;
 795                }
 796                t->immutable_target_type = tgt->type;
 797        }
 798
 799        tgt->table = t;
 800        tgt->begin = start;
 801        tgt->len = len;
 802        tgt->error = "Unknown error";
 803
 804        /*
 805         * Does this target adjoin the previous one ?
 806         */
 807        if (!adjoin(t, tgt)) {
 808                tgt->error = "Gap in table";
 809                r = -EINVAL;
 810                goto bad;
 811        }
 812
 813        r = dm_split_args(&argc, &argv, params);
 814        if (r) {
 815                tgt->error = "couldn't split parameters (insufficient memory)";
 816                goto bad;
 817        }
 818
 819        r = tgt->type->ctr(tgt, argc, argv);
 820        kfree(argv);
 821        if (r)
 822                goto bad;
 823
 824        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
 825
 826        if (!tgt->num_discard_requests && tgt->discards_supported)
 827                DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
 828                       dm_device_name(t->md), type);
 829
 830        return 0;
 831
 832 bad:
 833        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
 834        dm_put_target_type(tgt->type);
 835        return r;
 836}
 837
 838/*
 839 * Target argument parsing helpers.
 840 */
 841static int validate_next_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
 842                             unsigned *value, char **error, unsigned grouped)
 843{
 844        const char *arg_str = dm_shift_arg(arg_set);
 845        char dummy;
 846
 847        if (!arg_str ||
 848            (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
 849            (*value < arg->min) ||
 850            (*value > arg->max) ||
 851            (grouped && arg_set->argc < *value)) {
 852                *error = arg->error;
 853                return -EINVAL;
 854        }
 855
 856        return 0;
 857}
 858
 859int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
 860                unsigned *value, char **error)
 861{
 862        return validate_next_arg(arg, arg_set, value, error, 0);
 863}
 864EXPORT_SYMBOL(dm_read_arg);
 865
 866int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
 867                      unsigned *value, char **error)
 868{
 869        return validate_next_arg(arg, arg_set, value, error, 1);
 870}
 871EXPORT_SYMBOL(dm_read_arg_group);
 872
 873const char *dm_shift_arg(struct dm_arg_set *as)
 874{
 875        char *r;
 876
 877        if (as->argc) {
 878                as->argc--;
 879                r = *as->argv;
 880                as->argv++;
 881                return r;
 882        }
 883
 884        return NULL;
 885}
 886EXPORT_SYMBOL(dm_shift_arg);
 887
 888void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 889{
 890        BUG_ON(as->argc < num_args);
 891        as->argc -= num_args;
 892        as->argv += num_args;
 893}
 894EXPORT_SYMBOL(dm_consume_args);
 895
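/*
 * Illustrative sketch (not part of the original file): a target constructor
 * commonly parses a counted group of optional feature words with these
 * helpers.  The bounds and error string below are hypothetical:
 *
 *	static struct dm_arg _args[] = {
 *		{0, 3, "Invalid number of feature arguments"},
 *	};
 *
 *	struct dm_arg_set as;
 *	unsigned num_features;
 *	const char *word;
 *	char *error;
 *	int r;
 *
 *	as.argc = argc;
 *	as.argv = argv;
 *
 *	r = dm_read_arg_group(_args, &as, &num_features, &error);
 *	if (r)
 *		return r;
 *
 *	while (num_features--) {
 *		word = dm_shift_arg(&as);
 *		... match "word" against the supported feature names ...
 *	}
 *
 * dm_read_arg_group() verifies that at least num_features arguments remain,
 * so the dm_shift_arg() calls above cannot run off the end of the set.
 */
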
 896static int dm_table_set_type(struct dm_table *t)
 897{
 898        unsigned i;
 899        unsigned bio_based = 0, request_based = 0;
 900        struct dm_target *tgt;
 901        struct dm_dev_internal *dd;
 902        struct list_head *devices;
 903
 904        for (i = 0; i < t->num_targets; i++) {
 905                tgt = t->targets + i;
 906                if (dm_target_request_based(tgt))
 907                        request_based = 1;
 908                else
 909                        bio_based = 1;
 910
 911                if (bio_based && request_based) {
 912                        DMWARN("Inconsistent table: different target types"
 913                               " can't be mixed up");
 914                        return -EINVAL;
 915                }
 916        }
 917
 918        if (bio_based) {
 919                /* We must use this table as bio-based */
 920                t->type = DM_TYPE_BIO_BASED;
 921                return 0;
 922        }
 923
 924        BUG_ON(!request_based); /* No targets in this table */
 925
 926        /* Non-request-stackable devices can't be used for request-based dm */
 927        devices = dm_table_get_devices(t);
 928        list_for_each_entry(dd, devices, list) {
 929                if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev.bdev))) {
 930                        DMWARN("table load rejected: including"
 931                               " non-request-stackable devices");
 932                        return -EINVAL;
 933                }
 934        }
 935
 936        /*
 937         * Request-based dm supports only tables that have a single target now.
 938         * To support multiple targets, request splitting support is needed,
 939         * and that needs lots of changes in the block-layer.
 940         * (e.g. request completion process for partial completion.)
 941         */
 942        if (t->num_targets > 1) {
 943                DMWARN("Request-based dm doesn't support multiple targets yet");
 944                return -EINVAL;
 945        }
 946
 947        t->type = DM_TYPE_REQUEST_BASED;
 948
 949        return 0;
 950}
 951
 952unsigned dm_table_get_type(struct dm_table *t)
 953{
 954        return t->type;
 955}
 956
 957struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 958{
 959        return t->immutable_target_type;
 960}
 961
 962bool dm_table_request_based(struct dm_table *t)
 963{
 964        return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
 965}
 966
 967int dm_table_alloc_md_mempools(struct dm_table *t)
 968{
 969        unsigned type = dm_table_get_type(t);
 970
 971        if (unlikely(type == DM_TYPE_NONE)) {
 972                DMWARN("no table type is set, can't allocate mempools");
 973                return -EINVAL;
 974        }
 975
 976        t->mempools = dm_alloc_md_mempools(type, t->integrity_supported);
 977        if (!t->mempools)
 978                return -ENOMEM;
 979
 980        return 0;
 981}
 982
 983void dm_table_free_md_mempools(struct dm_table *t)
 984{
 985        dm_free_md_mempools(t->mempools);
 986        t->mempools = NULL;
 987}
 988
 989struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
 990{
 991        return t->mempools;
 992}
 993
 994static int setup_indexes(struct dm_table *t)
 995{
 996        int i;
 997        unsigned int total = 0;
 998        sector_t *indexes;
 999
1000        /* allocate the space for *all* the indexes */
1001        for (i = t->depth - 2; i >= 0; i--) {
1002                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
1003                total += t->counts[i];
1004        }
1005
1006        indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
1007        if (!indexes)
1008                return -ENOMEM;
1009
1010        /* set up internal nodes, bottom-up */
1011        for (i = t->depth - 2; i >= 0; i--) {
1012                t->index[i] = indexes;
1013                indexes += (KEYS_PER_NODE * t->counts[i]);
1014                setup_btree_index(i, t);
1015        }
1016
1017        return 0;
1018}
1019
1020/*
1021 * Builds the btree to index the map.
1022 */
1023static int dm_table_build_index(struct dm_table *t)
1024{
1025        int r = 0;
1026        unsigned int leaf_nodes;
1027
1028        /* how many indexes will the btree have ? */
1029        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
1030        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
1031
1032        /* leaf layer has already been set up */
1033        t->counts[t->depth - 1] = leaf_nodes;
1034        t->index[t->depth - 1] = t->highs;
1035
1036        if (t->depth >= 2)
1037                r = setup_indexes(t);
1038
1039        return r;
1040}
1041
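/*
 * Worked example (not part of the original file): with a 64-byte
 * L1_CACHE_BYTES and an 8-byte sector_t, KEYS_PER_NODE is 8 and
 * CHILDREN_PER_NODE is 9.  A table of 1000 targets then needs
 * dm_div_up(1000, 8) = 125 leaf nodes and
 * t->depth = 1 + int_log(125, 9) = 1 + 3 = 4 levels in total;
 * setup_indexes() sizes the internal levels at 14, 2 and 1 nodes
 * (each dm_div_up(..., 9) of the level below).
 */
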
1042/*
1043 * Get a disk whose integrity profile reflects the table's profile.
1044 * If %match_all is true, all devices' profiles must match.
1045 * If %match_all is false, all devices must at least have an
1046 * allocated integrity profile; but uninitialized is ok.
1047 * Returns NULL if integrity support was inconsistent or unavailable.
1048 */
1049static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
1050                                                    bool match_all)
1051{
1052        struct list_head *devices = dm_table_get_devices(t);
1053        struct dm_dev_internal *dd = NULL;
1054        struct gendisk *prev_disk = NULL, *template_disk = NULL;
1055
1056        list_for_each_entry(dd, devices, list) {
1057                template_disk = dd->dm_dev.bdev->bd_disk;
1058                if (!blk_get_integrity(template_disk))
1059                        goto no_integrity;
1060                if (!match_all && !blk_integrity_is_initialized(template_disk))
1061                        continue; /* skip uninitialized profiles */
1062                else if (prev_disk &&
1063                         blk_integrity_compare(prev_disk, template_disk) < 0)
1064                        goto no_integrity;
1065                prev_disk = template_disk;
1066        }
1067
1068        return template_disk;
1069
1070no_integrity:
1071        if (prev_disk)
1072                DMWARN("%s: integrity not set: %s and %s profile mismatch",
1073                       dm_device_name(t->md),
1074                       prev_disk->disk_name,
1075                       template_disk->disk_name);
1076        return NULL;
1077}
1078
1079/*
1080 * Register the mapped device for blk_integrity support if
1081 * the underlying devices have an integrity profile.  But the devices
1082 * might not all have matching profiles (checking all devices isn't reliable
1083 * during table load because this table may use other DM device(s) which
1084 * must be resumed before they will have an initialized integrity profile).
1085 * Stacked DM devices force a 2 stage integrity profile validation:
1086 * 1 - during load, validate all initialized integrity profiles match
1087 * 2 - during resume, validate all integrity profiles match
1088 */
1089static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
1090{
1091        struct gendisk *template_disk = NULL;
1092
1093        template_disk = dm_table_get_integrity_disk(t, false);
1094        if (!template_disk)
1095                return 0;
1096
1097        if (!blk_integrity_is_initialized(dm_disk(md))) {
1098                t->integrity_supported = 1;
1099                return blk_integrity_register(dm_disk(md), NULL);
1100        }
1101
1102        /*
1103         * If the DM device already has an initialized integrity
1104         * profile, the new profile should not conflict.
1105         */
1106        if (blk_integrity_is_initialized(template_disk) &&
1107            blk_integrity_compare(dm_disk(md), template_disk) < 0) {
1108                DMWARN("%s: conflict with existing integrity profile: "
1109                       "%s profile mismatch",
1110                       dm_device_name(t->md),
1111                       template_disk->disk_name);
1112                return 1;
1113        }
1114
1115        /* Preserve existing initialized integrity profile */
1116        t->integrity_supported = 1;
1117        return 0;
1118}
1119
1120/*
1121 * Prepares the table for use by building the indices,
1122 * setting the type, and allocating mempools.
1123 */
1124int dm_table_complete(struct dm_table *t)
1125{
1126        int r;
1127
1128        r = dm_table_set_type(t);
1129        if (r) {
1130                DMERR("unable to set table type");
1131                return r;
1132        }
1133
1134        r = dm_table_build_index(t);
1135        if (r) {
1136                DMERR("unable to build btrees");
1137                return r;
1138        }
1139
1140        r = dm_table_prealloc_integrity(t, t->md);
1141        if (r) {
1142                DMERR("could not register integrity profile.");
1143                return r;
1144        }
1145
1146        r = dm_table_alloc_md_mempools(t);
1147        if (r)
1148                DMERR("unable to allocate mempools");
1149
1150        return r;
1151}
1152
1153static DEFINE_MUTEX(_event_lock);
1154void dm_table_event_callback(struct dm_table *t,
1155                             void (*fn)(void *), void *context)
1156{
1157        mutex_lock(&_event_lock);
1158        t->event_fn = fn;
1159        t->event_context = context;
1160        mutex_unlock(&_event_lock);
1161}
1162
1163void dm_table_event(struct dm_table *t)
1164{
1165        /*
1166         * You can no longer call dm_table_event() from interrupt
1167         * context; use a bottom half instead.
1168         */
1169        BUG_ON(in_interrupt());
1170
1171        mutex_lock(&_event_lock);
1172        if (t->event_fn)
1173                t->event_fn(t->event_context);
1174        mutex_unlock(&_event_lock);
1175}
1176EXPORT_SYMBOL(dm_table_event);
1177
1178sector_t dm_table_get_size(struct dm_table *t)
1179{
1180        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1181}
1182EXPORT_SYMBOL(dm_table_get_size);
1183
1184struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
1185{
1186        if (index >= t->num_targets)
1187                return NULL;
1188
1189        return t->targets + index;
1190}
1191
1192/*
1193 * Search the btree for the correct target.
1194 *
1195 * Caller should check returned pointer with dm_target_is_valid()
1196 * to trap I/O beyond end of device.
1197 */
1198struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1199{
1200        unsigned int l, n = 0, k = 0;
1201        sector_t *node;
1202
1203        for (l = 0; l < t->depth; l++) {
1204                n = get_child(n, k);
1205                node = get_node(t, l, n);
1206
1207                for (k = 0; k < KEYS_PER_NODE; k++)
1208                        if (node[k] >= sector)
1209                                break;
1210        }
1211
1212        return &t->targets[(KEYS_PER_NODE * n) + k];
1213}
1214
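/*
 * Illustrative sketch (not part of the original file): the bio mapping path
 * in dm.c uses this lookup roughly as follows:
 *
 *	struct dm_target *ti = dm_table_find_target(map, bio->bi_sector);
 *
 *	if (!dm_target_is_valid(ti))
 *		... the sector lies beyond the end of the device ...
 *
 * A sector beyond the last target resolves to one of the zero-initialised,
 * unused target slots allocated in alloc_targets(); its table pointer is
 * NULL, which is what dm_target_is_valid() (dm.h) checks.
 */
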
1215static int count_device(struct dm_target *ti, struct dm_dev *dev,
1216                        sector_t start, sector_t len, void *data)
1217{
1218        unsigned *num_devices = data;
1219
1220        (*num_devices)++;
1221
1222        return 0;
1223}
1224
1225/*
1226 * Check whether a table has no data devices attached using each
1227 * target's iterate_devices method.
1228 * Returns false if the result is unknown because a target doesn't
1229 * support iterate_devices.
1230 */
1231bool dm_table_has_no_data_devices(struct dm_table *table)
1232{
1233        struct dm_target *uninitialized_var(ti);
1234        unsigned i = 0, num_devices = 0;
1235
1236        while (i < dm_table_get_num_targets(table)) {
1237                ti = dm_table_get_target(table, i++);
1238
1239                if (!ti->type->iterate_devices)
1240                        return false;
1241
1242                ti->type->iterate_devices(ti, count_device, &num_devices);
1243                if (num_devices)
1244                        return false;
1245        }
1246
1247        return true;
1248}
1249
1250/*
1251 * Establish the new table's queue_limits and validate them.
1252 */
1253int dm_calculate_queue_limits(struct dm_table *table,
1254                              struct queue_limits *limits)
1255{
1256        struct dm_target *uninitialized_var(ti);
1257        struct queue_limits ti_limits;
1258        unsigned i = 0;
1259
1260        blk_set_stacking_limits(limits);
1261
1262        while (i < dm_table_get_num_targets(table)) {
1263                blk_set_stacking_limits(&ti_limits);
1264
1265                ti = dm_table_get_target(table, i++);
1266
1267                if (!ti->type->iterate_devices)
1268                        goto combine_limits;
1269
1270                /*
1271                 * Combine queue limits of all the devices this target uses.
1272                 */
1273                ti->type->iterate_devices(ti, dm_set_device_limits,
1274                                          &ti_limits);
1275
1276                /* Set I/O hints portion of queue limits */
1277                if (ti->type->io_hints)
1278                        ti->type->io_hints(ti, &ti_limits);
1279
1280                /*
1281                 * Check each device area is consistent with the target's
1282                 * overall queue limits.
1283                 */
1284                if (ti->type->iterate_devices(ti, device_area_is_invalid,
1285                                              &ti_limits))
1286                        return -EINVAL;
1287
1288combine_limits:
1289                /*
1290                 * Merge this target's queue limits into the overall limits
1291                 * for the table.
1292                 */
1293                if (blk_stack_limits(limits, &ti_limits, 0) < 0)
1294                        DMWARN("%s: adding target device "
1295                               "(start sect %llu len %llu) "
1296                               "caused an alignment inconsistency",
1297                               dm_device_name(table->md),
1298                               (unsigned long long) ti->begin,
1299                               (unsigned long long) ti->len);
1300        }
1301
1302        return validate_hardware_logical_block_alignment(table, limits);
1303}
1304
1305/*
1306 * Set the integrity profile for this device if all devices used have
1307 * matching profiles.  We're quite deep in the resume path but still
1308 * don't know if all devices (particularly DM devices this device
1309 * may be stacked on) have matching profiles.  Even if the profiles
1310 * don't match we have no way to fail (to resume) at this point.
1311 */
1312static void dm_table_set_integrity(struct dm_table *t)
1313{
1314        struct gendisk *template_disk = NULL;
1315
1316        if (!blk_get_integrity(dm_disk(t->md)))
1317                return;
1318
1319        template_disk = dm_table_get_integrity_disk(t, true);
1320        if (template_disk)
1321                blk_integrity_register(dm_disk(t->md),
1322                                       blk_get_integrity(template_disk));
1323        else if (blk_integrity_is_initialized(dm_disk(t->md)))
1324                DMWARN("%s: device no longer has a valid integrity profile",
1325                       dm_device_name(t->md));
1326        else
1327                DMWARN("%s: unable to establish an integrity profile",
1328                       dm_device_name(t->md));
1329}
1330
1331static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
1332                                sector_t start, sector_t len, void *data)
1333{
1334        unsigned flush = (*(unsigned *)data);
1335        struct request_queue *q = bdev_get_queue(dev->bdev);
1336
1337        return q && (q->flush_flags & flush);
1338}
1339
1340static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
1341{
1342        struct dm_target *ti;
1343        unsigned i = 0;
1344
1345        /*
1346         * Require at least one underlying device to support flushes.
1347         * t->devices includes internal dm devices such as mirror logs
1348         * so we need to use iterate_devices here, which targets
1349         * supporting flushes must provide.
1350         */
1351        while (i < dm_table_get_num_targets(t)) {
1352                ti = dm_table_get_target(t, i++);
1353
1354                if (!ti->num_flush_requests)
1355                        continue;
1356
1357                if (ti->flush_supported)
1358                        return 1;
1359
1360                if (ti->type->iterate_devices &&
1361                    ti->type->iterate_devices(ti, device_flush_capable, &flush))
1362                        return 1;
1363        }
1364
1365        return 0;
1366}
1367
1368static bool dm_table_discard_zeroes_data(struct dm_table *t)
1369{
1370        struct dm_target *ti;
1371        unsigned i = 0;
1372
1373        /* Ensure that all targets support discard_zeroes_data. */
1374        while (i < dm_table_get_num_targets(t)) {
1375                ti = dm_table_get_target(t, i++);
1376
1377                if (ti->discard_zeroes_data_unsupported)
1378                        return 0;
1379        }
1380
1381        return 1;
1382}
1383
1384static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
1385                            sector_t start, sector_t len, void *data)
1386{
1387        struct request_queue *q = bdev_get_queue(dev->bdev);
1388
1389        return q && blk_queue_nonrot(q);
1390}
1391
1392static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
1393                             sector_t start, sector_t len, void *data)
1394{
1395        struct request_queue *q = bdev_get_queue(dev->bdev);
1396
1397        return q && !blk_queue_add_random(q);
1398}
1399
1400static bool dm_table_all_devices_attribute(struct dm_table *t,
1401                                           iterate_devices_callout_fn func)
1402{
1403        struct dm_target *ti;
1404        unsigned i = 0;
1405
1406        while (i < dm_table_get_num_targets(t)) {
1407                ti = dm_table_get_target(t, i++);
1408
1409                if (!ti->type->iterate_devices ||
1410                    !ti->type->iterate_devices(ti, func, NULL))
1411                        return 0;
1412        }
1413
1414        return 1;
1415}
1416
1417void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1418                               struct queue_limits *limits)
1419{
1420        unsigned flush = 0;
1421
1422        /*
1423         * Copy table's limits to the DM device's request_queue
1424         */
1425        q->limits = *limits;
1426
1427        if (!dm_table_supports_discards(t))
1428                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
1429        else
1430                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
1431
1432        if (dm_table_supports_flush(t, REQ_FLUSH)) {
1433                flush |= REQ_FLUSH;
1434                if (dm_table_supports_flush(t, REQ_FUA))
1435                        flush |= REQ_FUA;
1436        }
1437        blk_queue_flush(q, flush);
1438
1439        if (!dm_table_discard_zeroes_data(t))
1440                q->limits.discard_zeroes_data = 0;
1441
1442        /* Ensure that all underlying devices are non-rotational. */
1443        if (dm_table_all_devices_attribute(t, device_is_nonrot))
1444                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
1445        else
1446                queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
1447
1448        q->limits.max_write_same_sectors = 0;
1449
1450        dm_table_set_integrity(t);
1451
1452        /*
1453         * Determine whether or not this queue's I/O timings contribute
1454         * to the entropy pool. Only request-based targets use this.
1455         * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
1456         * have it set.
1457         */
1458        if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
1459                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
1460
1461        /*
1462         * QUEUE_FLAG_STACKABLE must be set after all queue settings are
1463         * visible to other CPUs because, once the flag is set, incoming bios
1464         * are processed by request-based dm, which refers to the queue
1465         * settings.
1466         * Until the flag set, bios are passed to bio-based dm and queued to
1467         * md->deferred where queue settings are not needed yet.
1468         * Those bios are passed to request-based dm at the resume time.
1469         */
1470        smp_mb();
1471        if (dm_table_request_based(t))
1472                queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
1473}
1474
1475unsigned int dm_table_get_num_targets(struct dm_table *t)
1476{
1477        return t->num_targets;
1478}
1479
1480struct list_head *dm_table_get_devices(struct dm_table *t)
1481{
1482        return &t->devices;
1483}
1484
1485fmode_t dm_table_get_mode(struct dm_table *t)
1486{
1487        return t->mode;
1488}
1489EXPORT_SYMBOL(dm_table_get_mode);
1490
1491static void suspend_targets(struct dm_table *t, unsigned postsuspend)
1492{
1493        int i = t->num_targets;
1494        struct dm_target *ti = t->targets;
1495
1496        while (i--) {
1497                if (postsuspend) {
1498                        if (ti->type->postsuspend)
1499                                ti->type->postsuspend(ti);
1500                } else if (ti->type->presuspend)
1501                        ti->type->presuspend(ti);
1502
1503                ti++;
1504        }
1505}
1506
1507void dm_table_presuspend_targets(struct dm_table *t)
1508{
1509        if (!t)
1510                return;
1511
1512        suspend_targets(t, 0);
1513}
1514
1515void dm_table_postsuspend_targets(struct dm_table *t)
1516{
1517        if (!t)
1518                return;
1519
1520        suspend_targets(t, 1);
1521}
1522
1523int dm_table_resume_targets(struct dm_table *t)
1524{
1525        int i, r = 0;
1526
1527        for (i = 0; i < t->num_targets; i++) {
1528                struct dm_target *ti = t->targets + i;
1529
1530                if (!ti->type->preresume)
1531                        continue;
1532
1533                r = ti->type->preresume(ti);
1534                if (r)
1535                        return r;
1536        }
1537
1538        for (i = 0; i < t->num_targets; i++) {
1539                struct dm_target *ti = t->targets + i;
1540
1541                if (ti->type->resume)
1542                        ti->type->resume(ti);
1543        }
1544
1545        return 0;
1546}
1547
1548void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
1549{
1550        list_add(&cb->list, &t->target_callbacks);
1551}
1552EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
1553
1554int dm_table_any_congested(struct dm_table *t, int bdi_bits)
1555{
1556        struct dm_dev_internal *dd;
1557        struct list_head *devices = dm_table_get_devices(t);
1558        struct dm_target_callbacks *cb;
1559        int r = 0;
1560
1561        list_for_each_entry(dd, devices, list) {
1562                struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
1563                char b[BDEVNAME_SIZE];
1564
1565                if (likely(q))
1566                        r |= bdi_congested(&q->backing_dev_info, bdi_bits);
1567                else
1568                        DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
1569                                     dm_device_name(t->md),
1570                                     bdevname(dd->dm_dev.bdev, b));
1571        }
1572
1573        list_for_each_entry(cb, &t->target_callbacks, list)
1574                if (cb->congested_fn)
1575                        r |= cb->congested_fn(cb, bdi_bits);
1576
1577        return r;
1578}
1579
1580int dm_table_any_busy_target(struct dm_table *t)
1581{
1582        unsigned i;
1583        struct dm_target *ti;
1584
1585        for (i = 0; i < t->num_targets; i++) {
1586                ti = t->targets + i;
1587                if (ti->type->busy && ti->type->busy(ti))
1588                        return 1;
1589        }
1590
1591        return 0;
1592}
1593
1594struct mapped_device *dm_table_get_md(struct dm_table *t)
1595{
1596        return t->md;
1597}
1598EXPORT_SYMBOL(dm_table_get_md);
1599
1600static int device_discard_capable(struct dm_target *ti, struct dm_dev *dev,
1601                                  sector_t start, sector_t len, void *data)
1602{
1603        struct request_queue *q = bdev_get_queue(dev->bdev);
1604
1605        return q && blk_queue_discard(q);
1606}
1607
1608bool dm_table_supports_discards(struct dm_table *t)
1609{
1610        struct dm_target *ti;
1611        unsigned i = 0;
1612
1613        /*
1614         * Unless any target used by the table has set discards_supported,
1615         * require at least one underlying device to support discards.
1616         * t->devices includes internal dm devices such as mirror logs
1617         * so we need to use iterate_devices here, which targets
1618         * supporting discard selectively must provide.
1619         */
1620        while (i < dm_table_get_num_targets(t)) {
1621                ti = dm_table_get_target(t, i++);
1622
1623                if (!ti->num_discard_requests)
1624                        continue;
1625
1626                if (ti->discards_supported)
1627                        return 1;
1628
1629                if (ti->type->iterate_devices &&
1630                    ti->type->iterate_devices(ti, device_discard_capable, NULL))
1631                        return 1;
1632        }
1633
1634        return 0;
1635}
1636