/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>

struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;
/*
 * Kind of status a target's status method is asked to produce:
 * STATUSTYPE_INFO for runtime status, STATUSTYPE_TABLE for the
 * table line the target was constructed from.
 */
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

/*
 * Per-bio context handed from a target's map method to its end_io
 * method; a target may stash either a pointer or a 64-bit value.
 */
union map_info {
        void *ptr;
        unsigned long long ll;
};
/*
 * Target constructor: parses argc/argv and sets up ti->private.
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
                          unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio,
                          union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
                            struct bio *bio, int error,
                            union map_info *map_context);

/* Hooks invoked around device suspend/resume (all optional). */
typedef void (*dm_flush_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

/* Format status of the requested status_type into result[0..maxlen). */
typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
                             char *result, unsigned int maxlen);

/* Process a message (argc/argv style) sent to this target. */
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

/* Handle an ioctl on behalf of the target. */
typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
                            unsigned long arg);

/*
 * Bio-merge hook; NOTE(review): presumably returns the number of bytes
 * of biovec that may be merged, capped at max_size — confirm against
 * the block layer's merge_bvec_fn contract.
 */
typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
                            struct bio_vec *biovec, int max_size);
/* Report an error message (NOTE(review): see implementation for sink). */
void dm_error(const char *message);

/*
 * Combine device limits.
 */
void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);

/*
 * An underlying device opened on behalf of a target
 * (obtained via dm_get_device, released via dm_put_device).
 */
struct dm_dev {
        struct block_device *bdev;
        fmode_t mode;           /* mode the device was opened with */
        char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 * FIXME: too many arguments.
 */
int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
                  sector_t len, fmode_t mode, struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
 * Information about a target type.  A target module fills one of
 * these in and registers it with dm_register_target().  Only name,
 * module, version, ctr, dtr and map are obviously mandatory for a
 * useful target; the remaining hooks are optional.
 */
struct target_type {
        const char *name;       /* unique target name, as used in table lines */
        struct module *module;  /* owning module */
        unsigned version[3];    /* major, minor, patchlevel */
        dm_ctr_fn ctr;
        dm_dtr_fn dtr;
        dm_map_fn map;
        dm_endio_fn end_io;
        dm_flush_fn flush;
        dm_presuspend_fn presuspend;
        dm_postsuspend_fn postsuspend;
        dm_preresume_fn preresume;
        dm_resume_fn resume;
        dm_status_fn status;
        dm_message_fn message;
        dm_ioctl_fn ioctl;
        dm_merge_fn merge;
};
/*
 * Combined I/O restrictions for a table, accumulated from the request
 * queues of the underlying devices (see dm_set_device_limits).
 */
struct io_restrictions {
        unsigned long bounce_pfn;
        unsigned long seg_boundary_mask;
        unsigned max_hw_sectors;
        unsigned max_sectors;
        unsigned max_segment_size;
        unsigned short hardsect_size;
        unsigned short max_hw_segments;
        unsigned short max_phys_segments;
        unsigned char no_cluster; /* inverted so that 0 is default */
};
/*
 * One target instance within a table: covers the sector range
 * [begin, begin + len) of the mapped device.
 */
struct dm_target {
        struct dm_table *table; /* table this target belongs to */
        struct target_type *type;

        /* target limits */
        sector_t begin;         /* first sector of this target's range */
        sector_t len;           /* number of sectors covered */

        /* FIXME: turn this into a mask, and merge with io_restrictions */
        /* Always a power of 2 */
        sector_t split_io;

        /*
         * These are automatically filled in by
         * dm_table_get_device.
         */
        struct io_restrictions limits;

        /* target specific data */
        void *private;

        /* Used to provide an error string from the ctr */
        char *error;
};
/* Make a target type (un)available for use in tables; 0 on success. */
int dm_register_target(struct target_type *t);
int dm_unregister_target(struct target_type *t);

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 * dm_get_md looks the device up by dev_t; NOTE(review): presumably it
 * also takes a reference that must be dropped with dm_put — confirm.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct mapped_device *md);
int dm_noflush_suspending(struct dm_target *ti);
union map_info *dm_get_mapinfo(struct bio *bio);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 * 'params' is the target's constructor argument string.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Unplug all devices in a table.
 */
void dm_table_unplug_all(struct dm_table *t);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_table(struct mapped_device *md);
void dm_table_get(struct dm_table *t);
void dm_table_put(struct dm_table *t);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *t);

/*
 * A wrapper around vmalloc.
 * NOTE(review): the name suggests calloc semantics (zeroed memory) —
 * verify against the implementation.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

 274/*-----------------------------------------------------------------
 275 * Macros.
 276 *---------------------------------------------------------------*/
 277#define DM_NAME "device-mapper"
 278
 279#define DMERR(f, arg...) \
 280        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
 281#define DMERR_LIMIT(f, arg...) \
 282        do { \
 283                if (printk_ratelimit()) \
 284                        printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
 285                               f "\n", ## arg); \
 286        } while (0)
 287
 288#define DMWARN(f, arg...) \
 289        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
 290#define DMWARN_LIMIT(f, arg...) \
 291        do { \
 292                if (printk_ratelimit()) \
 293                        printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
 294                               f "\n", ## arg); \
 295        } while (0)
 296
 297#define DMINFO(f, arg...) \
 298        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
 299#define DMINFO_LIMIT(f, arg...) \
 300        do { \
 301                if (printk_ratelimit()) \
 302                        printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
 303                               "\n", ## arg); \
 304        } while (0)
 305
 306#ifdef CONFIG_DM_DEBUG
 307#  define DMDEBUG(f, arg...) \
 308        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
 309#  define DMDEBUG_LIMIT(f, arg...) \
 310        do { \
 311                if (printk_ratelimit()) \
 312                        printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
 313                               "\n", ## arg); \
 314        } while (0)
 315#else
 316#  define DMDEBUG(f, arg...) do {} while (0)
 317#  define DMDEBUG_LIMIT(f, arg...) do {} while (0)
 318#endif
 319
 320#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
 321                          0 : scnprintf(result + sz, maxlen - sz, x))
 322
#define SECTOR_SHIFT 9  /* 512-byte sectors: bytes = sectors << 9 */

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE     1
#define DM_ENDIO_REQUEUE        2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED      0
#define DM_MAPIO_REMAPPED       1
#define DM_MAPIO_REQUEUE        DM_ENDIO_REQUEUE

/*
 * Ceiling(n / sz)
 * Beware: both arguments are evaluated more than once.
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

/*
 * Ceiling(n / sz) for sector_t values, using sector_div() so the
 * division is safe when sector_t is 64-bit on a 32-bit machine.
 * (Uses a GCC statement expression.)
 */
#define dm_sector_div_up(n, sz) ( \
{ \
        sector_t _r = ((n) + (sz) - 1); \
        sector_div(_r, (sz)); \
        _r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * True if an array of 'num' objects of size 'obj' plus 'fixed' bytes
 * of overhead would overflow a UINT_MAX-sized allocation.
 */
#define dm_array_too_big(fixed, obj, num) \
        ((num) > (UINT_MAX - (fixed)) / (obj))

 359static inline sector_t to_sector(unsigned long n)
 360{
 361        return (n >> SECTOR_SHIFT);
 362}
 363
 364static inline unsigned long to_bytes(sector_t n)
 365{
 366        return (n << SECTOR_SHIFT);
 367}
 368
#endif  /* _LINUX_DEVICE_MAPPER_H */