linux/include/linux/bio.h
/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON      BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES           256
#define BIO_MAX_SIZE            (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS         (BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT  (8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)   ((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)     ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)         do {                    \
        WARN_ON(prio >= (1 << IOPRIO_BITS));                    \
        (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);          \
        (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);     \
} while (0)
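
/*
 * Illustrative sketch (not part of this header): stamping a bio with a
 * best-effort priority before submission. IOPRIO_PRIO_VALUE() and
 * IOPRIO_CLASS_BE come from <linux/ioprio.h>; the level 4 is just an
 * example value.
 *
 *      struct bio *bio = ...;
 *
 *      bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
 *      if (bio_prio_valid(bio))
 *              pr_debug("bio prio %lu\n", bio_prio(bio));
 */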

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)          bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)           bio_iovec((bio))->bv_page
#define bio_offset(bio)         bio_iovec((bio))->bv_offset
#define bio_segments(bio)       ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)        ((bio)->bi_size >> 9)

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
        if (bio->bi_vcnt)
                return bio_iovec(bio)->bv_len;
        else /* dataless requests such as discard */
                return bio->bi_size;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio->bi_vcnt)
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}
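
/*
 * Illustrative sketch (assumes the bio's pages are in lowmem, per the
 * comment above): peeking at the current segment's bytes. On highmem
 * configurations, map each bvec with bvec_kmap_irq() (further down)
 * instead.
 *
 *      char *p = bio_data(bio);
 *
 *      if (p)
 *              pr_debug("first byte of current segment: %02x\n", p[0]);
 */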

static inline int bio_has_allocated_vec(struct bio *bio)
{
        return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
}

/*
 * will die
 */
#define bio_to_phys(bio)        (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)        (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fallback, the user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)                             \
        (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +    \
                bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)         bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)       bio_iovec_idx((bio), (bio)->bi_idx)

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)     \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)       \
        __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
        BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
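
/*
 * Worked example (illustrative numbers): with queue_segment_boundary()
 * returning 0xffff (64KB windows), a combined segment spanning
 * [0x10000, 0x12000) gives (0x10000 | 0xffff) == ((0x12000 - 1) | 0xffff)
 * == 0x1ffff, so the merge is allowed. One spanning [0x1f000, 0x21000)
 * gives 0x1ffff on the left but 0x2ffff on the right, so
 * __BIO_SEG_BOUNDARY is false and the bvecs must stay in separate
 * segments.
 */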

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)                  \
        for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
             i < (bio)->bi_vcnt;                                        \
             bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)                               \
        __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
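
/*
 * Illustrative sketch: walking the pending segments of a bio, e.g. to
 * checksum the data it carries (crc32() is from <linux/crc32.h>; lowmem
 * pages are assumed, otherwise map each bvec with bvec_kmap_irq()).
 *
 *      struct bio_vec *bvec;
 *      u32 csum = 0;
 *      int i;
 *
 *      bio_for_each_segment(bvec, bio, i) {
 *              void *p = page_address(bvec->bv_page) + bvec->bv_offset;
 *              csum = crc32(csum, p, bvec->bv_len);
 *      }
 */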

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could potentially complete before
 * submit_bio returns, and the bio would then already be freed memory
 * by the time the if (bio->bi_flags ...) test runs
 */
#define bio_get(bio)    atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */

        sector_t                bip_sector;     /* virtual start sector */

        void                    *bip_buf;       /* generated integrity data */
        bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */

        unsigned int            bip_size;

        unsigned short          bip_slab;       /* slab the bip came from */
        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
        unsigned short          bip_idx;        /* current bip_vec index */

        struct work_struct      bip_work;       /* I/O completion */
        struct bio_vec          bip_vec[0];     /* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 *   in bio2.bi_private
 */
struct bio_pair {
        struct bio                      bio1, bio2;
        struct bio_vec                  bv1, bv2;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        struct bio_integrity_payload    bip1, bip2;
        struct bio_vec                  iv1, iv2;
#endif
        atomic_t                        cnt;
        int                             error;
};
extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
extern void bio_pair_release(struct bio_pair *dbio);
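
/*
 * Illustrative sketch (modeled on remapping drivers such as raid0):
 * splitting a single-page bio that crosses a chunk boundary and sending
 * each half to its own target. bio_pair_release() drops the pair's
 * reference once both halves are in flight.
 *
 *      struct bio_pair *bp = bio_split(bio, first_sectors);
 *
 *      if (bp) {
 *              generic_make_request(&bp->bio1);
 *              generic_make_request(&bp->bio2);
 *              bio_pair_release(bp);
 *      }
 */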

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
        return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
        return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
        return bio_clone_bioset(bio, gfp_mask, NULL);
}
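
/*
 * Illustrative sketch: allocating a one-segment bio from the shared
 * fs_bio_set and submitting it. bio_add_page() and bio_endio() are
 * declared below; submit_bio() lives in <linux/fs.h>, and my_end_io is
 * a completion callback the caller provides.
 *
 *      struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *      bio->bi_bdev = bdev;
 *      bio->bi_sector = sector;
 *      bio->bi_end_io = my_end_io;
 *      bio_add_page(bio, page, PAGE_SIZE, 0);
 *      submit_bio(READ, bio);
 */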

extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
                                unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
                                    struct block_device *,
                                    struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
                                 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
                                 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
                                     struct rq_map_data *, struct sg_iovec *,
                                     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else   /* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif  /* CONFIG_BLK_CGROUP */

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX  (BIOVEC_NR_POOLS - 1)

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        mempool_t *bio_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t *bio_integrity_pool;
#endif
        mempool_t *bvec_pool;
};
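
/*
 * Illustrative sketch: a driver creating its own bio_set so its
 * allocations cannot deadlock against users of the shared fs_bio_set.
 * The pool size of 2 mirrors BIO_POOL_SIZE; the front_pad of 0 is just
 * an example value.
 *
 *      struct bio_set *bs = bioset_create(2, 0);
 *
 *      if (bs) {
 *              struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
 *              ...
 *              bioset_free(bs);
 *      }
 */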

struct biovec_slab {
        int nr_vecs;
        char *name;
        struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

#ifdef CONFIG_HIGHMEM
/*
 * remember to never re-enable interrupts between a bvec_kmap_irq and
 * bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * might not be a highmem page, but the preempt/irq count
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page);

        BUG_ON(addr & ~PAGE_MASK);

        return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr);
        local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        *flags = 0;
}
#endif
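
/*
 * Illustrative sketch: zeroing one bvec that may live in highmem. The
 * mapping disables local interrupts, so keep the critical section short
 * and never sleep between the map and the unmap.
 *
 *      unsigned long flags;
 *      char *p = bvec_kmap_irq(bvec, &flags);
 *
 *      memset(p, 0, bvec->bv_len);
 *      bvec_kunmap_irq(p, &flags);
 */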

static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
                                   unsigned long *flags)
{
        return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)    bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
        __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)      __bio_kunmap_irq(buf, flags)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio && bio->bi_vcnt)
                return true;

        return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
        if (!bio_has_data(bio))
                return false;

        if (bio->bi_rw & REQ_WRITE_SAME)
                return false;

        return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
        if (bio->bi_rw & REQ_NOMERGE_FLAGS)
                return false;

        return true;
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}
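
/*
 * Illustrative sketch: a remapping driver deferring bios to a worker
 * thread. The submission path queues at the tail, the worker drains in
 * FIFO order; a lock (not shown) must serialize the two sides.
 *
 *      struct bio_list deferred;
 *      struct bio *bio;
 *
 *      bio_list_init(&deferred);
 *      bio_list_add(&deferred, bio);
 *
 *      while ((bio = bio_list_pop(&deferred)))
 *              generic_make_request(bio);
 */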

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_vec_idx(bip, idx)   (&(bip->bip_vec[(idx)]))
#define bip_vec(bip)            bip_vec_idx(bip, 0)

#define __bip_for_each_vec(bvl, bip, i, start_idx)                      \
        for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);    \
             i < (bip)->bip_vcnt;                                       \
             bvl++, i++)

#define bip_for_each_vec(bvl, bip, i)                                   \
        __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)                   \
        for_each_bio(_bio)                                              \
                bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
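
/*
 * Illustrative sketch of the submission-side pattern: generate and
 * attach protection information before submitting when the target
 * device wants it. The error handling is the caller's choice and only
 * an example here.
 *
 *      if (bio_integrity_enabled(bio)) {
 *              if (bio_integrity_prep(bio) < 0)
 *                      bio_io_error(bio);
 *      }
 */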

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
        return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
        return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
        return;
}

static inline int bio_integrity_prep(struct bio *bio)
{
        return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
        return;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                                      gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
                                       int sectors)
{
        return;
}

static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
{
        return;
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
                                      unsigned int sectors)
{
        return;
}

static inline void bio_integrity_init(void)
{
        return;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */