linux/fs/bio.c
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/blktrace_api.h>
#include <scsi/sg.h>            /* for struct sg_iovec */

static struct kmem_cache *bio_slab __read_mostly;

static mempool_t *bio_split_pool __read_mostly;

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */

#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
        BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;

unsigned int bvec_nr_vecs(unsigned short idx)
{
        return bvec_slabs[idx].nr_vecs;
}

struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
        struct bio_vec *bvl;

        /*
         * If 'bs' is given, lookup the pool and do the mempool alloc.
         * If not, this is a bio_kmalloc() allocation and just do a
         * kzalloc() for the exact number of vecs right away.
         */
        if (bs) {
                /*
                 * see comment near bvec_array define!
                 */
                switch (nr) {
                case 1:
                        *idx = 0;
                        break;
                case 2 ... 4:
                        *idx = 1;
                        break;
                case 5 ... 16:
                        *idx = 2;
                        break;
                case 17 ... 64:
                        *idx = 3;
                        break;
                case 65 ... 128:
                        *idx = 4;
                        break;
                case 129 ... BIO_MAX_PAGES:
                        *idx = 5;
                        break;
                default:
                        return NULL;
                }

                /*
                 * idx now points to the pool we want to allocate from
                 */
                bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
                if (bvl)
                        memset(bvl, 0,
                                bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
        } else
                bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);

        return bvl;
}

void bio_free(struct bio *bio, struct bio_set *bio_set)
{
        if (bio->bi_io_vec) {
                const int pool_idx = BIO_POOL_IDX(bio);

                BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);

                mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
        }

        if (bio_integrity(bio))
                bio_integrity_free(bio, bio_set);

        mempool_free(bio, bio_set->bio_pool);
}

/*
 * default destructor for a bio allocated with bio_alloc_bioset()
 */
static void bio_fs_destructor(struct bio *bio)
{
        bio_free(bio, fs_bio_set);
}

static void bio_kmalloc_destructor(struct bio *bio)
{
        kfree(bio->bi_io_vec);
        kfree(bio);
}

void bio_init(struct bio *bio)
{
        memset(bio, 0, sizeof(*bio));
        bio->bi_flags = 1 << BIO_UPTODATE;
        bio->bi_comp_cpu = -1;
        atomic_set(&bio->bi_cnt, 1);
}
 144
 145/**
 146 * bio_alloc_bioset - allocate a bio for I/O
 147 * @gfp_mask:   the GFP_ mask given to the slab allocator
 148 * @nr_iovecs:  number of iovecs to pre-allocate
 149 * @bs:         the bio_set to allocate from. If %NULL, just use kmalloc
 150 *
 151 * Description:
 152 *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
 153 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 154 *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
 155 *   fall back to just using @kmalloc to allocate the required memory.
 156 *
 157 *   allocate bio and iovecs from the memory pools specified by the
 158 *   bio_set structure, or @kmalloc if none given.
 159 **/
 160struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 161{
 162        struct bio *bio;
 163
 164        if (bs)
 165                bio = mempool_alloc(bs->bio_pool, gfp_mask);
 166        else
 167                bio = kmalloc(sizeof(*bio), gfp_mask);
 168
 169        if (likely(bio)) {
 170                struct bio_vec *bvl = NULL;
 171
 172                bio_init(bio);
 173                if (likely(nr_iovecs)) {
 174                        unsigned long uninitialized_var(idx);
 175
 176                        bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
 177                        if (unlikely(!bvl)) {
 178                                if (bs)
 179                                        mempool_free(bio, bs->bio_pool);
 180                                else
 181                                        kfree(bio);
 182                                bio = NULL;
 183                                goto out;
 184                        }
 185                        bio->bi_flags |= idx << BIO_POOL_OFFSET;
 186                        bio->bi_max_vecs = bvec_nr_vecs(idx);
 187                }
 188                bio->bi_io_vec = bvl;
 189        }
 190out:
 191        return bio;
 192}

struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
        struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);

        if (bio)
                bio->bi_destructor = bio_fs_destructor;

        return bio;
}
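
/*
 * Usage sketch (not part of the original file): a typical caller
 * allocates a bio, points it at the target device, attaches a page and
 * a completion handler, then submits it. Here "bdev", "sector", "page"
 * and my_end_io() are assumed caller state, not symbols from this file:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	if (bio) {
 *		bio->bi_bdev = bdev;
 *		bio->bi_sector = sector;
 *		bio->bi_end_io = my_end_io;
 *		bio_add_page(bio, page, PAGE_SIZE, 0);
 *		submit_bio(WRITE, bio);
 *	}
 */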

/*
 * Like bio_alloc(), but doesn't use a mempool backing. This means that
 * it CAN fail, but while bio_alloc() can only be used for allocations
 * that have a short (finite) life span, bio_kmalloc() should be used
 * for more permanent bio allocations (like allocating some bios for
 * initialization or setup purposes).
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
{
        struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);

        if (bio)
                bio->bi_destructor = bio_kmalloc_destructor;

        return bio;
}

void zero_fill_bio(struct bio *bio)
{
        unsigned long flags;
        struct bio_vec *bv;
        int i;

        bio_for_each_segment(bv, bio, i) {
                char *data = bvec_kmap_irq(bv, &flags);
                memset(data, 0, bv->bv_len);
                flush_dcache_page(bv->bv_page);
                bvec_kunmap_irq(data, &flags);
        }
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc or bio_get. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
        BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

        /*
         * last put frees it
         */
        if (atomic_dec_and_test(&bio->bi_cnt)) {
                bio->bi_next = NULL;
                bio->bi_destructor(bio);
        }
}

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
        if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
                blk_recount_segments(q, bio);

        return bio->bi_phys_segments;
}

/**
 *      __bio_clone     -       clone a bio
 *      @bio: destination bio
 *      @bio_src: bio to clone
 *
 *      Clone a &bio. Caller will own the returned bio, but not
 *      the actual data it points to. Reference count of returned
 *      bio will be one.
 */
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
        memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
                bio_src->bi_max_vecs * sizeof(struct bio_vec));

        /*
         * most users will be overriding ->bi_bdev with a new target,
         * so we don't set nor calculate new physical/hw segment counts here
         */
        bio->bi_sector = bio_src->bi_sector;
        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_flags |= 1 << BIO_CLONED;
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_vcnt = bio_src->bi_vcnt;
        bio->bi_size = bio_src->bi_size;
        bio->bi_idx = bio_src->bi_idx;
}

/**
 *      bio_clone       -       clone a bio
 *      @bio: bio to clone
 *      @gfp_mask: allocation priority
 *
 *      Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
        struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);

        if (!b)
                return NULL;

        b->bi_destructor = bio_fs_destructor;
        __bio_clone(b, bio);

        if (bio_integrity(bio)) {
                int ret;

                ret = bio_integrity_clone(b, bio, fs_bio_set);

                if (ret < 0)
                        return NULL;
        }

        return b;
}

/**
 *      bio_get_nr_vecs         - return approx number of vecs
 *      @bdev:  I/O target
 *
 *      Return the approximate number of pages we can send to this target.
 *      There's no guarantee that you will be able to fit this number of pages
 *      into a bio; it does not account for dynamic restrictions that vary
 *      on offset.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);
        int nr_pages;

        nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (nr_pages > q->max_phys_segments)
                nr_pages = q->max_phys_segments;
        if (nr_pages > q->max_hw_segments)
                nr_pages = q->max_hw_segments;

        return nr_pages;
}
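
/*
 * Usage sketch: size an allocation to the queue limits before building
 * a bio page by page ("bdev" is assumed caller state). The count is
 * approximate, so the bio_add_page() return value must still be
 * checked:
 *
 *	int nr = bio_get_nr_vecs(bdev);
 *	struct bio *bio = bio_alloc(GFP_KERNEL, min_t(int, nr, BIO_MAX_PAGES));
 */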

static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                          *page, unsigned int len, unsigned int offset,
                          unsigned short max_sectors)
{
        int retried_segments = 0;
        struct bio_vec *bvec;

        /*
         * cloned bio must not modify vec list
         */
        if (unlikely(bio_flagged(bio, BIO_CLONED)))
                return 0;

        if (((bio->bi_size + len) >> 9) > max_sectors)
                return 0;

        /*
         * For filesystems with a blocksize smaller than the pagesize
         * we will often be called with the same page as last time and
         * a consecutive offset.  Optimize this special case.
         */
        if (bio->bi_vcnt > 0) {
                struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

                if (page == prev->bv_page &&
                    offset == prev->bv_offset + prev->bv_len) {
                        prev->bv_len += len;

                        if (q->merge_bvec_fn) {
                                struct bvec_merge_data bvm = {
                                        .bi_bdev = bio->bi_bdev,
                                        .bi_sector = bio->bi_sector,
                                        .bi_size = bio->bi_size,
                                        .bi_rw = bio->bi_rw,
                                };

                                if (q->merge_bvec_fn(q, &bvm, prev) < len) {
                                        prev->bv_len -= len;
                                        return 0;
                                }
                        }

                        goto done;
                }
        }

        if (bio->bi_vcnt >= bio->bi_max_vecs)
                return 0;

        /*
         * we might lose a segment or two here, but rather that than
         * make this too complex.
         */

        while (bio->bi_phys_segments >= q->max_phys_segments
               || bio->bi_phys_segments >= q->max_hw_segments) {

                if (retried_segments)
                        return 0;

                retried_segments = 1;
                blk_recount_segments(q, bio);
        }

        /*
         * setup the new entry, we might clear it again later if we
         * cannot add the page
         */
        bvec = &bio->bi_io_vec[bio->bi_vcnt];
        bvec->bv_page = page;
        bvec->bv_len = len;
        bvec->bv_offset = offset;

        /*
         * if queue has other restrictions (eg varying max sector size
         * depending on offset), it can specify a merge_bvec_fn in the
         * queue to get further control
         */
        if (q->merge_bvec_fn) {
                struct bvec_merge_data bvm = {
                        .bi_bdev = bio->bi_bdev,
                        .bi_sector = bio->bi_sector,
                        .bi_size = bio->bi_size,
                        .bi_rw = bio->bi_rw,
                };

                /*
                 * merge_bvec_fn() returns number of bytes it can accept
                 * at this offset
                 */
                if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
                        bvec->bv_page = NULL;
                        bvec->bv_len = 0;
                        bvec->bv_offset = 0;
                        return 0;
                }
        }

        /* If we may be able to merge these biovecs, force a recount */
        if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
                bio->bi_flags &= ~(1 << BIO_SEG_VALID);

        bio->bi_vcnt++;
        bio->bi_phys_segments++;
 done:
        bio->bi_size += len;
        return len;
}

/**
 *      bio_add_pc_page -       attempt to add page to bio
 *      @q: the target queue
 *      @bio: destination bio
 *      @page: page to add
 *      @len: vec entry length
 *      @offset: vec entry offset
 *
 *      Attempt to add a page to the bio_vec maplist. This can fail for a
 *      number of reasons, such as the bio being full or target block
 *      device limitations. The target block device must allow bios
 *      smaller than PAGE_SIZE, so it is always possible to add a single
 *      page to an empty bio. This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
                    unsigned int len, unsigned int offset)
{
        return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
}

/**
 *      bio_add_page    -       attempt to add page to bio
 *      @bio: destination bio
 *      @page: page to add
 *      @len: vec entry length
 *      @offset: vec entry offset
 *
 *      Attempt to add a page to the bio_vec maplist. This can fail for a
 *      number of reasons, such as the bio being full or target block
 *      device limitations. The target block device must allow bios
 *      smaller than PAGE_SIZE, so it is always possible to add a single
 *      page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
                 unsigned int offset)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
}
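
/*
 * Usage sketch: since bio_add_page() returns the number of bytes
 * actually added (zero when the bio is full or the device refuses the
 * page), callers follow the pattern fs/mpage.c uses: on a short add,
 * submit the current bio and start a fresh one. "bdev" and the setup
 * elided by "..." are caller state:
 *
 *	if (bio_add_page(bio, page, len, 0) < len) {
 *		submit_bio(WRITE, bio);
 *		bio = bio_alloc(GFP_NOIO, bio_get_nr_vecs(bdev));
 *		...
 *	}
 */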

struct bio_map_data {
        struct bio_vec *iovecs;
        struct sg_iovec *sgvecs;
        int nr_sgvecs;
        int is_our_pages;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
                             struct sg_iovec *iov, int iov_count,
                             int is_our_pages)
{
        memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
        bmd->nr_sgvecs = iov_count;
        bmd->is_our_pages = is_our_pages;
        bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
        kfree(bmd->iovecs);
        kfree(bmd->sgvecs);
        kfree(bmd);
}

static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
                                               gfp_t gfp_mask)
{
        struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);

        if (!bmd)
                return NULL;

        bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
        if (!bmd->iovecs) {
                kfree(bmd);
                return NULL;
        }

        bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
        if (bmd->sgvecs)
                return bmd;

        kfree(bmd->iovecs);
        kfree(bmd);
        return NULL;
}

static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
                          struct sg_iovec *iov, int iov_count, int uncopy,
                          int do_free_page)
{
        int ret = 0, i;
        struct bio_vec *bvec;
        int iov_idx = 0;
        unsigned int iov_off = 0;
        int read = bio_data_dir(bio) == READ;

        __bio_for_each_segment(bvec, bio, i, 0) {
                char *bv_addr = page_address(bvec->bv_page);
                unsigned int bv_len = iovecs[i].bv_len;

                while (bv_len && iov_idx < iov_count) {
                        unsigned int bytes;
                        char *iov_addr;

                        bytes = min_t(unsigned int,
                                      iov[iov_idx].iov_len - iov_off, bv_len);
                        iov_addr = iov[iov_idx].iov_base + iov_off;

                        if (!ret) {
                                if (!read && !uncopy)
                                        ret = copy_from_user(bv_addr, iov_addr,
                                                             bytes);
                                if (read && uncopy)
                                        ret = copy_to_user(iov_addr, bv_addr,
                                                           bytes);

                                if (ret)
                                        ret = -EFAULT;
                        }

                        bv_len -= bytes;
                        bv_addr += bytes;
                        iov_addr += bytes;
                        iov_off += bytes;

                        if (iov[iov_idx].iov_len == iov_off) {
                                iov_idx++;
                                iov_off = 0;
                        }
                }

                if (do_free_page)
                        __free_page(bvec->bv_page);
        }

        return ret;
}

/**
 *      bio_uncopy_user -       finish previously mapped bio
 *      @bio: bio being terminated
 *
 *      Free pages allocated from bio_copy_user() and write back data
 *      to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
        struct bio_map_data *bmd = bio->bi_private;
        int ret = 0;

        if (!bio_flagged(bio, BIO_NULL_MAPPED))
                ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
                                     bmd->nr_sgvecs, 1, bmd->is_our_pages);
        bio_free_map_data(bmd);
        bio_put(bio);
        return ret;
}

/**
 *      bio_copy_user_iov       -       copy user data to bio
 *      @q: destination block queue
 *      @map_data: pointer to the rq_map_data holding pages (if necessary)
 *      @iov:   the iovec.
 *      @iov_count: number of elements in the iovec
 *      @write_to_vm: bool indicating writing to pages or not
 *      @gfp_mask: memory allocation flags
 *
 *      Prepares and returns a bio for indirect user io, bouncing data
 *      to/from kernel pages as necessary. Must be paired with a
 *      call to bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
                              struct rq_map_data *map_data,
                              struct sg_iovec *iov, int iov_count,
                              int write_to_vm, gfp_t gfp_mask)
{
        struct bio_map_data *bmd;
        struct bio_vec *bvec;
        struct page *page;
        struct bio *bio;
        int i, ret;
        int nr_pages = 0;
        unsigned int len = 0;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr;
                unsigned long end;
                unsigned long start;

                uaddr = (unsigned long)iov[i].iov_base;
                end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                start = uaddr >> PAGE_SHIFT;

                nr_pages += end - start;
                len += iov[i].iov_len;
        }

        bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
        if (!bmd)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        bio = bio_alloc(gfp_mask, nr_pages);
        if (!bio)
                goto out_bmd;

        bio->bi_rw |= (!write_to_vm << BIO_RW);

        ret = 0;
        i = 0;
        while (len) {
                unsigned int bytes;

                if (map_data)
                        bytes = 1U << (PAGE_SHIFT + map_data->page_order);
                else
                        bytes = PAGE_SIZE;

                if (bytes > len)
                        bytes = len;

                if (map_data) {
                        if (i == map_data->nr_entries) {
                                ret = -ENOMEM;
                                break;
                        }
                        page = map_data->pages[i++];
                } else
                        page = alloc_page(q->bounce_gfp | gfp_mask);
                if (!page) {
                        ret = -ENOMEM;
                        break;
                }

                if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
                        break;

                len -= bytes;
        }

        if (ret)
                goto cleanup;

        /*
         * success
         */
        if (!write_to_vm) {
                ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 0);
                if (ret)
                        goto cleanup;
        }

        bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
        return bio;
cleanup:
        if (!map_data)
                bio_for_each_segment(bvec, bio, i)
                        __free_page(bvec->bv_page);

        bio_put(bio);
out_bmd:
        bio_free_map_data(bmd);
        return ERR_PTR(ret);
}

/**
 *      bio_copy_user   -       copy user data to bio
 *      @q: destination block queue
 *      @map_data: pointer to the rq_map_data holding pages (if necessary)
 *      @uaddr: start of user address
 *      @len: length in bytes
 *      @write_to_vm: bool indicating writing to pages or not
 *      @gfp_mask: memory allocation flags
 *
 *      Prepares and returns a bio for indirect user io, bouncing data
 *      to/from kernel pages as necessary. Must be paired with a
 *      call to bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
                          unsigned long uaddr, unsigned int len,
                          int write_to_vm, gfp_t gfp_mask)
{
        struct sg_iovec iov;

        iov.iov_base = (void __user *)uaddr;
        iov.iov_len = len;

        return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}

static struct bio *__bio_map_user_iov(struct request_queue *q,
                                      struct block_device *bdev,
                                      struct sg_iovec *iov, int iov_count,
                                      int write_to_vm, gfp_t gfp_mask)
{
        int i, j;
        int nr_pages = 0;
        struct page **pages;
        struct bio *bio;
        int cur_page = 0;
        int ret, offset;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
                unsigned long len = iov[i].iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;

                nr_pages += end - start;
                /*
                 * buffer must be aligned to at least hardsector size for now
                 */
                if (uaddr & queue_dma_alignment(q))
                        return ERR_PTR(-EINVAL);
        }

        if (!nr_pages)
                return ERR_PTR(-EINVAL);

        bio = bio_alloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        ret = -ENOMEM;
        pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
        if (!pages)
                goto out;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;
                unsigned long len = iov[i].iov_len;
                unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long start = uaddr >> PAGE_SHIFT;
                const int local_nr_pages = end - start;
                const int page_limit = cur_page + local_nr_pages;

                ret = get_user_pages_fast(uaddr, local_nr_pages,
                                write_to_vm, &pages[cur_page]);
                if (ret < local_nr_pages) {
                        ret = -EFAULT;
                        goto out_unmap;
                }

                offset = uaddr & ~PAGE_MASK;
                for (j = cur_page; j < page_limit; j++) {
                        unsigned int bytes = PAGE_SIZE - offset;

                        if (len <= 0)
                                break;

                        if (bytes > len)
                                bytes = len;

                        /*
                         * sorry...
                         */
                        if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
                                            bytes)
                                break;

                        len -= bytes;
                        offset = 0;
                }

                cur_page = j;
                /*
                 * release the pages we didn't map into the bio, if any
                 */
                while (j < page_limit)
                        page_cache_release(pages[j++]);
        }

        kfree(pages);

        /*
         * set data direction, and check if mapped pages need bouncing
         */
        if (!write_to_vm)
                bio->bi_rw |= (1 << BIO_RW);

        bio->bi_bdev = bdev;
        bio->bi_flags |= (1 << BIO_USER_MAPPED);
        return bio;

 out_unmap:
        for (i = 0; i < nr_pages; i++) {
                if (!pages[i])
                        break;
                page_cache_release(pages[i]);
        }
 out:
        kfree(pages);
        bio_put(bio);
        return ERR_PTR(ret);
}

/**
 *      bio_map_user    -       map user address into bio
 *      @q: the struct request_queue for the bio
 *      @bdev: destination block device
 *      @uaddr: start of user address
 *      @len: length in bytes
 *      @write_to_vm: bool indicating writing to pages or not
 *      @gfp_mask: memory allocation flags
 *
 *      Map the user space address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
                         unsigned long uaddr, unsigned int len, int write_to_vm,
                         gfp_t gfp_mask)
{
        struct sg_iovec iov;

        iov.iov_base = (void __user *)uaddr;
        iov.iov_len = len;

        return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
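
/*
 * Usage sketch: map a user buffer for a READ from the device and unmap
 * it again once the io has completed. The ERR_PTR convention mirrors
 * the returns above; "q", "bdev", "uaddr" and "len" are assumed caller
 * state:
 *
 *	struct bio *bio;
 *
 *	bio = bio_map_user(q, bdev, uaddr, len, 1, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 *	... submit the bio and wait for completion ...
 *
 *	bio_unmap_user(bio);
 */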

/**
 *      bio_map_user_iov - map user sg_iovec table into bio
 *      @q: the struct request_queue for the bio
 *      @bdev: destination block device
 *      @iov:   the iovec.
 *      @iov_count: number of elements in the iovec
 *      @write_to_vm: bool indicating writing to pages or not
 *      @gfp_mask: memory allocation flags
 *
 *      Map the user space address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
                             struct sg_iovec *iov, int iov_count,
                             int write_to_vm, gfp_t gfp_mask)
{
        struct bio *bio;

        bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
                                 gfp_mask);
        if (IS_ERR(bio))
                return bio;

        /*
         * subtle -- if __bio_map_user() ended up bouncing a bio,
         * it would normally disappear when its bi_end_io is run.
         * however, we need it for the unmap, so grab an extra
         * reference to it
         */
        bio_get(bio);

        return bio;
}

static void __bio_unmap_user(struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /*
         * make sure we dirty pages we wrote to
         */
        __bio_for_each_segment(bvec, bio, i, 0) {
                if (bio_data_dir(bio) == READ)
                        set_page_dirty_lock(bvec->bv_page);

                page_cache_release(bvec->bv_page);
        }

        bio_put(bio);
}
/**
 *      bio_unmap_user  -       unmap a bio
 *      @bio:           the bio being unmapped
 *
 *      Unmap a bio previously mapped by bio_map_user(). Must be called from
 *      process context.
 *
 *      bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
        __bio_unmap_user(bio);
        bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio, int err)
{
        bio_put(bio);
}

static struct bio *__bio_map_kern(struct request_queue *q, void *data,
                                  unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr = (unsigned long)data;
        unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned long start = kaddr >> PAGE_SHIFT;
        const int nr_pages = end - start;
        int offset, i;
        struct bio *bio;

        bio = bio_alloc(gfp_mask, nr_pages);
        if (!bio)
                return ERR_PTR(-ENOMEM);

        offset = offset_in_page(kaddr);
        for (i = 0; i < nr_pages; i++) {
                unsigned int bytes = PAGE_SIZE - offset;

                if (len <= 0)
                        break;

                if (bytes > len)
                        bytes = len;

                if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
                                    offset) < bytes)
                        break;

                data += bytes;
                len -= bytes;
                offset = 0;
        }

        bio->bi_end_io = bio_map_kern_endio;
        return bio;
}

/**
 *      bio_map_kern    -       map kernel address into bio
 *      @q: the struct request_queue for the bio
 *      @data: pointer to buffer to map
 *      @len: length in bytes
 *      @gfp_mask: allocation flags for bio allocation
 *
 *      Map the kernel address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
                         gfp_t gfp_mask)
{
        struct bio *bio;

        bio = __bio_map_kern(q, data, len, gfp_mask);
        if (IS_ERR(bio))
                return bio;

        if (bio->bi_size == len)
                return bio;

        /*
         * Don't support partial mappings.
         */
        bio_put(bio);
        return ERR_PTR(-EINVAL);
}
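
/*
 * Usage sketch ("q", "buffer" and "len" are assumed caller state):
 *
 *	struct bio *bio = bio_map_kern(q, buffer, len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 * The buffer must be addressable with virt_to_page(), so vmalloc()ed
 * memory cannot be mapped this way; bio_copy_kern() below bounces
 * through freshly allocated pages instead.
 */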

static void bio_copy_kern_endio(struct bio *bio, int err)
{
        struct bio_vec *bvec;
        const int read = bio_data_dir(bio) == READ;
        struct bio_map_data *bmd = bio->bi_private;
        int i;
        char *p = bmd->sgvecs[0].iov_base;

        __bio_for_each_segment(bvec, bio, i, 0) {
                char *addr = page_address(bvec->bv_page);
                int len = bmd->iovecs[i].bv_len;

                if (read && !err)
                        memcpy(p, addr, len);

                __free_page(bvec->bv_page);
                p += len;
        }

        bio_free_map_data(bmd);
        bio_put(bio);
}

/**
 *      bio_copy_kern   -       copy kernel address into bio
 *      @q: the struct request_queue for the bio
 *      @data: pointer to buffer to copy
 *      @len: length in bytes
 *      @gfp_mask: allocation flags for bio and page allocation
 *      @reading: data direction is READ
 *
 *      Copy the kernel address into a bio suitable for io to a block
 *      device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
                          gfp_t gfp_mask, int reading)
{
        struct bio *bio;
        struct bio_vec *bvec;
        int i;

        bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
        if (IS_ERR(bio))
                return bio;

        if (!reading) {
                void *p = data;

                bio_for_each_segment(bvec, bio, i) {
                        char *addr = page_address(bvec->bv_page);

                        memcpy(addr, p, bvec->bv_len);
                        p += bvec->bv_len;
                }
        }

        bio->bi_end_io = bio_copy_kern_endio;

        return bio;
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (e.g., pdflush) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (page && !PageCompound(page))
                        set_page_dirty_lock(page);
        }
}

static void bio_release_pages(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (page)
                        put_page(page);
        }
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
        unsigned long flags;
        struct bio *bio;

        spin_lock_irqsave(&bio_dirty_lock, flags);
        bio = bio_dirty_list;
        bio_dirty_list = NULL;
        spin_unlock_irqrestore(&bio_dirty_lock, flags);

        while (bio) {
                struct bio *next = bio->bi_private;

                bio_set_pages_dirty(bio);
                bio_release_pages(bio);
                bio_put(bio);
                bio = next;
        }
}

void bio_check_pages_dirty(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int nr_clean_pages = 0;
        int i;

        for (i = 0; i < bio->bi_vcnt; i++) {
                struct page *page = bvec[i].bv_page;

                if (PageDirty(page) || PageCompound(page)) {
                        page_cache_release(page);
                        bvec[i].bv_page = NULL;
                } else {
                        nr_clean_pages++;
                }
        }

        if (nr_clean_pages) {
                unsigned long flags;

                spin_lock_irqsave(&bio_dirty_lock, flags);
                bio->bi_private = bio_dirty_list;
                bio_dirty_list = bio;
                spin_unlock_irqrestore(&bio_dirty_lock, flags);
                schedule_work(&bio_dirty_work);
        } else {
                bio_put(bio);
        }
}

/**
 * bio_endio - end I/O on a bio
 * @bio:        bio
 * @error:      error, if any
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the
 *   preferred way to end I/O on a bio; it takes care of clearing
 *   BIO_UPTODATE on error. @error is 0 on success, and one of the
 *   established -Exxxx (-EIO, for instance) error values in case
 *   something went wrong. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io
 *   function.
 **/
void bio_endio(struct bio *bio, int error)
{
        if (error)
                clear_bit(BIO_UPTODATE, &bio->bi_flags);
        else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        if (bio->bi_end_io)
                bio->bi_end_io(bio, error);
}
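
/*
 * Usage sketch: a minimal bi_end_io handler. Completion may run from
 * interrupt context, so it only signals a waiter; the submitter stores
 * a struct completion in bi_private, waits with wait_for_completion()
 * and then drops its reference with bio_put(). my_end_io() is an
 * assumed name, not a symbol from this file:
 *
 *	static void my_end_io(struct bio *bio, int error)
 *	{
 *		complete(bio->bi_private);
 *	}
 */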

void bio_pair_release(struct bio_pair *bp)
{
        if (atomic_dec_and_test(&bp->cnt)) {
                struct bio *master = bp->bio1.bi_private;

                bio_endio(master, bp->error);
                mempool_free(bp, bp->bio2.bi_private);
        }
}

static void bio_pair_end_1(struct bio *bi, int err)
{
        struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

        if (err)
                bp->error = err;

        bio_pair_release(bp);
}

static void bio_pair_end_2(struct bio *bi, int err)
{
        struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

        if (err)
                bp->error = err;

        bio_pair_release(bp);
}

/*
 * split a bio - only worry about a bio with a single page
 * in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
        struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);

        if (!bp)
                return bp;

        blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
                                bi->bi_sector + first_sectors);

        BUG_ON(bi->bi_vcnt != 1);
        BUG_ON(bi->bi_idx != 0);
        atomic_set(&bp->cnt, 3);
        bp->error = 0;
        bp->bio1 = *bi;
        bp->bio2 = *bi;
        bp->bio2.bi_sector += first_sectors;
        bp->bio2.bi_size -= first_sectors << 9;
        bp->bio1.bi_size = first_sectors << 9;

        bp->bv1 = bi->bi_io_vec[0];
        bp->bv2 = bi->bi_io_vec[0];
        bp->bv2.bv_offset += first_sectors << 9;
        bp->bv2.bv_len -= first_sectors << 9;
        bp->bv1.bv_len = first_sectors << 9;

        bp->bio1.bi_io_vec = &bp->bv1;
        bp->bio2.bi_io_vec = &bp->bv2;

        bp->bio1.bi_max_vecs = 1;
        bp->bio2.bi_max_vecs = 1;

        bp->bio1.bi_end_io = bio_pair_end_1;
        bp->bio2.bi_end_io = bio_pair_end_2;

        bp->bio1.bi_private = bi;
        bp->bio2.bi_private = bio_split_pool;

        if (bio_integrity(bi))
                bio_integrity_split(bi, bp, first_sectors);

        return bp;
}
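
/*
 * Usage sketch (modelled on the raid0 caller): when a single-page bio
 * straddles a device boundary, split it and submit both halves. The
 * master bio completes via bio_pair_release() once both halves are
 * done; "split_sectors" is the assumed size of the first half:
 *
 *	struct bio_pair *bp = bio_split(bio, split_sectors);
 *
 *	if (bp) {
 *		generic_make_request(&bp->bio1);
 *		generic_make_request(&bp->bio2);
 *		bio_pair_release(bp);
 *	}
 */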

/**
 *      bio_sector_offset - Find hardware sector offset in bio
 *      @bio:           bio to inspect
 *      @index:         bio_vec index
 *      @offset:        offset in bv_page
 *
 *      Return the number of hardware sectors between beginning of bio
 *      and an end point indicated by a bio_vec index and an offset
 *      within that vector's page.
 */
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
                           unsigned int offset)
{
        unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
        struct bio_vec *bv;
        sector_t sectors;
        int i;

        sectors = 0;

        if (index >= bio->bi_idx)
                index = bio->bi_vcnt - 1;

        __bio_for_each_segment(bv, bio, i, 0) {
                if (i == index) {
                        if (offset > bv->bv_offset)
                                sectors += (offset - bv->bv_offset) / sector_sz;
                        break;
                }

                sectors += bv->bv_len / sector_sz;
        }

        return sectors;
}
EXPORT_SYMBOL(bio_sector_offset);

/*
 * create memory pools for biovecs in a bio_set.
 * use the global biovec slabs created for general use.
 */
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
        int i;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                struct biovec_slab *bp = bvec_slabs + i;
                mempool_t **bvp = bs->bvec_pools + i;

                *bvp = mempool_create_slab_pool(pool_entries, bp->slab);
                if (!*bvp)
                        return -ENOMEM;
        }
        return 0;
}

static void biovec_free_pools(struct bio_set *bs)
{
        int i;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                mempool_t *bvp = bs->bvec_pools[i];

                if (bvp)
                        mempool_destroy(bvp);
        }
}

void bioset_free(struct bio_set *bs)
{
        if (bs->bio_pool)
                mempool_destroy(bs->bio_pool);

        bioset_integrity_free(bs);
        biovec_free_pools(bs);

        kfree(bs);
}

struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
{
        struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);

        if (!bs)
                return NULL;

        bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
        if (!bs->bio_pool)
                goto bad;

        if (bioset_integrity_create(bs, bio_pool_size))
                goto bad;

        if (!biovec_create_pools(bs, bvec_pool_size))
                return bs;

bad:
        bioset_free(bs);
        return NULL;
}
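
/*
 * Usage sketch: stacking drivers (dm, md) typically create a private
 * bio_set at initialization so that their bio allocations cannot
 * starve or deadlock against fs_bio_set under memory pressure. The
 * pool sizes here are illustrative only:
 *
 *	struct bio_set *bs = bioset_create(16, 2);
 *
 *	if (!bs)
 *		return -ENOMEM;
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	...
 *	bioset_free(bs);
 */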

static void __init biovec_init_slabs(void)
{
        int i;

        for (i = 0; i < BIOVEC_NR_POOLS; i++) {
                int size;
                struct biovec_slab *bvs = bvec_slabs + i;

                size = bvs->nr_vecs * sizeof(struct bio_vec);
                bvs->slab = kmem_cache_create(bvs->name, size, 0,
                                SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
        }
}

static int __init init_bio(void)
{
        bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

        bio_integrity_init_slab();
        biovec_init_slabs();

        fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
        if (!fs_bio_set)
                panic("bio: can't allocate bios\n");

        bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
                                                     sizeof(struct bio_pair));
        if (!bio_split_pool)
                panic("bio: can't create split pool\n");

        return 0;
}

subsys_initcall(init_bio);

EXPORT_SYMBOL(bio_alloc);
EXPORT_SYMBOL(bio_kmalloc);
EXPORT_SYMBOL(bio_put);
EXPORT_SYMBOL(bio_free);
EXPORT_SYMBOL(bio_endio);
EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_add_page);
EXPORT_SYMBOL(bio_add_pc_page);
EXPORT_SYMBOL(bio_get_nr_vecs);
EXPORT_SYMBOL(bio_map_user);
EXPORT_SYMBOL(bio_unmap_user);
EXPORT_SYMBOL(bio_map_kern);
EXPORT_SYMBOL(bio_copy_kern);
EXPORT_SYMBOL(bio_pair_release);
EXPORT_SYMBOL(bio_split);
EXPORT_SYMBOL(bio_copy_user);
EXPORT_SYMBOL(bio_uncopy_user);
EXPORT_SYMBOL(bioset_create);
EXPORT_SYMBOL(bioset_free);
EXPORT_SYMBOL(bio_alloc_bioset);