linux/mm/bounce.c
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

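/*
 * Two emergency pools: page_pool backs ordinary highmem bouncing,
 * isa_page_pool backs devices that can only DMA into the ISA window
 * (GFP_DMA).  A mempool keeps POOL_SIZE/ISA_POOL_SIZE pages in reserve
 * so a bounce page can always be obtained even under memory pressure --
 * the I/O being bounced may well be the writeback needed to free
 * memory in the first place.
 */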
static mempool_t *page_pool, *isa_page_pool;

#ifdef CONFIG_HIGHMEM
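/*
 * With memory hotplug the pool is created even if there is no highmem
 * at boot, since highmem pages may be added later.
 */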
static __init int init_emergency_pool(void)
{
#ifndef CONFIG_MEMORY_HOTPLUG
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);

/*
 * highmem version: kmap the destination vec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

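/*
 * Without CONFIG_HIGHMEM every page has a permanent kernel mapping, so
 * a plain page_address() based memcpy suffices and no kmap/irq games
 * are needed.
 */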
#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Called every time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

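/*
 * Sketch (illustrative, not part of this file): a driver for a device
 * that can only address the ISA DMA window reaches this pool through
 * blk_queue_bounce_limit() in block/blk-settings.c, which for such a
 * small mask adds GFP_DMA to q->bounce_gfp and calls
 * init_emergency_isa_pool():
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 *
 * From then on every bio submitted on q goes through blk_queue_bounce()
 * below, and segments above the ISA limit are bounced via isa_page_pool.
 */
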
/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always; it will do the Right Thing.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	__bio_for_each_segment(tovec, to, i, 0) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy,
		 * bounce_copy_vec already uses tovec->bv_len
		 */
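		/*
		 * The bounce page in fromvec came from a lowmem mempool,
		 * so page_address() is valid here without any kmap.
		 */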
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		bounce_copy_vec(tovec, vfrom);
		flush_dcache_page(tovec->bv_page);
	}
}

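/*
 * Common completion for a bounce bio: return the bounce pages to their
 * pool, then complete the original bio stashed in bi_private.
 */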
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}

 147
 148static void bounce_end_io_write(struct bio *bio, int err)
 149{
 150        bounce_end_io(bio, page_pool, err);
 151}
 152
 153static void bounce_end_io_write_isa(struct bio *bio, int err)
 154{
 155
 156        bounce_end_io(bio, isa_page_pool, err);
 157}
 158
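/*
 * Read completion: if the device filled the bounce pages successfully,
 * copy the data up into the original (possibly highmem) pages before
 * releasing the bounce pages.
 */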
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}

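/*
 * Walk the original bio: every segment whose page sits above the
 * queue's bounce pfn gets a low page from @pool; for writes the payload
 * is copied down immediately.  On return *bio_orig points at the bounce
 * clone, with the original bio kept in bi_private so that completion
 * can copy back (reads) and finish it.
 */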
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct page *page;
	struct bio *bio = NULL;
	int i, rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;

	bio_for_each_segment(from, *bio_orig, i) {
		page = from->bv_page;

		/*
		 * is the destination page already reachable, i.e. at or
		 * below the bounce pfn?
		 */
		if (page_to_pfn(page) <= queue_bounce_pfn(q))
			continue;

		/*
		 * irk, bounce it
		 */
		if (!bio) {
			unsigned int cnt = (*bio_orig)->bi_vcnt;

			bio = bio_alloc(GFP_NOIO, cnt);
			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
		}

		to = bio->bi_io_vec + i;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(from->bv_page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap(from->bv_page) + from->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap(from->bv_page);
		}
	}

	/*
	 * no pages bounced
	 */
	if (!bio)
		return;

	trace_block_bio_bounce(q, *bio_orig);

	/*
	 * at least one page was bounced, fill in possible non-highmem
	 * pages
	 */
	__bio_for_each_segment(from, *bio_orig, i, 0) {
		to = bio_iovec_idx(bio, i);
		if (!to->bv_page) {
			to->bv_page = from->bv_page;
			to->bv_len = from->bv_len;
			to->bv_offset = from->bv_offset;
		}
	}

	bio->bi_bdev = (*bio_orig)->bi_bdev;
	bio->bi_flags |= (1 << BIO_BOUNCED);
	bio->bi_sector = (*bio_orig)->bi_sector;
	bio->bi_rw = (*bio_orig)->bi_rw;

	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
	bio->bi_idx = (*bio_orig)->bi_idx;
	bio->bi_size = (*bio_orig)->bi_size;

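	/*
	 * Writes were copied into the bounce pages above; reads need the
	 * copy-back in the completion handler, hence a separate end_io
	 * per direction (and per pool, so the pages return to the right
	 * mempool).
	 */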
	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

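/*
 * blk_queue_bounce - bounce all unreachable pages in *bio_orig, if any.
 * Fast paths first: a data-less bio, or a queue whose bounce pfn covers
 * all of memory, returns without walking the segments at all.
 */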
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);

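/*
 * Usage sketch (illustrative, not part of this file): the block layer
 * calls blk_queue_bounce() on the submission path, just before a bio
 * reaches the driver, roughly:
 *
 *	blk_queue_bounce(q, &bio);	// bio may be replaced by a clone
 *	// ... map bio into a request and hand it to the driver ...
 *
 * Callers never see the bounce pages: on completion the bounce bio
 * copies data back (for reads), frees its pages, and ends the original
 * bio, whose pages may live anywhere in memory.
 */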