linux/block/blk-map.c
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>            /* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->data_len += bio->bi_size;
        }
        return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
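
/*
 * Illustrative sketch, not part of the build: one way a caller might use
 * blk_rq_append_bio() to attach an already-built bio to a passthrough
 * request. The helper name and the choice of bio_map_kern() as the bio
 * source are assumptions made for this example only.
 */
#if 0
static int example_append_data(struct request_queue *q, struct request *rq,
                               void *buf, unsigned int len, gfp_t gfp_mask)
{
        struct bio *bio;
        int ret;

        bio = bio_map_kern(q, buf, len, gfp_mask);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        ret = blk_rq_append_bio(q, rq, bio);
        if (ret)
                bio_put(bio);   /* could not be merged into rq */

        return ret;
}
#endif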

static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             void __user *ubuf, unsigned int len)
{
        unsigned long uaddr;
        unsigned int alignment;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * if alignment requirement is satisfied, map in user pages for
         * direct dma. else, set up kernel bounce buffers
         */
        uaddr = (unsigned long) ubuf;
        alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        if (!(uaddr & alignment) && !(len & alignment))
                bio = bio_map_user(q, NULL, uaddr, len, reading);
        else
                bio = bio_copy_user(q, uaddr, len, reading);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later so we have to get a ref to prevent it from being freed
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request structure to fill
 * @ubuf:       the user buffer
 * @len:        length of user data
 *
 * Description:
 *    Data will be mapped directly for zero-copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It is the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    void __user *ubuf, unsigned long len)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !ubuf)
                return -EINVAL;

        /* map the buffer in chunks of at most BIO_MAX_SIZE bytes */
        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, ubuf, map_len);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;  /* keep first bio for unmap */
                bytes_read += ret;
                ubuf += ret;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        rq->buffer = rq->data = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
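
/*
 * Illustrative sketch, not part of the build: the usual calling pattern for
 * blk_rq_map_user() on a passthrough request. The function name, the CDB
 * handling and the use of blk_execute_rq() are assumptions made for this
 * example. The key points are that the original rq->bio is saved before the
 * request is executed and that it is handed back to blk_rq_unmap_user()
 * afterwards, still in process context.
 */
#if 0
static int example_user_io(struct request_queue *q, struct gendisk *disk,
                           unsigned char *cdb, unsigned int cdb_len,
                           void __user *ubuf, unsigned long len, int write)
{
        struct request *rq;
        struct bio *bio;
        int ret;

        rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        memcpy(rq->cmd, cdb, cdb_len);  /* cdb_len assumed <= BLK_MAX_CDB */
        rq->cmd_len = cdb_len;
        rq->timeout = 60 * HZ;

        ret = blk_rq_map_user(q, rq, ubuf, len);
        if (ret)
                goto out;

        bio = rq->bio;                  /* save for blk_rq_unmap_user() */
        ret = blk_execute_rq(q, disk, rq, 0);

        if (blk_rq_unmap_user(bio) && !ret)
                ret = -EFAULT;
out:
        blk_put_request(rq);
        return ret;
}
#endif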

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request to map data to
 * @iov:        pointer to the iovec
 * @iov_count:  number of elements in the iovec
 * @len:        I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero-copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It is the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct sg_iovec *iov, int iov_count, unsigned int len)
{
        struct bio *bio;
        int i, read = rq_data_dir(rq) == READ;
        int unaligned = 0;

        if (!iov || iov_count <= 0)
                return -EINVAL;

        for (i = 0; i < iov_count; i++) {
                unsigned long uaddr = (unsigned long)iov[i].iov_base;

                if (uaddr & queue_dma_alignment(q)) {
                        unaligned = 1;
                        break;
                }
                if (!iov[i].iov_len)
                        return -EINVAL;
        }

        if (unaligned || (q->dma_pad_mask & len))
                bio = bio_copy_user_iov(q, iov, iov_count, read);
        else
                bio = bio_map_user_iov(q, NULL, iov, iov_count, read);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (bio->bi_size != len) {
                bio_endio(bio, 0);
                bio_unmap_user(bio);
                return -EINVAL;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        blk_queue_bounce(q, &bio);
        bio_get(bio);
        blk_rq_bio_prep(q, rq, bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
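
/*
 * Illustrative sketch, not part of the build: mapping a scattered user
 * buffer described by an sg_iovec array, as an SG_IO-style ioctl might do.
 * The helper name is an assumption for this example; the iovec is assumed
 * to have already been copied in from user space. Note that the length
 * passed in must match the total of the iovec lengths exactly, or the
 * mapping is rejected with -EINVAL.
 */
#if 0
static int example_map_iovec(struct request_queue *q, struct request *rq,
                             struct sg_iovec *iov, int iov_count)
{
        unsigned int len = 0;
        int i;

        /* total byte count over all segments */
        for (i = 0; i < iov_count; i++)
                len += iov[i].iov_len;

        /*
         * On success the data is attached to rq; as with blk_rq_map_user(),
         * rq->bio must be remembered and later handed to blk_rq_unmap_user().
         */
        return blk_rq_map_user_iov(q, rq, iov, iov_count, len);
}
#endif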

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:               start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                /* a bounced bio keeps the original mapped bio in bi_private */
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request to fill
 * @kbuf:       the kernel buffer
 * @len:        length of kernel data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
{
        unsigned long kaddr;
        unsigned int alignment;
        int reading = rq_data_dir(rq) == READ;
        int do_copy = 0;
        struct bio *bio;

        if (len > (q->max_hw_sectors << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;

        /* misaligned or on-stack buffers are copied instead of being mapped */
        kaddr = (unsigned long)kbuf;
        alignment = queue_dma_alignment(q) | q->dma_pad_mask;
        do_copy = ((kaddr & alignment) || (len & alignment) ||
                   object_is_on_stack(kbuf));

        if (do_copy)
                bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
        else
                bio = bio_map_kern(q, kbuf, len, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);

        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;

        blk_rq_bio_prep(q, rq, bio);
        blk_queue_bounce(q, &rq->bio);
        rq->buffer = rq->data = NULL;
        return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
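
/*
 * Illustrative sketch, not part of the build: issuing a passthrough command
 * with a kmalloc()ed buffer through blk_rq_map_kern(). The helper name and
 * the INQUIRY-style CDB are assumptions made for this example. A stack
 * buffer would work too, but would silently be bounced because of the
 * object_is_on_stack() check above, so heap memory is preferred.
 */
#if 0
static int example_kern_io(struct request_queue *q, struct gendisk *disk,
                           void *buf, unsigned int len)
{
        struct request *rq;
        int ret;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd[0] = 0x12;              /* INQUIRY */
        rq->cmd[4] = len;               /* allocation length (len <= 255) */
        rq->cmd_len = 6;
        rq->timeout = 30 * HZ;

        ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (!ret)
                ret = blk_execute_rq(q, disk, rq, 0);

        blk_put_request(rq);
        return ret;
}
#endif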