linux/block/blk-map.c
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>            /* for struct sg_iovec */

#include "blk.h"

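/*
 * Append a bio to a request: an empty request simply takes the bio as
 * its first; otherwise the bio is accepted only if it can be back-merged
 * within the queue limits, in which case it is linked at the tail of the
 * bio chain and the request's data length grows accordingly.
 */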
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
{
        if (!rq->bio)
                blk_rq_bio_prep(q, rq, bio);
        else if (!ll_back_merge_fn(q, rq, bio))
                return -EINVAL;
        else {
                rq->biotail->bi_next = bio;
                rq->biotail = bio;

                rq->__data_len += bio->bi_size;
        }
        return 0;
}

/*
 * Undo a single mapping: user-mapped bios have their pages unpinned,
 * copied bios have their bounce data copied back to user space and freed.
 */
static int __blk_rq_unmap_user(struct bio *bio)
{
        int ret = 0;

        if (bio) {
                if (bio_flagged(bio, BIO_USER_MAPPED))
                        bio_unmap_user(bio);
                else
                        ret = bio_uncopy_user(bio);
        }

        return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
                             struct rq_map_data *map_data, void __user *ubuf,
                             unsigned int len, gfp_t gfp_mask)
{
        unsigned long uaddr;
        struct bio *bio, *orig_bio;
        int reading, ret;

        reading = rq_data_dir(rq) == READ;

        /*
         * If the alignment requirement is satisfied, map in the user
         * pages for direct DMA. Otherwise, set up kernel bounce buffers.
         */
        uaddr = (unsigned long) ubuf;
        if (blk_rq_aligned(q, uaddr, len) && !map_data)
                bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
        else
                bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

        if (IS_ERR(bio))
                return PTR_ERR(bio);

        if (map_data && map_data->null_mapped)
                bio->bi_flags |= (1 << BIO_NULL_MAPPED);

        orig_bio = bio;
        blk_queue_bounce(q, &bio);

        /*
         * We link the bounce buffer in and could have to traverse it
         * later, so we have to get a ref to prevent it from being freed.
         */
        bio_get(bio);

        ret = blk_rq_append_bio(q, rq, bio);
        if (!ret)
                return bio->bi_size;

        /* if it was bounced we must call the end io function */
        bio_endio(bio, 0);
        __blk_rq_unmap_user(orig_bio);
        bio_put(bio);
        return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:          request queue where request should be inserted
 * @rq:         request structure to fill
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:       the user buffer
 * @len:        length of user data
 * @gfp_mask:   memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
                    struct rq_map_data *map_data, void __user *ubuf,
                    unsigned long len, gfp_t gfp_mask)
{
        unsigned long bytes_read = 0;
        struct bio *bio = NULL;
        int ret;

        if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len)
                return -EINVAL;

        if (!ubuf && (!map_data || !map_data->null_mapped))
                return -EINVAL;

        while (bytes_read != len) {
                unsigned long map_len, end, start;

                map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
                end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
                                                                >> PAGE_SHIFT;
                start = (unsigned long)ubuf >> PAGE_SHIFT;

                /*
                 * A bad offset could cause us to require BIO_MAX_PAGES + 1
                 * pages. If this happens we just lower the requested
                 * mapping len by a page so that we can fit
                 */
                if (end - start > BIO_MAX_PAGES)
                        map_len -= PAGE_SIZE;

                ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
                                        gfp_mask);
                if (ret < 0)
                        goto unmap_rq;
                if (!bio)
                        bio = rq->bio;
                bytes_read += ret;
                ubuf += ret;

                if (map_data)
                        map_data->offset += ret;
        }

        if (!bio_flagged(bio, BIO_USER_MAPPED))
                rq->cmd_flags |= REQ_COPY_USER;

        rq->buffer = NULL;
        return 0;
unmap_rq:
        blk_rq_unmap_user(bio);
        rq->bio = NULL;
        return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
  90blk_16/span>
  91ap.c#L92" id="L92" cd="L3"lock/blk"L91">  91
  93       1  * We link the bounce b1uffer1in and could have to traversek-m na.c#L92" i href="block/bl name"L93">  93  85  * later so we have to 1get a1ref to prevent it from being k-m na_count.c#numb="bof ellk-masap.c#/bl name"L93">  93
  93cmd_flagt" class="sref">bio_get<1/a>(<17s
  96  97ret =   98ret1)

  99bi17/span>
 100 101/* if it was boucned we1 must18ust be issued at the end of I/O, while
 102bio_en1dio 103_184" id="L104" class="line" name="L104"> 104bio_put<1/a>(<18 be bounced through blk_queue_bounce()
 105ret;
<18vice, as pages mapped may be out of
 106
 107
 108/**
 109 * blk_1rq_map_user - map user d1ata t1 a request, for REQ_TYPE_BLOCKme="L139"> 139                if"> * @q: 1         request queue w1here 19user(struct request_qu" nak-mapa> *q, struct request *rq,
 111                  "> * @rq:1         request structu1re to19    >> map_data, void __user *BIO_MAX_e" name href="+code=__user"  naqueue" class="s nak-ma1"> 111                  "e=bio_en1_data:   pointer to the 1rq_ma19">map_data->gfp_t gfp_mask)
  45{
 * @ubu1f:       the user buffer1 114        unsigned l"> * @len1:        length of user 1data<19 < 0)
bio = NULL;
((struct mapef">ret;
rq) == READ;
  51
 *
q, ame="L160"> 160 * Desc1ription:
 158         *    D1ata will be mapped direc1tly f19ef="block/blk-map.c#L79" id="L79" naqueue" class="s nak-maef="struct  145                  "> *    a1 kernel bounce buffer is1 used19EINVAL;
 120        if (! *
 151                i2nt"> *   2A matching blk_rq_unmap_2ser()20    >> "linclass="sref">REA queue" class="s data"L160(struct  127                u2nt"> *   2still in process context2map_data->> q, struct < na_bas>"L120"> 120        if (! 134                <2nt"> *   2Note: The mapped bio may2need 20 < 0)
   45{
 *   2before being submitted t2 the 20"sref">unmap_rq;
 120        if (!  77         *   2original bio must be pas2ed ba20 =  135 *   2unmapping.
 135 139                i2=blk_rq_m2p_user" class="sref">blk2rq_ma21>map_len -= &ref="+code=null_mappeass="ldma_REA" id="L119" class="lin  45{
rq_map_data *code=#L117" id="L11un"sref">q, ame="L11  21                 *   2 long len
 134                <2ong bytes_readq, ame="f="class="sref">REA" id="L119" class="liLL" class="sref">NULdma_padd="L45" class="line"dma_padd="L447">&ref="+code=null_mappe127" class="line" name="L12f="struct  152                 2href="+co2e=bio" class="sref">bio<2a> *<21"sref">unmap_rq;
lass="sref">NULL;
REA" id="L119" class="liode=ubuf" class="sref">ubuf, mapef">ret;
 111                 2f="+code=2et" class="sref">ret2
rq_map_data *code=#L117" id="L1144" id="L144" class="line" name="L144"> 144                i2k-map.c#L218" id="L118" class="lin2" nam21 =   60                len2> 21a href="+code=bio_map_user" class="sref">bio_map_user(q<" naqueue" class="srcodquest_qu" nak-maplass="sref">REA" id="L119" class="liode=ubuf" class="addr, l naqueue" class="s nak-ma1(struct mapef">ret;
 144                i2kmap.c#L30ref="+code=EINVAL" class2"sref2150">  50        len  63                retu2turn -EINVAL;
bio);
  64
 123        if (!ubuf<2a> &a2p;& (!k-map.c#L127" id="L127" class="line" name="L127"> 127                u2turn -EINVAL;
 135

106" href="block/blame="L105"> 105byt22ens we just lower the requested
 105
 105 139                i2 href="+c2de=map_len" class="sref"2map_l2n = bio);
  76
end2/a> =2((unsigned long)bio, 0);
  83        orig_bio);
  76
s2art = (unsigned long) 120        if (! 154
/*
 155        if (!     2           * A bad offse2 coul23agged(bio, BIO_USER_MAPPED))
 156                <2nt">     2           * pages. If t2is ha23a href="+code=cmd_flags" class="sref">cmd_flags |= REQ_COPY_USER;
 157
     2           * mapping len2by a 23"L158"> 158             2           */
b" class="line" nef">BIO_USER_MAP href="+code=bio" class="sref">bio);
  70
(  76
lock/repef">BIO_USER_MAP hr/a>lock/repk-maplass="sref">REA" id="L119" class="liode=ubuf" class=");
  78        if (!(NULL;
 159        return 0;2 href="+c2de=ret" class="sref">ret2/a> =2 160 154
 <k-map.c#L120" idXPORT_SYMBO25" class="line" XPORT_SYMBO2k-map.c#L162" id="L16+code=request_" naqueue" class="sref">request_qu" nak-ma>  78        if (!     2  126        while (  89b24 in to blk_rq_unmap_user() forbio);
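
/*
 * Usage sketch (illustrative, not part of this file): mapping a scattered
 * user buffer with blk_rq_map_user_iov(). The two-segment iovec and the
 * helper name are hypothetical; @len must equal the sum of the iovec
 * lengths or the mapping is rejected above.
 */
#if 0
static int example_iov_mapping(struct request_queue *q, struct request *rq,
                               void __user *hdr, size_t hdr_len,
                               void __user *payload, size_t payload_len)
{
        struct sg_iovec iov[2];

        iov[0].iov_base = hdr;
        iov[0].iov_len = hdr_len;
        iov[1].iov_base = payload;
        iov[1].iov_len = payload_len;

        return blk_rq_map_user_iov(q, rq, NULL, iov, 2,
                                   hdr_len + payload_len, GFP_KERNEL);
}
#endif
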
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:               start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
        struct bio *mapped_bio;
        int ret = 0, ret2;

        while (bio) {
                mapped_bio = bio;
                if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
                        mapped_bio = bio->bi_private;

                ret2 = __blk_rq_unmap_user(mapped_bio);
                if (ret2 && !ret)
                        ret = ret2;

                mapped_bio = bio;
                bio = bio->bi_next;
                bio_put(mapped_bio);
        }

        return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
reques0" i
  90
  91/* if it was boucned we2 must28ust be issued at the end of I/>ap.c#L92" id="L92" cd="ame="L92">  92bio_en2dio  94_284" id="L104" class="line" nam#L95" id="L95" class="line" name="L95">  95bio_put<2/a>(<28 be bounced through blk_queue_p.c#L96" id="L96" class="line" name="L96">  96ret;
<28vice, as pages mapped may be "L96">  96  98
  98
  98 109 * @q: 2         request queue w2here 291" id="L101" class="line" namme="L139"> 139                i2"> * @rq:2         request structu2re to29    &/a>(struct reques0" ik-mapio" class="sref">bioq, struct request *rq,
;
 112                 2"e=bio_en2_data:   pointer to the 2rq_ma29">map_data->gfp_t gfp_mask)
  45{
 * @ubu2f:       the user buffer2 114        unsigned 2"> * @len2:        length of user 2data<29 < 0)
rq) == READ;
  51
(> 
 = bio = NUL class="line" name="L69">  69         *    D2ata will be mapped direc2tly f29+code=buffer"map.c#L117" id="L117" class="line" name="L11  69          50         *
queue_max_hw_sectors(q) << 9))
 119                r3nt"> *   3A matching blk_rq_unmap_3ser()30EINVAL;
 120        if (! *   3still in process context3map_data;
 119                r3n3"> *   3s:       the user buffer3p.c#L30ask" class="sref">gfpblock/blk-map.c#L120" id="L120" class="line" name="L120"> 120        if (! *   3s        length of user 3need 30"L155"> 155        if (! *   3before being submitted t3 the 30"sref">unmap_c#L117" id="L11dode=maa>;
re"sref">q, re"sref">block/blk-map.c#L119" id="L119" class="li_mask" class="sre47" class="line" nae="L47">_read" class="sref">bytes_read, ;
 120        if (! *   3b>
 119                r3n7"> *   3biption:
 = bio_map_user(gfp_t g="L45" class="line" name="L45">coclass="sref">mapef""L9>,
 120        if (! *   3bta will be mapped direc3="blo30+code=buffer"0">  60                
b">bio_map_user(gfp_t g="L45" class="line" name="L45">90"> 120        if (!blk3rq_ma31"L151"> 151                i3  struct 3a href="+code=rq_map_dat3" cla3s="sref">rq_ma-map=#L117" id="L11bio))
  63                retu3et"> *   3 long bio);
  64
 134                <3ong bytes_read,
 134                <3ot"> *   3e=bio" class="sref">bio<3a> *<31"sref">unmap_rq;
lass="sref">NULL;
);
 120        if (!ret3
  77         =  119                r3f="+code=3en" class="sref">len3> 31a href="+code=bio_map_user" class="s">cmd_flags |= REQ_COPY_USER;
 157
  50        len__blk_rq_map_user(re"#L13dd="L84" class="linesef">re"#L13dd="Lblock/blk-map.c#L119" id="L119" class="li_mask" class="sre);
  78        if (!rq_ma-map=#L117" id="L11Lnlikelyq,  627"> 127                u3k-map.c#L323" id="L123" class="lin3" nam32a href="+code=__blk_rqcopy I/O, if possiblemed="L92" cislk-o bigamme="L139"> 139                i3ef="+code3ubuf" class="sref">ubuf<3a> &a32ask" class="sref">gfpmap.c#L83" id="">bio);
  78        if (!EINVAL;
ret164" id="L1826bio" class="sref">bio, unmap_  27
byt3277">  77        BIO_USER_MAP href="+code=bio" class="sref">bio);
NULL;
buffer = NULL;
 159        return 0;3 href="+c3de=map_len" class="sref"3map_l32lass="line" name="L160"> 160end3/a> =3((unsi  27
reques0" ik-ma>  78        if (!s3art = (u

