linux/block/blk-map.c
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_size;
	}
	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * If the alignment requirement is satisfied, map in the user pages
	 * for direct DMA. Otherwise, set up kernel bounce buffers.
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
class="comment"> */o/spa.18/1 >a>int/oa href="+code=blk_rq_map_user" class="sref">blk_rq_map_usero/a>(struct oa href="+code=request_queue" class="sref">request_queueo/a> *oa href="+code=q" class="sref">qo/a>, struct oa href="+code=request" class="sref">requesto/a> *oa href="+code=rq" class="sref">rqo/a>,l/111o/a> struct oa href="+code=rq_map_data" class="sref">rq_map_datao/a> *oa href="+code=map_data" class="sref">map_datao/a>, void/oa href="+code=__user" class="sref">__usero/a> *oa href="+code=ubuf" class="sref">ubufo/a>,l/112o/a> unsigned long oa href="+code=len" class="sref">leno/a>, oa href="+code=gfp_t" class="sref">gfp_to/a> oa href="+code=gfp_mask" class="sref">gfp_masko/a>)l/113o/a>{l/114o/a> unsigned long oa href="+code=bytes_read" class="sref">bytes_reado/a> = 0;l/115o/a> struct oa href="+code=bio" class="sref">bioo/a> *oa href="+code=bio" class="sref">bioo/a> = oa href="+code=NULL" class="sref">NULLo/a>;l/116o/a> int/oa href="+code=ret" class="sref">reto/a>;l/117o/a>l/118o/a> if (oa href="+code=len" class="sref">leno/a> > (oa href="+code=queue_max_hw_sectors" class="sref">queue_max_hw_sectorso/a>(oa href="+code=q" class="sref">qo/a>) << 9))l/119o/a> return -oa href="+code=EINVAL" class="sref">EINVALo/a>;l/120o/a> if (!oa href="+code=len" class="sref">leno/a>)l/121o/a> return -oa href="+code=EINVAL" class="sref">EINVALo/a>;l/122o/a>l/123o/a> if (!oa href="+code=ubuf" class="sref">ubufo/a> && (!oa href="+code=map_data" class="sref">map_datao/a> || !oa href="+code=map_data" class="sref">map_datao/a>->oa href="+code=null_mapped" class="sref">null_mappedo/a>))l/124o/a> return -oa href="+code=EINVAL" class="sref">EINVALo/a>;l/125o/a>l/126o/a> while (oa href="+code=bytes_read" class="sref">bytes_reado/a> != oa href="+code=len" class="sref">leno/a>) {l/127o/a> unsigned long oa href="+code=map_len" class="sref">map_leno/a>, oa href="+code=end" class="sref">endo/a>, oa href="+code=start" class="sref">starto/a>;l/128o/a>l/129o/a> oa href="+code=map_len" class="sref">map_leno/a> = oa href="+code=min_t" class="sref">min_to/a>(unsigned long, oa href="+code=len" class="sref">leno/a> - oa href="+code=bytes_read" class="sref">bytes_reado/a>, oa href="+code=BIO_MAX_SIZE" class="sref">BIO_MAX_SIZEo/a>);l/130o/a> oa href="+code=end" class="sref">endo/a> = ((unsigned long)oa href="+code=ubuf" class="sref">ubufo/a> + oa href="+code=map_len" class="sref">map_leno/a> + oa href="+code=PAGE_SIZE" class="sref">PAGE_SIZEo/a> - 1)l/131o/a> >> oa href="+code=PAGE_SHIFT" class="sref">PAGE_SHIFTo/a>;l/132o/a> oa href="+code=start" class="sref">starto/a> = (unsigned long)oa href="+code=ubuf" class="sref">ubufo/a> >> oa href="+code=PAGE_SHIFT" class="sref">PAGE_SHIFTo/a>;l/133o/a>l/134o/a> ospa. class="comment">/*o/spa.18/135o/a>ospa. class="comment"> * A bad offset could cause us to require BIO_MAX_PAGES + 1o/spa.18/136o/a>ospa. class="comment"> * pages. If this happens we just lower the requestedo/spa.18/137o/a>ospa. class="comment"> * mapping len by a page so that we ca. fito/spa.18/138o/a>ospa. 
class="comment"> */o/spa.18/139o/a> if (oa href="+code=end" class="sref">endo/a> - oa href="+code=start" class="sref">starto/a> > oa href="+code=BIO_MAX_PAGES" class="sref">BIO_MAX_PAGESo/a>)l/140o/a> oa href="+code=map_len" class="sref">map_leno/a> -= oa href="+code=PAGE_SIZE" class="sref">PAGE_SIZEo/a>;l/141o/a>l/142o/a> oa href="+code=ret" class="sref">reto/a> = oa href="+code=__blk_rq_map_user" class="sref">__blk_rq_map_usero/a>(oa href="+code=q" class="sref">qo/a>, oa href="+code=rq" class="sref">rqo/a>, oa href="+code=map_data" class="sref">map_datao/a>, oa href="+code=ubuf" class="sref">ubufo/a>, oa href="+code=map_len" class="sref">map_leno/a>,l/143o/a> oa href="+code=gfp_mask" class="sref">gfp_masko/a>);l/144o/a> if (oa href="+code=ret" class="sref">reto/a> < 0)l/145o/a> goto oa href="+code=unmap_rq" class="sref">unmap_rqo/a>;l/146o/a> if (!oa href="+code=bio" class="sref">bioo/a>)l/147o/a> oa href="+code=bio" class="sref">bioo/a> = oa href="+code=rq" class="sref">rqo/a>->oa href="+code=bio" class="sref">bioo/a>;l/148o/a> oa href="+code=bytes_read" class="sref">bytes_reado/a> += oa href="+code=ret" class="sref">reto/a>;l/149o/a> oa href="+code=ubuf" class="sref">ubufo/a> += oa href="+code=ret" class="sref">reto/a>;l/150o/a>l/151o/a> if (oa href="+code=map_data" class="sref">map_datao/a>)l/152o/a> oa href="+code=map_data" class="sref">map_datao/a>->oa href="+code=offset" class="sref">offseto/a> += oa href="+code=ret" class="sref">reto/a>;l/153o/a> }l/154o/a>l/155o/a> if (!oa href="+code=bio_flagged" class="sref">bio_flaggedo/a>(oa href="+code=bio" class="sref">bioo/a>, oa href="+code=BIO_USER_MAPPED" class="sref">BIO_USER_MAPPEDo/a>))l/156o/a> oa href="+code=rq" class="sref">rqo/a>->oa href="+code=cmd_flags" class="sref">cmd_flagso/a> |= oa href="+code=REQ_COPY_USER" class="sref">REQ_COPY_USERo/a>;l/157o/a>l/158o/a> oa href="+code=rq" class="sref">rqo/a>->oa href="+code=buffer" class="sref">buffero/a> = oa href="+code=NULL" class="sref">NULLo/a>;l/159o/a> return 0;l/160o/a>oa href="+code=unmap_rq" class="sref">unmap_rqo/a>:l/161o/a> oa href="+code=blk_rq_unmap_user" class="sref">blk_rq_unmap_usero/a>(oa href="+code=bio" class="sref">bioo/a>);l/162o/a> oa href="+code=rq" class="sref">rqo/a>->oa href="+code=bio" class="sref">bioo/a> = oa href="+code=NULL" class="sref">NULLo/a>;l/163o/a> return oa href="+code=ret" class="sref">reto/a>;l/164o/a>}l/165o/a>oa href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOLo/a>(oa href="+code=blk_rq_map_user" class="sref">blk_rq_map_usero/a>);l/166o/a>l/167o/a>ospa. class="comment">/**o/spa.18/168o/a>ospa. class="comment"> * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usageo/spa.18/169o/a>ospa. class="comment"> * @q: request queue where request should be insertedo/spa.18/170o/a>ospa. class="comment"> * @rq: request to map data too/spa.18/171o/a>ospa. class="comment"> * @map_data: pointer to the rq_map_data holding pages (if necessary)o/spa.18/172o/a>ospa. class="comment"> * @iov: pointer to the ioveco/spa.18/173o/a>ospa. class="comment"> * @iov_count: number of elements in the ioveco/spa.18/174o/a>ospa. class="comment"> * @len: I/O byte counto/spa.18/175o/a>ospa. class="comment"> * @gfp_mask: memory alloca v3. flagso/spa.18/176o/a>ospa. class="comment"> *o/spa.18/177o/a>ospa. class="comment"> * Descri v3.:o/spa.18/178o/a>ospa. class="comment"> * Data will be mapped directly for zero copy I/O, if possible. Otherwiseo/spa.18/179o/a>ospa. class="comment"> * a kernel bounce buffer is used.o/spa.18/180o/a>ospa. 
class="comment"> *o/spa.18/181o/a>ospa. class="comment"> * A matching blk_rq_unmap_user() must be issued at the end of I/O, whileo/spa.18/182o/a>ospa. class="comment"> * still in process context.o/spa.18/183o/a>ospa. class="comment"> *o/spa.18/184o/a>ospa. class="comment"> * Note: The mapped bio may need to be bounced through blk_queue_bounce()o/spa.18/185o/a>ospa. class="comment"> * before being submitted to the device, as pages mapped may be out ofo/spa.18/186o/a>ospa. class="comment"> * reach. It's the callers responsibility to make sure this happens. Theo/spa.18/187o/a>ospa. class="comment"> * original bio must be passed back in to blk_rq_unmap_user() for propero/spa.18/188o/a>ospa. class="comment"> * unmapping.o/spa.18/189o/a>ospa. class="comment"> */o/spa.18/19 >a>int/oa href="+code=blk_rq_map_user_iov" class="sref">blk_rq_map_user_iovo/a>(struct oa href="+code=request_queue" class="sref">request_queueo/a> *oa href="+code=q" class="sref">qo/a>, struct oa href="+code=request" class="sref">requesto/a> *oa href="+code=rq" class="sref">rqo/a>,l/191o/a> struct oa href="+code=rq_map_data" class="sref">rq_map_datao/a> *oa href="+code=map_data" class="sref">map_datao/a>, struct oa href="+code=sg_iovec" class="sref">sg_ioveco/a> *oa href="+code=iov" class="sref">iovo/a>,l/192o/a> int/oa href="+code=iov_count" class="sref">iov_counto/a>, unsigned int/oa href="+code=len" class="sref">leno/a>, oa href="+code=gfp_t" class="sref">gfp_to/a> oa href="+code=gfp_mask" class="sref">gfp_masko/a>)l/193o/a>{l/194o/a> struct oa href="+code=bio" class="sref">bioo/a> *oa href="+code=bio" class="sref">bioo/a>;l/195o/a> int/oa href="+code=i" class="sref">io/a>, oa href="+code=read" class="sref">reado/a> = oa href="+code=rq_data_dir" class="sref">rq_data_diro/a>(oa href="+code=rq" class="sref">rqo/a>) == oa href="+code=READ" class="sref">READo/a>;l/196o/a> int/oa href="+code=unaligned" class="sref">unalignedo/a> = 0;l/197o/a>l/198o/a> if (!oa href="+code=iov" class="sref">iovo/a> || oa href="+code=iov_count" class="sref">iov_counto/a> <= 0)l/199o/a> return -oa href="+code=EINVAL" class="sref">EINVALo/a>;l/200o/a>l/201o/a> for (oa href="+code=i" class="sref">io/a> = 0;/oa href="+code=i" class="sref">io/a> < oa href="+code=iov_count" class="sref">iov_counto/a>;/oa href="+code=i" class="sref">io/a>++) {l/202o/a> unsigned long oa href="+code=uaddr" class="sref">uaddro/a> = (unsigned long)oa href="+code=iov" class="sref">iovo/a>[oa href="+code=i" class="sref">io/a>].oa href="+code=iov_base" class="sref">iov_baseo/a>;l/203o/a>l/204o/a> if (!oa href="+code=iov" class="sref">iovo/a>[oa href="+code=i" class="sref">io/a>].oa href="+code=iov_len" class="sref">iov_leno/a>)l/205o/a> return -oa href="+code=EINVAL" class="sref">EINVALo/a>;l/206o/a>l/207o/a> ospa. class="comment">/*o/spa.18/208o/a>ospa. class="comment"> * Keep going so we check length of all segmentso/spa.18/209o/a>ospa. 
class="comment"> */o/spa.18/210o/a> if (oa href="+code=uaddr" class="sref">uaddro/a> & oa href="+code=queue_dma_alignment" class="sref">queue_dma_alignmento/a>(oa href="+code=q" class="sref">qo/a>))l/211o/a> oa href="+code=unaligned" class="sref">unalignedo/a> = 1;l/212o/a> }l/213o/a>l/214o/a> if (oa href="+code=unaligned" class="sref">unalignedo/a> || (oa href="+code=q" class="sref">qo/a>->oa href="+code=dma_pad_mask" class="sref">dma_pad_masko/a> & oa href="+code=len" class="sref">leno/a>) || oa href="+code=map_data" class="sref">map_datao/a>)l/215o/a> oa href="+code=bio" class="sref">bioo/a> = oa href="+code=bio_copy_user_iov" class="sref">bio_copy_user_iovo/a>(oa href="+code=q" class="sref">qo/a>, oa href="+code=map_data" class="sref">map_datao/a>, oa href="+code=iov" class="sref">iovo/a>,/oa href="+code=iov_count" class="sref">iov_counto/a>, oa href="+code=read" class="sref">reado/a>,l/216o/a> oa href="+code=gfp_mask" class="sref">gfp_masko/a>);l/217o/a> else8/218o/a> oa href="+code=bio" class="sref">bioo/a> = oa href="+code=bio_map_user_iov" class="sref">bio_map_user_iovo/a>(oa href="+code=q" class="sref">qo/a>, oa href="+code=NULL" class="sref">NULLo/a>, oa href="+code=iov" class="sref">iovo/a>,/oa href="+code=iov_count" class="sref">iov_counto/a>, oa href="+code=read" class="sref">reado/a>, oa href="+code=gfp_mask" class="sref">gfp_masko/a>);l/219o/a>l/220o/a> if (oa href="+code=IS_ERR" class="sref">IS_ERRo/a>(oa href="+code=bio" class="sref">bioo/a>))l/221o/a> return oa href="+code=PTR_ERR" class="sref">PTR_ERRo/a>(oa href="+code=bio" class="sref">bioo/a>);l/222o/a>l/223o/a> if (oa href="+code=bio" class="sref">bioo/a>->oa href="+code=bi_size" class="sref">bi_sizeo/a> != oa href="+code=len" class="sref">leno/a>) {l/224o/a> ospa. class="comment">/*o/spa.18/225o/a>ospa. class="comment"> * Grab a. extra reference to this bio, as bio_unmap_user()o/spa.18/226o/a>ospa. class="comment"> * expects to be able to drop it twice as it happens on theo/spa.18/227o/a>ospa. class="comment"> * normal IO comple v3. patho/spa.18/228o/a>ospa. 
class="comment"> */o/spa.18/229o/a> oa href="+code=bio_get" class="sref">bio_geto/a>(oa href="+code=bio" class="sref">bioo/a>);l/230o/a> oa href="+code=bio_endio" class="sref">bio_endioo/a>(oa href="+code=bio" class="sref">bioo/a>, 0);l/231o/a> oa href="+code=__blk_rq_unmap_user" class="sref">__blk_rq_unmap_usero/a>(oa href="+code=bio" class="sref">bioo/a>);l/232o/a> return -oa href="+code=EINVAL" class="sref">EINVALo/a>;l/233o/a> }l/234o/a>l/235o/a> if (!oa href="+code=bio_flagged" class="sref">bio_flaggedo/a>(oa href="+code=bio" class="sref">bioo/a>, oa href="+code=BIO_USER_MAPPED" class="sref">BIO_USER_MAPPEDo/a>))l/236o/a> oa href="+code=rq" class="sref">rqo/a>->oa href="+code=cmd_flags" class="sref">cmd_flagso/a> |= oa href="+code=REQ_COPY_USER" class="sref">REQ_COPY_USERo/a>;l/237o/a>l/238o/a> oa href="+code=blk_queue_bounce" class="sref">blk_queue_bounceo/a>(oa href="+code=q" class="sref">qo/a>, &oa href="+code=bio" class="sref">bioo/a>);l/239o/a> oa href="+code=bio_get" class="sref">bio_geto/a>(oa href="+code=bio" class="sref">bioo/a>);l/240o/a> oa href="+code=blk_rq_bio_prep" class="sref">blk_rq_bio_prepo/a>(oa href="+code=q" class="sref">qo/a>, oa href="+code=rq" class="sref">rqo/a>, oa href="+code=bio" class="sref">bioo/a>);l/241o/a> oa href="+code=rq" class="sref">rqo/a>->oa href="+code=buffer" class="sref">buffero/a> = oa href="+code=NULL" class="sref">NULLo/a>;l/242o/a> return 0;l/243o/a>}l/244o/a>oa href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOLo/a>(oa href="+code=blk_rq_map_user_iov" class="sref">blk_rq_map_user_iovo/a>);l/245o/a>l/246o/a>ospa. class="comment">/**o/spa.18/247o/a>ospa. class="comment"> * blk_rq_unmap_user - unmap a request with user datao/spa.18/248o/a>ospa. class="comment"> * @bio: start of bio listo/spa.18/249o/a>ospa. class="comment"> *o/spa.18/250o/a>ospa. class="comment"> * Descri v3.:o/spa.18/251o/a>ospa. class="comment"> * Unmap a rq previously mapped by blk_rq_map_user(). The caller musto/spa.18/252o/a>ospa. class="comment"> * supply the original rq->bio from the blk_rq_map_user() return, sinceo/spa.18/253o/a>ospa. class="comment"> * the I/O comple v3. may have changed rq->bio.o/spa.18/254o/a>ospa. 
class="comment"> */o/spa.18/255o/a>int/oa href="+code=blk_rq_unmap_user" class="sref">blk_rq_unmap_usero/a>(struct oa href="+code=bio" class="sref">bioo/a> *oa href="+code=bio" class="sref">bioo/a>)l/256o/a>{l/257o/a> struct oa href="+code=bio" class="sref">bioo/a> *oa href="+code=mapped_bio" class="sref">mapped_bioo/a>;l/258o/a> int/oa href="+code=ret" class="sref">reto/a> = 0, oa href="+code=ret2" class="sref">ret2o/a>;l/259o/a>l/260o/a> while (oa href="+code=bio" class="sref">bioo/a>) {l/261o/a> oa href="+code=mapped_bio" class="sref">mapped_bioo/a> = oa href="+code=bio" class="sref">bioo/a>;l/262o/a> if (oa href="+code=unlikely" class="sref">unlikelyo/a>(oa href="+code=bio_flagged" class="sref">bio_flaggedo/a>(oa href="+code=bio" class="sref">bioo/a>, oa href="+code=BIO_BOUNCED" class="sref">BIO_BOUNCEDo/a>)))l/263o/a> oa href="+code=mapped_bio" class="sref">mapped_bioo/a> = oa href="+code=bio" class="sref">bioo/a>->oa href="+code=bi_private" class="sref">bi_privateo/a>;l/264o/a>l/265o/a> oa href="+code=ret2" class="sref">ret2o/a> = oa href="+code=__blk_rq_unmap_user" class="sref">__blk_rq_unmap_usero/a>(oa href="+code=mapped_bio" class="sref">mapped_bioo/a>);l/266o/a> if (oa href="+code=ret2" class="sref">ret2o/a> && !oa href="+code=ret" class="sref">reto/a>)l/267o/a> oa href="+code=ret" class="sref">reto/a> = oa href="+code=ret2" class="sref">ret2o/a>;l/268o/a>l/269o/a> oa href="+code=mapped_bio" class="sref">mapped_bioo/a> = oa href="+code=bio" class="sref">bioo/a>;l/270o/a> oa href="+code=bio" class="sref">bioo/a> = oa href="+code=bio" class="sref">bioo/a>->oa href="+code=bi_next" class="sref">bi_nexto/a>;l/271o/a> oa href="+code=bio_put" class="sref">bio_puto/a>(oa href="+code=mapped_bio" class="sref">mapped_bioo/a>);l/272o/a> }l/273o/a>l/274o/a> return oa href="+code=ret" class="sref">reto/a>;l/275o/a>}l/276o/a>oa href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOLo/a>(oa href="+code=blk_rq_unmap_user" class="sref">blk_rq_unmap_usero/a>);l/277o/a>l/278o/a>ospa. class="comment">/**o/spa.18/279o/a>ospa. class="comment"> * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usageo/spa.18/280o/a>ospa. class="comment"> * @q: request queue where request should be insertedo/spa.18/281o/a>ospa. class="comment"> * @rq: request to fillo/spa.18/282o/a>ospa. class="comment"> * @kbuf: the kernel buffero/spa.18/283o/a>ospa. class="comment"> * @len: length of user datao/spa.18/284o/a>ospa. class="comment"> * @gfp_mask: memory alloca v3. flagso/spa.18/285o/a>ospa. class="comment"> *o/spa.18/286o/a>ospa. class="comment"> * Descri v3.:o/spa.18/287o/a>ospa. class="comment"> * Data will be mapped directly if possible. Otherwise a bounceo/spa.18/288o/a>ospa. class="comment"> * buffer is used. Ca. be called multple times to append multpleo/spa.18/289o/a>ospa. class="comment"> * buffers.o/spa.18/290o/a>ospa. 
class="comment"> */o/spa.18/291o/a>int/oa href="+code=blk_rq_map_kern" class="sref">blk_rq_map_kerno/a>(struct oa href="+code=request_queue" class="sref">request_queueo/a> *oa href="+code=q" class="sref">qo/a>, struct oa href="+code=request" class="sref">requesto/a> *oa href="+code=rq" class="sref">rqo/a>, void/*oa href="+code=kbuf" class="sref">kbufo/a>,l/292o/a> unsigned int/oa href="+code=len" class="sref">leno/a>, oa href="+code=gfp_t" class="sref">gfp_to/a> oa href="+code=gfp_mask" class="sref">gfp_masko/a>)l/293o/a>{l/294o/a> int/oa href="+code=reading" class="sref">readingo/a> = oa href="+code=rq_data_dir" class="sref">rq_data_diro/a>(oa href="+code=rq" class="sref">rqo/a>) == oa href="+code=READ" class="sref">READo/a>;l/295o/a> unsigned long oa href="+code=addr" class="sref">addro/a> = (unsigned long) oa href="+code=kbuf" class="sref">kbufo/a>;l/296o/a> int/oa href="+code=do_copy" class="sref">do_copyo/a> = 0;l/297o/a> struct oa href="+code=bio" class="sref">bioo/a> *oa href="+code=bio" class="sref">bioo/a>;l/298o/a> int/oa href="+code=ret" class="sref">reto/a>;l/299o/a>l/300o/a> if (oa href="+code=len" class="sref">leno/a> > (oa href="+code=queue_max_hw_sectors" class="sref">queue_max_hw_sectorso/a>(oa href="+code=q" class="sref">qo/a>) << 9))l/301o/a> return -oa href="+code=EINVAL" class="sref">EINVALo/a>;l/302o/a> if (!oa href="+code=len" class="sref">leno/a> || !oa href="+code=kbuf" class="sref">kbufo/a>)l/303o/a> return -oa href="+code=EINVAL" class="sref">EINVALo/a>;l/304o/a>l/305o/a> oa href="+code=do_copy" class="sref">do_copyo/a> = !oa href="+code=blk_rq_aligned" class="sref">blk_rq_alignedo/a>(oa href="+code=q" class="sref">qo/a>, oa href="+code=addr" class="sref">addro/a>, oa href="+code=len" class="sref">leno/a>) || oa href="+code=object_is_on_stack" class="sref">object_is_on_stacko/a>(oa href="+code=kbuf" class="sref">kbufo/a>);l/306o/a> if (oa href="+code=do_copy" class="sref">do_copyo/a>)l/307o/a> oa href="+code=bio" class="sref">bioo/a> = oa href="+code=bio_copy_kern" class="sref">bio_copy_kerno/a>(oa href="+code=q" class="sref">qo/a>, oa href="+code=kbuf" class="sref">kbufo/a>,/oa href="+code=len" class="sref">leno/a>, oa href="+code=gfp_mask" class="sref">gfp_masko/a>, oa href="+code=reading" class="sref">readingo/a>);l/308o/a> else8/309o/a> oa href="+code=bio" class="sref">bioo/a> = oa href="+code=bio_map_kern" class="sref">bio_map_kerno/a>(oa href="+code=q" class="sref">qo/a>, oa href="+code=kbuf" class="sref">kbufo/a>,/oa href="+code=len" class="sref">leno/a>, oa href="+code=gfp_mask" class="sref">gfp_masko/a>);l/310o/a>l/311o/a> if (oa href="+code=IS_ERR" class="sref">IS_ERRo/a>(oa href="+code=bio" class="sref">bioo/a>))l/312o/a> return oa href="+code=PTR_ERR" class="sref">PTR_ERRo/a>(oa href="+code=bio" class="sref">bioo/a>);l/313o/a>l/314o/a> if (!oa href="+code=reading" class="sref">readingo/a>)l/315o/a> oa href="+code=bio" class="sref">bioo/a>->oa href="+code=bi_rw" class="sref">bi_rwo/a> |= oa href="+code=REQ_WRITE" class="sref">REQ_WRITEo/a>;l/316o/a>l/317o/a> if (oa href="+code=do_copy" class="sref">do_copyo/a>)l/318o/a> oa href="+code=rq" class="sref">rqo/a>->oa href="+code=cmd_flags" class="sref">cmd_flagso/a> |= oa href="+code=REQ_COPY_USER" class="sref">REQ_COPY_USERo/a>;l/319o/a>l/320o/a> oa href="+code=ret" class="sref">reto/a> = oa href="+code=blk_rq_append_bio" class="sref">blk_rq_append_bioo/a>(oa href="+code=q" class="sref">qo/a>, oa href="+code=rq" class="sref">rqo/a>, oa href="+code=bio" class="sref">bioo/a>);l/321o/a> if (oa 
href="+code=unlikely" class="sref">unlikelyo/a>(oa href="+code=ret" class="sref">reto/a>)) {l/322o/a> ospa. class="comment">/* request is too big */o/spa.18/323o/a> oa href="+code=bio_put" class="sref">bio_puto/a>(oa href="+code=bio" class="sref">bioo/a>);l/324o/a> return oa href="+code=ret" class="sref">reto/a>;l/325o/a> }l/326o/a>l/327o/a> oa href="+code=blk_queue_bounce" class="sref">blk_queue_bounceo/a>(oa href="+code=q" class="sref">qo/a>, &oa href="+code=rq" class="sref">rqo/a>->oa href="+code=bio" class="sref">bioo/a>);l/328o/a> oa href="+code=rq" class="sref">rqo/a>->oa href="+code=buffer" class="sref">buffero/a> = oa href="+code=NULL" class="sref">NULLo/a>;l/329o/a> return 0;l/330o/a>}l/331o/a>oa href="+code=EXPORT_SYMBOL" class="sref">EXPORT_SYMBOLo/a>(oa href="+code=blk_rq_map_kern" class="sref">blk_rq_map_kerno/a>);l/332o/a>