linux/block/blk-merge.c
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

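/*
 * Walk the bio list and count the physical segments that remain after
 * adjacent biovecs are clustered, honouring the queue's maximum segment
 * size and segment boundary limits.  Also records the size of the first
 * and last segments in bi_seg_front_size/bi_seg_back_size for later
 * merge decisions.
 */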
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
{
        struct bio_vec *bv, *bvprv = NULL;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;

        if (!bio)
                return 0;

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, i) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
                        high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
                                if (seg_size + bv->bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
                                        goto new_segment;

                                seg_size += bv->bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        seg_size = bv->bv_len;
                        highprv = high;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct bio *nxt = bio->bi_next;

        bio->bi_next = NULL;
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
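
/*
 * Callers typically guard the recount with BIO_SEG_VALID so it is done
 * at most once per bio, as in the merge paths below:
 *
 *	if (!bio_flagged(bio, BIO_SEG_VALID))
 *		blk_recount_segments(q, bio);
 */
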
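/*
 * Can the last segment of @bio and the first segment of @nxt be joined
 * into one physical segment, given the queue's clustering, segment size
 * and segment boundary constraints?
 */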
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIO_SEG_BOUNDARY(q, bio, nxt))
                return 1;

        return 0;
}

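/*
 * Map a single biovec into @sglist: fold it into the current sg entry
 * when clustering allows, otherwise start a new entry.  Common helper
 * for blk_rq_map_sg() and blk_bio_map_sg().
 */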
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec **bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{
        int nbytes = bvec->bv_len;

        if (*bvprv && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        (*sg)->page_link &= ~0x02;
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = bvec;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct bio_vec *bvec, *bvprv;
        struct req_iterator iter;
        struct scatterlist *sg;
        int nsegs, cluster;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        /*
         * for each bio in rq
         */
        bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in rq */

        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
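
/*
 * A typical caller (sketch only; the segment-count macro and device
 * are hypothetical):
 *
 *	struct scatterlist sgl[MY_MAX_SEGMENTS];
 *	int nseg;
 *
 *	sg_init_table(sgl, MY_MAX_SEGMENTS);
 *	nseg = blk_rq_map_sg(q, rq, sgl);
 *	nseg = dma_map_sg(dev, sgl, nseg, rq_data_dir(rq) == WRITE ?
 *			  DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *
 * The returned count can be smaller than rq->nr_phys_segments when
 * adjacent biovecs were clustered into a single entry.
 */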

/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries setup
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                   struct scatterlist *sglist)
{
        struct bio_vec *bvec, *bvprv;
        struct scatterlist *sg;
        int nsegs, cluster;
        unsigned long i;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        bvprv = NULL;
        sg = NULL;
        bio_for_each_segment(bvec, bio, i) {
                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in bio */

        if (sg)
                sg_mark_end(sg);

        BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
        return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);
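
/*
 * Charge the physical segments of @bio to @req; the merge fails when
 * the combined count would exceed queue_max_segments() or the
 * integrity segments cannot be added.
 */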
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
        return 0;
}
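
/*
 * ll_back_merge_fn() appends @bio behind @req, ll_front_merge_fn()
 * places it in front; both first check that the grown request stays
 * within the sector limit before recounting segments.
 */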
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}
reqs_fv_len" class="srll_elk-m_ef">reqs_fvMBOL(struct request_queue *q< a>, struct request *q<
request *q->q->bio->/*
reqs ars ef-est_qali/*
reqs.  Can't blk-m itsm    itsy arsmand.

q->/*
/*

q) + q))>
q->nr_phys_segments + nr_phys_segments = 0;
nr_pcontigeach_segment" class="srede=>nr_pcontigeach_segents(q, q->q, bio)) a>) {
q->nr_phys_segments<== 1)>
q->bio->nr_phys_segments<== 1)>
q->q<)
ode=rq" class="srede=   egrity>odents(q) &/a> & a> *q, q, 
q->nr_phys_segments<= int  150}n 1;
nsegs)++3

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark
 *
 * @rq is about to be mixed merged.  Make sure the attributes
 * which can be mixed are set in each bio and mark @rq as mixed
 * merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distributes the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}
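
/*
 * The request that loses a merge disappears from the queue, so drop
 * it from the partition's in-flight accounting.
 */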
elk-msegs" class="srede=ack_recpi">elk-mMBOL(struct request *q)
segs" class="srede=dopi">ents(q))<{
(ste=rq" class="srhd_/a>(stuest *rq/a>);
rq/a>);
  88            a> *rq = );
rq = q->rq/a>);
r_red>flags" class="srpart>r_red>fents(rq, rq);
dec_ine=bighte=rq" class="srpart>dec_ine=bightents(rq, q))/a>);
(st_pute=rq" class="srhd_/a>(st_putents(rq)/a>);
_unf="be=rq" class="srpart>_unf="bents<);

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                         struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || next->special)
                return 0;

        if (req->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of bio passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        return attempt_merge(q, rq, next);
}
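
/*
 * Cheap checks used by the elevator to decide whether @bio may be
 * merged into @rq at all, and if so on which end.
 */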
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;

        /* different data direction or data direction not set */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return false;

        /* must be using the same buffer */
        if (rq->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}