// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-rq-qos.h"

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the first bio starts with a non-zero offset:
	 * respecting the virt boundary gap limit is hard otherwise.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * Check whether the last bvec of @prev and the first bvec of @next
	 * are physically contiguous; if not, they must not create a gap
	 * across the virt boundary.
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* The queue cannot discard anything, so there is nothing to split. */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard
	 * at the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

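/*
 * Return the maximum number of sectors from the start of @bio that may be
 * submitted as a single request, based on the queue limits at that offset and
 * rounded down to the physical (or, failing that, logical) block size.
 */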
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
}

static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * Without a segment boundary (mask == ULONG_MAX) the addition below
	 * can overflow to 0; min_not_zero() then falls back to the queue's
	 * maximum segment size.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
}

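/*
 * bvec_split_segs - check whether a bvec has to be split in the middle
 *
 * Walks @bv in segment-sized chunks, bumping *@nsegs and *@sectors for each
 * chunk that still fits under @max_segs and @max_sectors.
 *
 * Returns true if the caller should split the bio at this bvec, i.e. if the
 * bvec does not fit completely within the segment and sector limits.
 */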
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* Tell the caller to split the bvec if it is too big to fit. */
	return len > 0 || bv->bv_len > max_len;
}

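/*
 * blk_bio_segment_split - split a bio so that the first part obeys the queue
 * limits
 *
 * Walk the bvecs of @bio and clone off a leading part that fits within the
 * queue's segment, sector and virt-boundary limits, updating @bio to describe
 * the remainder.  Returns NULL if no split is needed; in either case *@segs is
 * set to the number of segments in the (possibly split-off) first part.
 */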
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < max_segs &&
		    sectors + (bv.bv_len >> 9) <= max_sectors &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
					   max_sectors)) {
			goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;

	/*
	 * Splitting can interfere with synchronous iopoll in the direct I/O
	 * path, so drop the polling hint when the bio has to be split.
	 */
	bio->bi_opf &= ~REQ_HIPRI;

	return bio_split(bio, sectors, GFP_NOIO, bs);
}

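/**
 * __blk_queue_split - split a bio and submit the second half
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio.  Since this function may
 * allocate a new bio from q->bio_split, the caller must ensure that
 * q->bio_split is only released after processing of the split bio has
 * finished.
 */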
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
	struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
	struct bio *split = NULL;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		/*
		 * Fast path: a bio with a single bvec that fits within one
		 * page needs no splitting as long as the queue has no
		 * chunk_sectors limit; all drivers must accept such
		 * single-segment bios.
		 */
		if (!q->limits.chunk_sectors &&
		    (*bio)->bi_vcnt == 1 &&
		    ((*bio)->bi_io_vec[0].bv_len +
		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
			*nr_segs = 1;
			break;
		}
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* There is no chance to merge the split-off bio. */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(split, (*bio)->bi_iter.bi_sector);
		submit_bio_noacct(*bio);
		*bio = split;

		blk_throtl_charge_bio_split(*bio);
	}
}

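/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio.
 */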
void blk_queue_split(struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping.  We know that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * A multi-page bvec may span several pages; keep the sg
		 * offset below PAGE_SIZE by advancing to the page that
		 * actually holds the data and trimming the offset.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

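/* Only try to merge bvecs into one sg if they are from two bios. */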
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * The merge is only attempted across a bio boundary,
			 * i.e. between the last bvec of the previous bio and
			 * the first bvec of the current one.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

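/*
 * Map a request to a scatterlist, returning the number of sg entries set up.
 * The caller must make sure that sglist can hold rq->nr_phys_segments entries.
 */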
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the figured number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* A discard request merge does not add a new segment. */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the segment
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will the combined request become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK, update the segment count. */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

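/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark
 *
 * @rq is about to be mixed merged.  Make sure the failfast attributes that
 * can differ between the merged requests are propagated to each bio and
 * mark @rq as mixed merged.
 */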
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios, so distribute the failfast attributes to each
	 * bio individually.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

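/*
 * Try to merge @next into @req.  On success the bios of @next are appended
 * to @req and the now empty @next is returned for the caller to free;
 * otherwise NULL is returned and both requests are left untouched.
 */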
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to req and release next.  The merge functions above will have
	 * updated the segment counts; the sector counts are updated here.
	 */
	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly.
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * Ownership of the bios has passed to @req; return 'next' for the
	 * caller to free.
	 */
	next->bio = NULL;
	return next;
}

static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_bdev->bd_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

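/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: output parameter that gets set to a request on @q found
 *	on the plug list, if any
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if the merge was successful,
 * otherwise %false.
 *
 * Plugged requests are not on the elevator yet, so only the basic merging
 * parameters are checked here, without consulting the I/O scheduler.
 */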
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		if (rq->q == q && same_queue_rq) {
			/*
			 * Record for the caller that the plug list holds a
			 * request for the same queue.
			 */
			*same_queue_rq = rq;
		}

		if (rq->q != q)
			continue;

		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
		    BIO_MERGE_OK)
			return true;
	}

	return false;
}

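/*
 * Iterate the given list of requests and see if @bio can be merged into one
 * of them; at most a handful of entries are checked before giving up.
 */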
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);


bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);