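/*
 * Block layer core: handles read/write requests to block devices.
 */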
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>
#include <trace/block.h>

#include "blk.h"

DEFINE_TRACE(block_plug);
DEFINE_TRACE(block_unplug_io);
DEFINE_TRACE(block_unplug_timer);
DEFINE_TRACE(block_getrq);
DEFINE_TRACE(block_sleeprq);
DEFINE_TRACE(block_rq_requeue);
DEFINE_TRACE(block_bio_backmerge);
DEFINE_TRACE(block_bio_frontmerge);
DEFINE_TRACE(block_bio_queue);
DEFINE_TRACE(block_rq_complete);
DEFINE_TRACE(block_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap);

static int __make_request(struct request_queue *q, struct bio *bio);

static struct kmem_cache *request_cachep;

struct kmem_cache *blk_requestq_cachep;

static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_fs_request(rq) || !blk_do_io_stat(rq))
		return;

	cpu = part_stat_lock();
	part = disk_map_sector_rcu(rq->rq_disk, rq->sector);

	if (!new_io)
		part_stat_inc(cpu, part, merges[rw]);
	else {
		part_round_stats(cpu, part);
		part_inc_in_flight(part);
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}
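/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device
 *
 * Locates the passed device's request queue and returns the address of its
 * backing_dev_info.  Returns NULL if the request queue cannot be located.
 */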
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->sector = rq->hard_sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->cmd_len = BLK_MAX_CDB;
	rq->tag = -1;
	rq->ref_count = 1;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	struct request_queue *q = rq->q;

	if (&q->bar_rq != rq) {
		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (unlikely(nbytes > bio->bi_size)) {
			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
			       __func__, nbytes, bio->bi_size);
			nbytes = bio->bi_size;
		}

		if (unlikely(rq->cmd_flags & REQ_QUIET))
			set_bit(BIO_QUIET, &bio->bi_flags);

		bio->bi_size -= nbytes;
		bio->bi_sector += (nbytes >> 9);

		if (bio_integrity(bio))
			bio_integrity_advance(bio, nbytes);

		if (bio->bi_size == 0)
			bio_endio(bio, error);
	} else {
		if (error && !q->orderr)
			q->orderr = error;
	}
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n",
		(unsigned long long)rq->sector,
		rq->nr_sectors,
		rq->current_nr_sectors);
	printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n",
		rq->bio, rq->biotail,
		rq->buffer, rq->data,
		rq->data_len);

	if (blk_pc_request(rq)) {
		printk(KERN_INFO " cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);
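/*
 * "Plug" the device if there are no outstanding requests: this will
 * force the transfer to start only after we have put all the requests
 * on the list.
 *
 * This is called with interrupts off, no requests on the queue, and
 * with the queue lock held.
 */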
void blk_plug_device(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	if (blk_queue_stopped(q))
		return;

	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		trace_block_plug(q);
	}
}
EXPORT_SYMBOL(blk_plug_device);

void blk_plug_device_unlocked(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_plug_device_unlocked);

int blk_remove_plug(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}
EXPORT_SYMBOL(blk_remove_plug);

void __generic_unplug_device(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;
	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
		return;

	q->request_fn(q);
}
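/**
 * generic_unplug_device - fire a request queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The block layer uses plugging to build bigger requests queues before
 *   letting the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue gets
 *   unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 */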
void generic_unplug_device(struct request_queue *q)
{
	if (blk_queue_plugged(q)) {
		spin_lock_irq(q->queue_lock);
		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL(generic_unplug_device);

static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
				   struct page *page)
{
	struct request_queue *q = bdi->unplug_io_data;

	blk_unplug(q);
}

void blk_unplug_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, unplug_work);

	trace_block_unplug_io(q);
	q->unplug_fn(q);
}

void blk_unplug_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	trace_block_unplug_timer(q);
	kblockd_schedule_work(q, &q->unplug_work);
}

void blk_unplug(struct request_queue *q)
{
	if (q->unplug_fn) {
		trace_block_unplug_io(q);
		q->unplug_fn(q);
	}
}
EXPORT_SYMBOL(blk_unplug);

static void blk_invoke_request_fn(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
		kblockd_schedule_work(q, &q->unplug_work);
	}
}
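/**
 * blk_start_queue - restart a previously stopped queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   blk_start_queue() will clear the stop flag on the queue and call
 *   the request_fn for the queue if it was in a stopped state when
 *   entered. Also see blk_stop_queue(). Queue lock must be held.
 */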
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_invoke_request_fn(q);
}
EXPORT_SYMBOL(blk_start_queue);
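/**
 * blk_stop_queue - stop a queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   The block layer assumes that a block driver will consume all entries
 *   on the request queue when the request_fn strategy is called. Often this
 *   will not happen because of hardware limitations (queue depth settings).
 *   If a device driver gets a 'queue full' response, or if it simply chooses
 *   not to queue more I/O at one point, it can call this function to prevent
 *   the request_fn from being called until the driver has signalled that it
 *   is ready to go again via blk_start_queue(). Queue lock must be held.
 */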
void blk_stop_queue(struct request_queue *q)
{
	blk_remove_plug(q);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
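/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity on a queue,
 *     such as calling the unplug function after a timeout. A block device
 *     may call blk_sync_queue to ensure that any such activity is cancelled,
 *     thus allowing it to release resources that the callbacks might use.
 */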
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->unplug_timer);
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);

void __blk_run_queue(struct request_queue *q)
{
	blk_remove_plug(q);

	if (!elv_queue_empty(q))
		blk_invoke_request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);
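/**
 * blk_run_queue - run a single device queue
 * @q: The queue to run
 *
 * Description:
 *    Invoke request handling on this queue, if it has pending work to do.
 *    Takes the queue lock; see __blk_run_queue() for the lock-held variant.
 */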
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}

void blk_cleanup_queue(struct request_queue *q)
{
	blk_sync_queue(q);

	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
	mutex_unlock(&q->sysfs_lock);

	if (q->elevator)
		elevator_exit(q->elevator);

	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
	q->backing_dev_info.unplug_io_data = q;
	err = bdi_init(&q->backing_dev_info);
	if (err) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	init_timer(&q->unplug_timer);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_WORK(&q->unplug_work, blk_unplug_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);
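/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  The function to be called to process requests that have been
 *        placed on the queue.
 * @lock: Request queue spin lock
 *
 * Description:
 *    If a block device wishes to use the standard request handling
 *    procedures, which sort requests and coalesce adjacent requests,
 *    then it must call blk_init_queue().  The function @rfn will be
 *    called when there are requests on the queue that need to be processed.
 *
 *    The queue spin lock must be held while manipulating the requests on
 *    the request queue; this lock will also be taken from interrupt context,
 *    so irq disabling is needed for it.
 *
 *    Returns a pointer to the initialized request queue, or %NULL on failure.
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call when
 *    the block device is deactivated (such as at module unload).
 */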
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q)) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	if (!lock)
		lock = &q->__queue_lock;

	q->request_fn = rfn;
	q->prep_rq_fn = NULL;
	q->unplug_fn = generic_unplug_device;
	q->queue_flags = QUEUE_FLAG_DEFAULT;
	q->queue_lock = lock;

	blk_queue_make_request(q, __make_request);

	q->sg_reserved_size = INT_MAX;

	blk_set_cmd_filter_defaults(&q->cmd_filter);

	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	blk_put_queue(q);
	return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);

int blk_get_queue(struct request_queue *q)
{
	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kobject_get(&q->kobj);
		return 0;
	}

	return 1;
}

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	blk_rq_init(q, rq);

	rq->cmd_flags = flags | REQ_ALLOCED;

	if (priv) {
		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
			mempool_free(rq, q->rq.rq_pool);
			return NULL;
		}
		rq->cmd_flags |= REQ_ELVPRIV;
	}

	return rq;
}

static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_queue *q, int sync)
{
	struct request_list *rl = &q->rq;

	if (rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_queue_full(q, sync);
	}
}

static void freed_request(struct request_queue *q, int sync, int priv)
{
	struct request_list *rl = &q->rq;

	rl->count[sync]--;
	if (priv)
		rl->elvpriv--;

	__freed_request(q, sync);

	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(q, sync ^ 1);
}
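/*
 * Get a free request from this queue.
 *
 * Returns NULL on failure, with the queue_lock held.
 * Returns !NULL on success, with the queue_lock *not held*.
 */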
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct io_context *ioc = NULL;
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	int may_queue, priv;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			ioc = current_io_context(GFP_ATOMIC, q->node);
			if (!blk_queue_full(q, is_sync)) {
				ioc_set_batching(q, ioc);
				blk_set_queue_full(q, is_sync);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					goto out;
				}
			}
		}
		blk_set_queue_congested(q, is_sync);
	}

	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		goto out;

	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	if (priv)
		rl->elvpriv++;

	if (blk_queue_io_stat(q))
		rw_flags |= REQ_IO_STAT;
	spin_unlock_irq(q->queue_lock);

	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
	if (unlikely(!rq)) {
		spin_lock_irq(q->queue_lock);
		freed_request(q, is_sync, priv);

rq_starved:
		if (unlikely(rl->count[is_sync] == 0))
			rl->starved[is_sync] = 1;

		goto out;
	}

	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	trace_block_getrq(q, bio, rw_flags & 1);
out:
	return rq;
}
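/*
 * No available requests for this queue: unplug the device and wait for some
 * requests to become available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */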
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				TASK_UNINTERRUPTIBLE);

		trace_block_sleeprq(q, bio, rw_flags & 1);

		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
		io_schedule();

		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[is_sync], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}

	return rq;
}
EXPORT_SYMBOL(blk_get_request);

void blk_start_queueing(struct request_queue *q)
{
	if (!blk_queue_plugged(q)) {
		if (unlikely(blk_queue_stopped(q)))
			return;
		q->request_fn(q);
	} else
		__generic_unplug_device(q);
}
EXPORT_SYMBOL(blk_start_queueing);
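/**
 * blk_requeue_request - put a request back on queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 *
 * Description:
 *    Drivers often keep queueing requests until the hardware cannot accept
 *    more; when that condition happens we need to put the request back on
 *    the queue. Must be called with queue lock held.
 */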
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	trace_block_rq_requeue(q, rq);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);
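/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they
 *    don't block the whole kernel from preemption during request execution.
 *    This is accomplished by inserting artificial requests tagged as
 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting them
 *    be scheduled for actual execution by the request queue.
 *
 *    We have the option of inserting at the head or the tail of the queue.
 *    Typically we use the tail for new ioctls and so forth, and the head of
 *    the queue for things like a QUEUE_FULL message from a device.
 */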
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	unsigned long flags;

	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_flags |= REQ_SOFTBARRIER;

	rq->special = data;

	spin_lock_irqsave(q->queue_lock, flags);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where, 0);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);

static inline void add_request(struct request_queue *q, struct request *req)
{
	drive_stat_acct(req, 1);

	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part->in_flight) {
		__part_stat_add(cpu, part, time_in_queue,
				part->in_flight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);

void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	WARN_ON(req->bio != NULL);

	if (req->cmd_flags & REQ_ALLOCED) {
		int is_sync = rq_is_sync(req) != 0;
		int priv = req->cmd_flags & REQ_ELVPRIV;

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(q, req);
		freed_request(q, is_sync, priv);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cpu = bio->bi_comp_cpu;
	req->cmd_type = REQ_TYPE_FS;

	if (bio_rw_ahead(bio))
		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
				   REQ_FAILFAST_DRIVER);
	if (bio_failfast_dev(bio))
		req->cmd_flags |= REQ_FAILFAST_DEV;
	if (bio_failfast_transport(bio))
		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	if (bio_failfast_driver(bio))
		req->cmd_flags |= REQ_FAILFAST_DRIVER;

	if (unlikely(bio_discard(bio))) {
		req->cmd_flags |= REQ_DISCARD;
		if (bio_barrier(bio))
			req->cmd_flags |= REQ_SOFTBARRIER;
		req->q->prepare_discard_fn(req->q, req);
	} else if (unlikely(bio_barrier(bio)))
		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

	if (bio_sync(bio))
		req->cmd_flags |= REQ_RW_SYNC;
	if (bio_rw_meta(bio))
		req->cmd_flags |= REQ_RW_META;
	if (bio_noidle(bio))
		req->cmd_flags |= REQ_NOIDLE;

	req->errors = 0;
	req->hard_sector = req->sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	req->start_time = jiffies;
	blk_rq_bio_prep(req->q, req, bio);
}

static inline bool queue_should_plug(struct request_queue *q)
{
	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
}

static int __make_request(struct request_queue *q, struct bio *bio)
{
	struct request *req;
	int el_ret, nr_sectors;
	const unsigned short prio = bio_prio(bio);
	const int sync = bio_sync(bio);
	const int unplug = bio_unplug(bio);
	int rw_flags;

	nr_sectors = bio_sectors(bio);

	if (bio_barrier(bio) && bio_has_data(bio) &&
	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	blk_queue_bounce(q, &bio);

	spin_lock_irq(q->queue_lock);

	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
		goto get_rq;

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
	case ELEVATOR_BACK_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_back_merge_fn(q, req, bio))
			break;

		trace_block_bio_backmerge(q, bio);

		req->biotail->bi_next = bio;
		req->biotail = bio;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		if (!blk_rq_cpu_valid(req))
			req->cpu = bio->bi_comp_cpu;
		drive_stat_acct(req, 0);
		if (!attempt_back_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	case ELEVATOR_FRONT_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_front_merge_fn(q, req, bio))
			break;

		trace_block_bio_frontmerge(q, bio);

		bio->bi_next = req->bio;
		req->bio = bio;

		req->buffer = bio_data(bio);
		req->current_nr_sectors = bio_cur_sectors(bio);
		req->hard_cur_sectors = req->current_nr_sectors;
		req->sector = req->hard_sector = bio->bi_sector;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		if (!blk_rq_cpu_valid(req))
			req->cpu = bio->bi_comp_cpu;
		drive_stat_acct(req, 0);
		if (!attempt_front_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	default:
		;
	}

get_rq:
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_RW_SYNC;

	req = get_request_wait(q, rw_flags, bio);

	init_request_from_bio(req, bio);

	spin_lock_irq(q->queue_lock);
	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
	    bio_flagged(bio, BIO_CPU_AFFINE))
		req->cpu = blk_cpu_to_group(smp_processor_id());
	if (queue_should_plug(q) && elv_queue_empty(q))
		blk_plug_device(q);
	add_request(q, req);
out:
	if (unplug || !queue_should_plug(q))
		__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
	return 0;
}

static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
				  bdev->bd_dev, bio->bi_sector,
				  bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio->bi_sector + bio_sectors(bio),
			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static int should_fail_request(struct bio *bio)
{
	struct hd_struct *part = bio->bi_bdev->bd_part;

	if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
		return should_fail(&fail_make_request, bio->bi_size);

	return 0;
}

static int __init fail_make_request_debugfs(void)
{
	return init_fault_attr_dentries(&fail_make_request,
					"fail_make_request");
}

late_initcall(fail_make_request_debugfs);

#else

static inline int should_fail_request(struct bio *bio)
{
	return 0;
}

#endif

static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

static inline void __generic_make_request(struct bio *bio)
{
	struct request_queue *q;
	sector_t old_sector;
	int ret, nr_sectors = bio_sectors(bio);
	dev_t old_dev;
	int err = -EIO;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	old_sector = -1;
	old_dev = 0;
	do {
		char b[BDEVNAME_SIZE];

		q = bdev_get_queue(bio->bi_bdev);
		if (unlikely(!q)) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
			       "nonexistent block-device %s (%Lu)\n",
			       bdevname(bio->bi_bdev, b),
			       (long long) bio->bi_sector);
			goto end_io;
		}

		if (unlikely(nr_sectors > q->max_hw_sectors)) {
			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
			       bdevname(bio->bi_bdev, b),
			       bio_sectors(bio),
			       q->max_hw_sectors);
			goto end_io;
		}

		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
			goto end_io;

		if (should_fail_request(bio))
			goto end_io;

		blk_partition_remap(bio);

		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
			goto end_io;

		if (old_sector != -1)
			trace_block_remap(q, bio, old_dev, bio->bi_sector,
					  old_sector);

		trace_block_bio_queue(q, bio);

		old_sector = bio->bi_sector;
		old_dev = bio->bi_bdev->bd_dev;

		if (bio_check_eod(bio, nr_sectors))
			goto end_io;

		if (bio_discard(bio) && !q->prepare_discard_fn) {
			err = -EOPNOTSUPP;
			goto end_io;
		}

		ret = q->make_request_fn(q, bio);
	} while (ret);

	return;

end_io:
	bio_endio(bio, err);
}
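/**
 * generic_make_request - hand a buffer to its device driver for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * generic_make_request() is used to make I/O requests of block devices.
 * It does not return any status; success/failure and completion are
 * delivered asynchronously through the bio->bi_end_io function.
 *
 * We only want one ->make_request_fn to be active at a time, else stack
 * usage with stacked devices could be a problem. So we use
 * current->bio_{list,tail} to keep a list of requests submitted by a
 * make_request_fn function. current->bio_tail also acts as a flag to say
 * whether generic_make_request is currently active in this task: if it is
 * NULL, no make_request is active; if it is non-NULL, new requests are
 * appended at the tail instead of being dispatched recursively.
 */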
void generic_make_request(struct bio *bio)
{
	if (current->bio_tail) {
		/* make_request is already active: queue for later */
		*(current->bio_tail) = bio;
		bio->bi_next = NULL;
		current->bio_tail = &bio->bi_next;
		return;
	}

	BUG_ON(bio->bi_next);
	do {
		current->bio_list = bio->bi_next;
		if (bio->bi_next == NULL)
			current->bio_tail = &current->bio_list;
		else
			bio->bi_next = NULL;
		__generic_make_request(bio);
		bio = current->bio_list;
	} while (bio);
	current->bio_tail = NULL;
}
EXPORT_SYMBOL(generic_make_request);
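/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is very similar in purpose to generic_make_request(), and
 * uses that function to do most of the work. Both are fairly rough
 * interfaces; @bio must be preset up and ready for I/O.
 */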
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	bio->bi_rw |= rw;

	if (bio_has_data(bio)) {
		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
				current->comm, task_pid_nr(current),
				(rw & WRITE) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_sector,
				bdevname(bio->bi_bdev, b));
		}
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
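/**
 * blk_rq_check_limits - Helper function to check a request for the queue limit
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 */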
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
	if (rq->nr_sectors > q->max_sectors ||
	    rq->data_len > q->max_hw_sectors << 9) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > q->max_phys_segments ||
	    rq->nr_phys_segments > q->max_hw_segments) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);
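/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */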
int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	if (blk_rq_check_limits(q, rq))
		return -EIO;

#ifdef CONFIG_FAIL_MAKE_REQUEST
	if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
	    should_fail(&fail_make_request, blk_rq_bytes(rq)))
		return -EIO;
#endif

	spin_lock_irqsave(q->queue_lock, flags);

	BUG_ON(blk_queued_rq(rq));

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);

	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);

	blk_add_timer(req);
}
EXPORT_SYMBOL(blkdev_dequeue_request);

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (!blk_do_io_stat(req))
		return;

	if (blk_fs_request(req)) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(req->rq_disk, req->sector);
		part_stat_add(cpu, part, sectors[rw], bytes >> 9);
		part_stat_unlock();
	}
}

static void blk_account_io_done(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	if (blk_fs_request(req) && req != &req->q->bar_rq) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(req->rq_disk, req->sector);

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(cpu, part);
		part_dec_in_flight(part);

		part_stat_unlock();
	}
}

static int __end_that_request_first(struct request *req, int error,
				    int nr_bytes)
{
	int total_bytes, bio_nbytes, next_idx = 0;
	struct bio *bio;

	trace_block_rq_complete(req->q, req);

	if (!blk_pc_request(req))
		req->errors = 0;

	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
				(unsigned long long)req->sector);
	}

	blk_account_io_completion(req, nr_bytes);

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
		int nbytes;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			req_bio_endio(req, bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
				       __func__, idx, bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		bio = req->bio;
		if (bio) {
			if (unlikely(nr_bytes <= 0))
				break;
		}
	}

	if (!req->bio)
		return 0;

	if (bio_nbytes) {
		req_bio_endio(req, bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	blk_recalc_rq_sectors(req, total_bytes >> 9);
	blk_recalc_rq_segments(req);
	return 1;
}

static void end_that_request_last(struct request *req, int error)
{
	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

	if (blk_queued_rq(req))
		elv_dequeue_request(req->q, req);

	if (unlikely(laptop_mode) && blk_fs_request(req))
		laptop_io_completion();

	blk_delete_timer(req);

	blk_account_io_done(req);

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(req->q, req);
	}
}

unsigned int blk_rq_bytes(struct request *rq)
{
	if (blk_fs_request(rq))
		return rq->hard_nr_sectors << 9;

	return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_bytes);

unsigned int blk_rq_cur_bytes(struct request *rq)
{
	if (blk_fs_request(rq))
		return rq->current_nr_sectors << 9;

	if (rq->bio)
		return rq->bio->bi_size;

	return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);

void end_request(struct request *req, int uptodate)
{
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	__blk_end_request(req, error, req->hard_cur_sectors << 9);
}
EXPORT_SYMBOL(end_request);

static int end_that_request_data(struct request *rq, int error,
		unsigned int nr_bytes, unsigned int bidi_bytes)
{
	if (rq->bio) {
		if (__end_that_request_first(rq, error, nr_bytes))
			return 1;

		if (blk_bidi_rq(rq) &&
		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
			return 1;
	}

	return 0;
}

static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
		      unsigned int bidi_bytes,
		      int (drv_callback)(struct request *))
{
	struct request_queue *q = rq->q;
	unsigned long flags = 0UL;

	if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
		return 1;

	if (drv_callback && drv_callback(rq))
		return 1;

	add_disk_randomness(rq->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	end_that_request_last(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
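/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - still buffers pending for this request
 */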
int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_io(rq, error, nr_bytes, 0, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_request);

int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
		return 1;

	add_disk_randomness(rq->rq_disk);

	end_that_request_last(rq, error);

	return 0;
}
EXPORT_SYMBOL_GPL(__blk_end_request);

int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
			 unsigned int bidi_bytes)
{
	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_bidi_request);
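/**
 * blk_update_request - Special helper function for request stacking drivers
 * @rq:        the request being processed
 * @error:     %0 for success, < %0 for error
 * @nr_bytes:  number of bytes to complete @rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
 *     the request structure even if @rq doesn't have leftover.
 *     This helper is for request stacking drivers (e.g. request-based dm)
 *     so that they can handle partial completion; actual device drivers
 *     should use blk_end_request() instead.
 */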
void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
{
	if (!end_that_request_data(rq, error, nr_bytes, 0)) {
		rq->nr_sectors = rq->hard_nr_sectors = 0;
		rq->current_nr_sectors = rq->hard_cur_sectors = 0;
	}
}
EXPORT_SYMBOL_GPL(blk_update_request);

int blk_end_request_callback(struct request *rq, int error,
			     unsigned int nr_bytes,
			     int (drv_callback)(struct request *))
{
	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
}
EXPORT_SYMBOL_GPL(blk_end_request_callback);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	rq->cmd_flags |= (bio->bi_rw & 3);

	if (bio_has_data(bio)) {
		rq->nr_phys_segments = bio_phys_segments(q, bio);
		rq->buffer = bio_data(bio);
	}
	rq->current_nr_sectors = bio_cur_sectors(bio);
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
	rq->data_len = bio->bi_size;

	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

int blk_lld_busy(struct request_queue *q)
{
	if (q->lld_busy_fn)
		return q->lld_busy_fn(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int __init blk_dev_init(void)
{
	kblockd_workqueue = create_workqueue("kblockd");
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	return 0;
}