/*
 * Block layer core: request queue setup and teardown, request
 * allocation, queue plugging/unplugging, bio submission and
 * request completion handling.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blktrace_api.h>
#include <linux/fault-inject.h>

#include "blk.h"

static int __make_request(struct request_queue *q, struct bio *bio);

/* For the allocated request tables */
static struct kmem_cache *request_cachep;

/* For queue allocation */
struct kmem_cache *blk_requestq_cachep;

/* Controlling structure to kblockd */
static struct workqueue_struct *kblockd_workqueue;

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);
	int cpu;

	if (!blk_fs_request(rq) || !rq->rq_disk)
		return;

	cpu = part_stat_lock();
	part = disk_map_sector_rcu(rq->rq_disk, rq->sector);

	if (!new_io)
		part_stat_inc(cpu, part, merges[rw]);
	else {
		part_round_stats(cpu, part);
		part_inc_in_flight(part);
	}

	part_stat_unlock();
}

void blk_queue_congestion_threshold(struct request_queue *q)
{
	int nr;

	nr = q->nr_requests - (q->nr_requests / 8) + 1;
	if (nr > q->nr_requests)
		nr = q->nr_requests;
	q->nr_congestion_on = nr;

	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
	if (nr < 1)
		nr = 1;
	q->nr_congestion_off = nr;
}

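/**
 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
 * @bdev:	device to query
 *
 * Returns the backing_dev_info of @bdev's request queue, or NULL if the
 * device has no queue.
 */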
struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
{
	struct backing_dev_info *ret = NULL;
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		ret = &q->backing_dev_info;
	return ret;
}
EXPORT_SYMBOL(blk_get_backing_dev_info);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	INIT_LIST_HEAD(&rq->timeout_list);
	rq->cpu = -1;
	rq->q = q;
	rq->sector = rq->hard_sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->cmd = rq->__cmd;
	rq->tag = -1;
	rq->ref_count = 1;
}
EXPORT_SYMBOL(blk_rq_init);

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, int error)
{
	struct request_queue *q = rq->q;

	if (&q->bar_rq != rq) {
		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (unlikely(nbytes > bio->bi_size)) {
			printk(KERN_ERR "%s: want %u bytes done, %u left\n",
			       __func__, nbytes, bio->bi_size);
			nbytes = bio->bi_size;
		}

		bio->bi_size -= nbytes;
		bio->bi_sector += (nbytes >> 9);

		if (bio_integrity(bio))
			bio_integrity_advance(bio, nbytes);

		if (bio->bi_size == 0)
			bio_endio(bio, error);
	} else {
		/*
		 * This is the barrier request in progress, just record
		 * the error; the bio is completed with the rest of the
		 * barrier sequence.
		 */
		if (error && !q->orderr)
			q->orderr = error;
	}
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	int bit;

	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
		rq->cmd_flags);

	printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n",
		(unsigned long long)rq->sector,
		rq->nr_sectors,
		rq->current_nr_sectors);
	printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n",
		rq->bio, rq->biotail,
		rq->buffer, rq->data,
		rq->data_len);

	if (blk_pc_request(rq)) {
		printk(KERN_INFO " cdb: ");
		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
		printk("\n");
	}
}
EXPORT_SYMBOL(blk_dump_rq_flags);

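/**
 * blk_plug_device - plug the device's queue
 * @q:	the queue to plug
 *
 * "Plugging" delays dispatch briefly so that adjacent I/O can be merged
 * into larger requests.  The unplug timer armed here kicks the queue if
 * nothing else unplugs it first.  Callers hold the queue lock with
 * interrupts disabled; blk_plug_device_unlocked() does the locking for you.
 */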
void blk_plug_device(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	/*
	 * don't plug a stopped queue, it must be paired with
	 * blk_start_queue() which will restart the queueing
	 */
	if (blk_queue_stopped(q))
		return;

	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
	}
}
EXPORT_SYMBOL(blk_plug_device);

/*
 * blk_plug_device_unlocked - plug a device without prior locking
 * @q:	the queue to plug
 *
 * Like blk_plug_device(), but grabs the queue lock and disables
 * interrupts itself.
 */
void blk_plug_device_unlocked(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_plug_device(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_plug_device_unlocked);

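/*
 * blk_remove_plug - remove a queue's plug and kill its unplug timer.
 * Returns 1 if the queue was actually plugged, 0 if it was not.
 * Called with interrupts disabled and the queue lock held.
 */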
int blk_remove_plug(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
		return 0;

	del_timer(&q->unplug_timer);
	return 1;
}
EXPORT_SYMBOL(blk_remove_plug);

/*
 * remove the plug and let it rip..
 */
void __generic_unplug_device(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	if (!blk_remove_plug(q))
		return;

	q->request_fn(q);
}

/**
 * generic_unplug_device - fire a request queue
 * @q:	the queue to unplug
 *
 * The purpose of plugging is to allow merging of adjacent requests.
 * Unplugging removes the plug and hands whatever is queued to the
 * driver's request_fn.
 */
void generic_unplug_device(struct request_queue *q)
{
	if (blk_queue_plugged(q)) {
		spin_lock_irq(q->queue_lock);
		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
	}
}
EXPORT_SYMBOL(generic_unplug_device);

static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
				   struct page *page)
{
	struct request_queue *q = bdi->unplug_io_data;

	blk_unplug(q);
}

void blk_unplug_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, unplug_work);

	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
				q->rq.count[READ] + q->rq.count[WRITE]);

	q->unplug_fn(q);
}

void blk_unplug_timeout(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;

	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
				q->rq.count[READ] + q->rq.count[WRITE]);

	kblockd_schedule_work(q, &q->unplug_work);
}

void blk_unplug(struct request_queue *q)
{
	/*
	 * devices don't necessarily have an ->unplug_fn defined
	 */
	if (q->unplug_fn) {
		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
					q->rq.count[READ] + q->rq.count[WRITE]);

		q->unplug_fn(q);
	}
}
EXPORT_SYMBOL(blk_unplug);

static void blk_invoke_request_fn(struct request_queue *q)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	/*
	 * Avoid recursing into ->request_fn(): if we are already inside
	 * it, mark the queue plugged and let the kblockd unplug work run
	 * it again shortly instead.
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
		queue_flag_set(QUEUE_FLAG_PLUGGED, q);
		kblockd_schedule_work(q, &q->unplug_work);
	}
}

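/**
 * blk_start_queue - restart a previously stopped queue
 * @q:	the queue in question
 *
 * blk_start_queue() clears the stop flag on the queue and invokes the
 * request_fn if there is work to do.  Also see blk_stop_queue().
 * Queue lock must be held, interrupts disabled.
 */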
void blk_start_queue(struct request_queue *q)
{
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	blk_invoke_request_fn(q);
}
EXPORT_SYMBOL(blk_start_queue);

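/**
 * blk_stop_queue - stop a queue
 * @q:	the queue in question
 *
 * Removes the plug and marks the queue stopped so that the request_fn is
 * no longer invoked, typically because the driver temporarily cannot
 * accept more requests.  Dispatch resumes when blk_start_queue() is
 * called.  Queue lock must be held.
 */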
void blk_stop_queue(struct request_queue *q)
{
	blk_remove_plug(q);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);

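/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q:	the queue
 *
 * The block layer may perform asynchronous callback activity on a queue,
 * such as calling the unplug function after a timeout.  This routine
 * cancels the pending unplug timer and flushes queued unplug work, which
 * is useful when a device is being torn down.
 */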
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->unplug_timer);
	kblockd_flush_work(&q->unplug_work);
}
EXPORT_SYMBOL(blk_sync_queue);

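/**
 * __blk_run_queue - run a single device queue
 * @q:	the queue to run
 *
 * Removes the plug and, if the queue has work pending, invokes the
 * request_fn (deferring to kblockd rather than recursing).
 * Queue lock must already be held.
 */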
void __blk_run_queue(struct request_queue *q)
{
	blk_remove_plug(q);

	/*
	 * Only kick ->request_fn() if there is actually work pending;
	 * recursion is avoided inside blk_invoke_request_fn().
	 */
	if (!elv_queue_empty(q))
		blk_invoke_request_fn(q);
}
EXPORT_SYMBOL(__blk_run_queue);

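/**
 * blk_run_queue - run a single device queue
 * @q:	the queue to run
 *
 * Like __blk_run_queue(), but takes the queue lock and disables
 * interrupts around the call.
 */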
void blk_run_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);

void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}

void blk_cleanup_queue(struct request_queue *q)
{
	/*
	 * We have process context here and may sleep: make sure any
	 * pending unplug timer/work has finished before marking the
	 * queue dead and dropping our reference.
	 */
	blk_sync_queue(q);

	mutex_lock(&q->sysfs_lock);
	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
	mutex_unlock(&q->sysfs_lock);

	if (q->elevator)
		elevator_exit(q->elevator);

	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

static int blk_init_free_list(struct request_queue *q)
{
	struct request_list *rl = &q->rq;

	rl->count[READ] = rl->count[WRITE] = 0;
	rl->starved[READ] = rl->starved[WRITE] = 0;
	rl->elvpriv = 0;
	init_waitqueue_head(&rl->wait[READ]);
	init_waitqueue_head(&rl->wait[WRITE]);

	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
				mempool_free_slab, request_cachep, q->node);

	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
	return blk_alloc_queue_node(gfp_mask, -1);
}
EXPORT_SYMBOL(blk_alloc_queue);

struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
	struct request_queue *q;
	int err;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				gfp_mask | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
	q->backing_dev_info.unplug_io_data = q;
	err = bdi_init(&q->backing_dev_info);
	if (err) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	init_timer(&q->unplug_timer);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_WORK(&q->unplug_work, blk_unplug_work);

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->sysfs_lock);
	spin_lock_init(&q->__queue_lock);

	return q;
}
EXPORT_SYMBOL(blk_alloc_queue_node);

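/**
 * blk_init_queue - prepare a request queue for use with a block device
 * @rfn:  function called to process requests that have been placed on
 *        the queue
 * @lock: request queue spin lock
 *
 * Description:
 *    Allocates a request queue and sets it up so that requests queued via
 *    generic_make_request() are buffered, sorted and merged, and handed to
 *    @rfn for processing.  @rfn is not called from the context that queues
 *    the request; it is usually invoked from the unplug path or from a
 *    driver calling one of the blk_*run_queue() helpers.  If @lock is NULL,
 *    the queue's embedded lock is used.
 *
 *    Release the queue with blk_cleanup_queue() when it is no longer needed.
 */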
struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);

struct request_queue *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q)) {
		kmem_cache_free(blk_requestq_cachep, q);
		return NULL;
	}

	/*
	 * if caller didn't supply a lock, they get per-queue locking with
	 * the embedded lock
	 */
	if (!lock)
		lock = &q->__queue_lock;

	q->request_fn = rfn;
	q->prep_rq_fn = NULL;
	q->unplug_fn = generic_unplug_device;
	q->queue_flags = (1 << QUEUE_FLAG_CLUSTER |
			  1 << QUEUE_FLAG_STACKABLE);
	q->queue_lock = lock;

	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);

	blk_queue_make_request(q, __make_request);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

	q->sg_reserved_size = INT_MAX;

	blk_set_cmd_filter_defaults(&q->cmd_filter);

	/*
	 * all done, set up the default I/O scheduler for this queue
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	blk_put_queue(q);
	return NULL;
}
EXPORT_SYMBOL(blk_init_queue_node);

int blk_get_queue(struct request_queue *q)
{
	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
		kobject_get(&q->kobj);
		return 0;
	}

	return 1;
}

static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV)
		elv_put_request(q, rq);
	mempool_free(rq, q->rq.rq_pool);
}

static struct request *
blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

	if (!rq)
		return NULL;

	blk_rq_init(q, rq);

	rq->cmd_flags = rw | REQ_ALLOCED;

	if (priv) {
		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
			mempool_free(rq, q->rq.rq_pool);
			return NULL;
		}
		rq->cmd_flags |= REQ_ELVPRIV;
	}

	return rq;
}

/*
 * ioc_batching returns true if the io_context is a valid batching
 * context: either it is batching right now, or it was recently made a
 * batcher and is still within its batching window.
 */
static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc)
		return 0;

	/*
	 * Make sure the process is able to allocate at least one request
	 * even if the batch times out, otherwise we could theoretically
	 * lose wakeups.
	 */
	return ioc->nr_batch_requests == q->nr_batching ||
		(ioc->nr_batch_requests > 0
		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
}

/*
 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
 * will cause the process to be a "batcher" on all queues in the system.
 * This is the behaviour that is desired when a process puts the queue
 * under memory pressure by waiting for a free request.
 */
static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
{
	if (!ioc || ioc_batching(q, ioc))
		return;

	ioc->nr_batch_requests = q->nr_batching;
	ioc->last_waited = jiffies;
}

static void __freed_request(struct request_queue *q, int rw)
{
	struct request_list *rl = &q->rq;

	if (rl->count[rw] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, rw);

	if (rl->count[rw] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[rw]))
			wake_up(&rl->wait[rw]);

		blk_clear_queue_full(q, rw);
	}
}

/*
 * A request has just been released.  Account for it, update the full and
 * congestion status, wake up any waiters.  Called under q->queue_lock.
 */
static void freed_request(struct request_queue *q, int rw, int priv)
{
	struct request_list *rl = &q->rq;

	rl->count[rw]--;
	if (priv)
		rl->elvpriv--;

	__freed_request(q, rw);

	if (unlikely(rl->starved[rw ^ 1]))
		__freed_request(q, rw ^ 1);
}

#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)

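/*
 * Get a free request.  The queue lock must be held on entry.
 * Returns NULL on failure, with the queue lock still held.
 * Returns a request on success, with the queue lock *not* held.
 */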
static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	struct request *rq = NULL;
	struct request_list *rl = &q->rq;
	struct io_context *ioc = NULL;
	const int rw = rw_flags & 0x01;
	int may_queue, priv;

	may_queue = elv_may_queue(q, rw_flags);
	if (may_queue == ELV_MQUEUE_NO)
		goto rq_starved;

	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[rw]+1 >= q->nr_requests) {
			ioc = current_io_context(GFP_ATOMIC, q->node);
			/*
			 * The queue will fill after this allocation, so set
			 * it as full, and mark this process as "batching".
			 * This process will be allowed to complete a batch of
			 * requests, others will be blocked.
			 */
			if (!blk_queue_full(q, rw)) {
				ioc_set_batching(q, ioc);
				blk_set_queue_full(q, rw);
			} else {
				if (may_queue != ELV_MQUEUE_MUST
						&& !ioc_batching(q, ioc)) {
					/*
					 * The queue is full and the allocating
					 * process is not a "batcher", and not
					 * exempted by the IO scheduler
					 */
					goto out;
				}
			}
		}
		blk_set_queue_congested(q, rw);
	}

	/*
	 * Only allow batching queuers to allocate up to 50% over the defined
	 * limit of requests, otherwise we could have thousands of requests
	 * allocated with any setting of ->nr_requests
	 */
	if (rl->count[rw] >= (3 * q->nr_requests / 2))
		goto out;

	rl->count[rw]++;
	rl->starved[rw] = 0;

	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	if (priv)
		rl->elvpriv++;

	spin_unlock_irq(q->queue_lock);

	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
	if (unlikely(!rq)) {
		/*
		 * Allocation failed, presumably due to memory. Undo anything
		 * we might have messed up.
		 *
		 * Allocating task should really be put onto the front of the
		 * wait queue, but this is pretty rare.
		 */
		spin_lock_irq(q->queue_lock);
		freed_request(q, rw, priv);

		/*
		 * in the very unlikely event that allocation failed and no
		 * requests for this direction were pending, mark us starved
		 * so that freeing of a request in the other direction will
		 * notice us. another possible fix would be to split the
		 * rq mempool into READ and WRITE
		 */
rq_starved:
		if (unlikely(rl->count[rw] == 0))
			rl->starved[rw] = 1;

		goto out;
	}

	/*
	 * ioc may be NULL here, and ioc_batching will be false. That's
	 * OK, if the queue is under the request limit then requests need
	 * not count toward the nr_batch_requests limit. There will always
	 * be some limit enforced by BLK_BATCH_TIME.
	 */
	if (ioc_batching(q, ioc))
		ioc->nr_batch_requests--;

	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
out:
	return rq;
}

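/*
 * No available requests for this queue, wait for some requests to become
 * available.
 *
 * Called with q->queue_lock held, and returns with it unlocked.
 */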
static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
{
	const int rw = rw_flags & 0x01;
	struct request *rq;

	rq = get_request(q, rw_flags, bio, GFP_NOIO);
	while (!rq) {
		DEFINE_WAIT(wait);
		struct io_context *ioc;
		struct request_list *rl = &q->rq;

		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
				TASK_UNINTERRUPTIBLE);

		blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);

		__generic_unplug_device(q);
		spin_unlock_irq(q->queue_lock);
		io_schedule();

		/*
		 * After sleeping, we become a "batching" process and
		 * will be able to allocate at least one request, and
		 * up to a big batch of them for a small period of time.
		 * See ioc_batching, ioc_set_batching
		 */
		ioc = current_io_context(GFP_NOIO, q->node);
		ioc_set_batching(q, ioc);

		spin_lock_irq(q->queue_lock);
		finish_wait(&rl->wait[rw], &wait);

		rq = get_request(q, rw_flags, bio, GFP_NOIO);
	}

	return rq;
}

struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
	struct request *rq;

	BUG_ON(rw != READ && rw != WRITE);

	spin_lock_irq(q->queue_lock);
	if (gfp_mask & __GFP_WAIT) {
		rq = get_request_wait(q, rw, NULL);
	} else {
		rq = get_request(q, rw, NULL, gfp_mask);
		if (!rq)
			spin_unlock_irq(q->queue_lock);
	}
	/* q->queue_lock is unlocked at this point */

	return rq;
}
EXPORT_SYMBOL(blk_get_request);

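/**
 * blk_start_queueing - initiate dispatch of requests to device
 * @q:	request queue to kick into gear
 *
 * A helper that removes the need to know whether a queue is plugged:
 * it unplugs a plugged queue, or calls the request_fn directly on an
 * unplugged one.  The queue lock must be held.
 */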
void blk_start_queueing(struct request_queue *q)
{
	if (!blk_queue_plugged(q)) {
		if (unlikely(blk_queue_stopped(q)))
			return;
		q->request_fn(q);
	} else
		__generic_unplug_device(q);
}
EXPORT_SYMBOL(blk_start_queueing);

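/**
 * blk_requeue_request - put a request back on the queue
 * @q:	request queue where the request should be inserted
 * @rq:	request to be inserted
 *
 * Drivers often keep queueing requests until the hardware cannot accept
 * more; when that happens the request is put back on the queue with this
 * helper.  Must be called with the queue lock held.
 */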
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
	blk_delete_timer(rq);
	blk_clear_rq_complete(rq);
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);

	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	elv_requeue_request(q, rq);
}
EXPORT_SYMBOL(blk_requeue_request);

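/**
 * blk_insert_request - insert a special request into a request queue
 * @q:		request queue where the request should be inserted
 * @rq:		request to be inserted
 * @at_head:	insert request at head or tail of queue
 * @data:	private data
 *
 * Description:
 *    Many block devices need to execute commands asynchronously, so they
 *    don't block the whole kernel from preemption during request execution.
 *    This is accomplished by inserting artificial requests tagged as
 *    REQ_TYPE_SPECIAL into the corresponding request queue, and letting
 *    them be scheduled for actual execution by the request queue.
 */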
void blk_insert_request(struct request_queue *q, struct request *rq,
			int at_head, void *data)
{
	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
	unsigned long flags;

	/*
	 * tell I/O scheduler that this isn't a regular read/write (ie it
	 * must not attempt merges on this) and that it acts as a soft
	 * barrier
	 */
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd_flags |= REQ_SOFTBARRIER;

	rq->special = data;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * If command is tagged, release the tag
	 */
	if (blk_rq_tagged(rq))
		blk_queue_end_tag(q, rq);

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where, 0);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);

/*
 * add-request adds a request to the linked list.
 * queue lock is held and interrupts disabled, as we muck with the
 * request queue list.
 */
static inline void add_request(struct request_queue *q, struct request *req)
{
	drive_stat_acct(req, 1);

	/*
	 * elevator indicated where it wants this request to be
	 * inserted at elevator_merge time
	 */
	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
}

static void part_round_stats_single(int cpu, struct hd_struct *part,
				    unsigned long now)
{
	if (now == part->stamp)
		return;

	if (part->in_flight) {
		__part_stat_add(cpu, part, time_in_queue,
				part->in_flight * (now - part->stamp));
		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
	}
	part->stamp = now;
}

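/**
 * part_round_stats - round off the performance stats on a struct disk_stats
 * @cpu: cpu number for stats access
 * @part: target partition
 *
 * The average IO queue length and utilisation statistics are maintained
 * by observing the current state of the queue length and the amount of
 * time it has been in that state.  Rather than updating these counters
 * on every IO, this is called whenever the state changes, accounting for
 * the time since the last update.
 */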
void part_round_stats(int cpu, struct hd_struct *part)
{
	unsigned long now = jiffies;

	if (part->partno)
		part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
	part_round_stats_single(cpu, part, now);
}
EXPORT_SYMBOL_GPL(part_round_stats);

/*
 * queue lock must be held
 */
void __blk_put_request(struct request_queue *q, struct request *req)
{
	if (unlikely(!q))
		return;
	if (unlikely(--req->ref_count))
		return;

	elv_completed_request(q, req);

	/*
	 * Request may not have originated from ll_rw_blk. if not,
	 * it didn't come out of our reserved rq pools
	 */
	if (req->cmd_flags & REQ_ALLOCED) {
		int rw = rq_data_dir(req);
		int priv = req->cmd_flags & REQ_ELVPRIV;

		BUG_ON(!list_empty(&req->queuelist));
		BUG_ON(!hlist_unhashed(&req->hash));

		blk_free_request(q, req);
		freed_request(q, rw, priv);
	}
}
EXPORT_SYMBOL_GPL(__blk_put_request);

void blk_put_request(struct request *req)
{
	unsigned long flags;
	struct request_queue *q = req->q;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_put_request);

void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cpu = bio->bi_comp_cpu;
	req->cmd_type = REQ_TYPE_FS;

	/*
	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
	 */
	if (bio_rw_ahead(bio))
		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
				   REQ_FAILFAST_DRIVER);
	if (bio_failfast_dev(bio))
		req->cmd_flags |= REQ_FAILFAST_DEV;
	if (bio_failfast_transport(bio))
		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	if (bio_failfast_driver(bio))
		req->cmd_flags |= REQ_FAILFAST_DRIVER;

	/*
	 * REQ_BARRIER implies no merging, but lets make it explicit
	 */
	if (unlikely(bio_discard(bio))) {
		req->cmd_flags |= REQ_DISCARD;
		if (bio_barrier(bio))
			req->cmd_flags |= REQ_SOFTBARRIER;
		req->q->prepare_discard_fn(req->q, req);
	} else if (unlikely(bio_barrier(bio)))
		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

	if (bio_sync(bio))
		req->cmd_flags |= REQ_RW_SYNC;
	if (bio_rw_meta(bio))
		req->cmd_flags |= REQ_RW_META;

	req->errors = 0;
	req->hard_sector = req->sector = bio->bi_sector;
	req->ioprio = bio_prio(bio);
	req->start_time = jiffies;
	blk_rq_bio_prep(req->q, req, bio);
}

static int __make_request(struct request_queue *q, struct bio *bio)
{
	struct request *req;
	int el_ret, nr_sectors, barrier, discard, err;
	const unsigned short prio = bio_prio(bio);
	const int sync = bio_sync(bio);
	int rw_flags;

	nr_sectors = bio_sectors(bio);

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
	 * ISA dma in theory)
	 */
	blk_queue_bounce(q, &bio);

	barrier = bio_barrier(bio);
	if (unlikely(barrier) && bio_has_data(bio) &&
	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	discard = bio_discard(bio);
	if (unlikely(discard) && !q->prepare_discard_fn) {
		err = -EOPNOTSUPP;
		goto end_io;
	}

	spin_lock_irq(q->queue_lock);

	if (unlikely(barrier) || elv_queue_empty(q))
		goto get_rq;

	el_ret = elv_merge(q, &req, bio);
	switch (el_ret) {
	case ELEVATOR_BACK_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_back_merge_fn(q, req, bio))
			break;

		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);

		req->biotail->bi_next = bio;
		req->biotail = bio;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		if (!blk_rq_cpu_valid(req))
			req->cpu = bio->bi_comp_cpu;
		drive_stat_acct(req, 0);
		if (!attempt_back_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	case ELEVATOR_FRONT_MERGE:
		BUG_ON(!rq_mergeable(req));

		if (!ll_front_merge_fn(q, req, bio))
			break;

		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);

		bio->bi_next = req->bio;
		req->bio = bio;

		/*
		 * may not be valid. if the low level driver said
		 * it didn't need a bounce buffer then it better
		 * not touch req->buffer either...
		 */
		req->buffer = bio_data(bio);
		req->current_nr_sectors = bio_cur_sectors(bio);
		req->hard_cur_sectors = req->current_nr_sectors;
		req->sector = req->hard_sector = bio->bi_sector;
		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
		req->ioprio = ioprio_best(req->ioprio, prio);
		if (!blk_rq_cpu_valid(req))
			req->cpu = bio->bi_comp_cpu;
		drive_stat_acct(req, 0);
		if (!attempt_front_merge(q, req))
			elv_merged_request(q, req, el_ret);
		goto out;

	/* ELV_NO_MERGE: elevator says don't/can't merge. */
	default:
		;
	}

get_rq:
	/*
	 * This sync check and mask will be re-done in init_request_from_bio(),
	 * but we need to set it earlier to expose the sync flag to the
	 * rq allocator and io schedulers.
	 */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_RW_SYNC;

	/*
	 * Grab a free request. This might sleep but can not fail.
	 * Returns with the queue unlocked.
	 */
	req = get_request_wait(q, rw_flags, bio);

	/*
	 * After dropping the lock and possibly sleeping here, our request
	 * may now be mergeable after it had proven unmergeable (above).
	 * We don't worry about that case for efficiency. It won't happen
	 * often, and the elevators are able to handle it.
	 */
	init_request_from_bio(req, bio);

	spin_lock_irq(q->queue_lock);
	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
	    bio_flagged(bio, BIO_CPU_AFFINE))
		req->cpu = blk_cpu_to_group(smp_processor_id());
	if (elv_queue_empty(q))
		blk_plug_device(q);
	add_request(q, req);
out:
	if (sync)
		__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
	return 0;

end_io:
	bio_endio(bio, err);
	return 0;
}

/*
 * If bio->bi_dev is a partition, remap the location
 */
static inline void blk_partition_remap(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;

	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
		struct hd_struct *p = bdev->bd_part;

		bio->bi_sector += p->start_sect;
		bio->bi_bdev = bdev->bd_contains;

		blk_add_trace_remap(bdev_get_queue(bio->bi_bdev), bio,
				    bdev->bd_dev, bio->bi_sector,
				    bio->bi_sector - p->start_sect);
	}
}

static void handle_bad_sector(struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
			bio->bi_rw,
			(unsigned long long)bio->bi_sector + bio_sectors(bio),
			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static int should_fail_request(struct bio *bio)
{
	struct hd_struct *part = bio->bi_bdev->bd_part;

	if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail)
		return should_fail(&fail_make_request, bio->bi_size);

	return 0;
}

static int __init fail_make_request_debugfs(void)
{
	return init_fault_attr_dentries(&fail_make_request,
					"fail_make_request");
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline int should_fail_request(struct bio *bio)
{
	return 0;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Check whether this bio extends beyond the end of the device.
 */
static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
{
	sector_t maxsector;

	if (!nr_sectors)
		return 0;

	/* Test device or partition size, when known. */
	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
	if (maxsector) {
		sector_t sector = bio->bi_sector;

		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
			/*
			 * This may well happen - the kernel calls bread()
			 * without checking the size of the device, e.g., when
			 * mounting a device.
			 */
			handle_bad_sector(bio);
			return 1;
		}
	}

	return 0;
}

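/*
 * __generic_make_request() performs the generic validity checks on a bio
 * (existing queue, size limits, dead queue, fault injection, partition
 * remapping, integrity preparation) and then hands it to the queue's
 * make_request_fn.  Stacking drivers may return a non-zero value from
 * make_request_fn to have the (possibly re-targeted) bio resubmitted,
 * which is what the surrounding loop handles.
 */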
static inline void __generic_make_request(struct bio *bio)
{
	struct request_queue *q;
	sector_t old_sector;
	int ret, nr_sectors = bio_sectors(bio);
	dev_t old_dev;
	int err = -EIO;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Resolve the mapping until finished; stacking drivers may
	 * re-target the bio, in which case we loop and re-check it
	 * against the new queue.
	 */
	old_sector = -1;
	old_dev = 0;
	do {
		char b[BDEVNAME_SIZE];

		q = bdev_get_queue(bio->bi_bdev);
		if (!q) {
			printk(KERN_ERR
			       "generic_make_request: Trying to access "
				"nonexistent block-device %s (%Lu)\n",
				bdevname(bio->bi_bdev, b),
				(long long) bio->bi_sector);
end_io:
			bio_endio(bio, err);
			break;
		}

		if (unlikely(nr_sectors > q->max_hw_sectors)) {
			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
				bdevname(bio->bi_bdev, b),
				bio_sectors(bio),
				q->max_hw_sectors);
			goto end_io;
		}

		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
			goto end_io;

		if (should_fail_request(bio))
			goto end_io;

		/*
		 * If this device has partitions, remap block n
		 * of partition p to block n+start(p) of the disk.
		 */
		blk_partition_remap(bio);

		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
			goto end_io;

		if (old_sector != -1)
			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
					    old_sector);

		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);

		old_sector = bio->bi_sector;
		old_dev = bio->bi_bdev->bd_dev;

		if (bio_check_eod(bio, nr_sectors))
			goto end_io;
		if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
		    (bio_discard(bio) && !q->prepare_discard_fn)) {
			err = -EOPNOTSUPP;
			goto end_io;
		}

		ret = q->make_request_fn(q, bio);
	} while (ret);
}

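/*
 * We only want one ->make_request_fn to be active at a time, else stack
 * usage with stacked devices could be a problem.  current->bio_list and
 * current->bio_tail are used to flatten the recursion: if make_request is
 * re-entered on the same task, the new bio is simply appended to the list
 * and handled by the loop in the outermost invocation.
 */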
void generic_make_request(struct bio *bio)
{
	if (current->bio_tail) {
		/* make_request is active */
		*(current->bio_tail) = bio;
		bio->bi_next = NULL;
		current->bio_tail = &bio->bi_next;
		return;
	}
	/*
	 * Flatten any recursion: the first loop iteration below points
	 * current->bio_tail at current->bio_list, so any bio submitted
	 * by __generic_make_request() (e.g. by a stacking driver) is
	 * appended to that list by the branch above rather than being
	 * processed on the stack.  We then keep taking bios off the
	 * list until it is empty.
	 */
	BUG_ON(bio->bi_next);
	do {
		current->bio_list = bio->bi_next;
		if (bio->bi_next == NULL)
			current->bio_tail = &current->bio_list;
		else
			bio->bi_next = NULL;
		__generic_make_request(bio);
		bio = current->bio_list;
	} while (bio);
	current->bio_tail = NULL;
}
EXPORT_SYMBOL(generic_make_request);

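/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @rw:  whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: the &struct bio which describes the I/O
 *
 * submit_bio() is used by filesystems and other upper-level users to
 * submit I/O.  It ORs @rw into bio->bi_rw, does per-task and VM
 * accounting for bios that carry data, and hands the bio on to
 * generic_make_request().
 */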
void submit_bio(int rw, struct bio *bio)
{
	int count = bio_sectors(bio);

	bio->bi_rw |= rw;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		if (rw & WRITE) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
				current->comm, task_pid_nr(current),
				(rw & WRITE) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_sector,
				bdevname(bio->bi_bdev, b));
		}
	}

	generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);

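/**
 * blk_rq_check_limits - check a request against the queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been built against the weaker limits of an upper-level
 *    queue in a request stacking driver and may violate the limits of @q.
 *    Since the block layer and the underlying driver trust @rq after it
 *    has been inserted into @q, it should be checked against @q with this
 *    helper before insertion.
 */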
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
	if (rq->nr_sectors > q->max_sectors ||
	    rq->data_len > q->max_hw_sectors << 9) {
		printk(KERN_ERR "%s: over max size limit.\n", __func__);
		return -EIO;
	}

	/*
	 * queue's settings related to segment counting like q->bounce_pfn
	 * may differ from that of other stacking queues.
	 * Recalculate it to check the request correctly on this queue's
	 * limitation.
	 */
	blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > q->max_phys_segments ||
	    rq->nr_phys_segments > q->max_hw_segments) {
		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_check_limits);

/**
 * blk_insert_cloned_request - helper for stacking drivers to submit a request
 * @q:  the queue to submit the request to
 * @rq: the request being queued
 */
int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	unsigned long flags;

	if (blk_rq_check_limits(q, rq))
		return -EIO;

#ifdef CONFIG_FAIL_MAKE_REQUEST
	if (rq->rq_disk && rq->rq_disk->part0.make_it_fail &&
	    should_fail(&fail_make_request, blk_rq_bytes(rq)))
		return -EIO;
#endif

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * Submitting request must be dequeued before calling this function
	 * because it will be linked to another request_queue
	 */
	BUG_ON(blk_queued_rq(rq));

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);

	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

/**
 * blkdev_dequeue_request - dequeue request and start timeout timer
 * @req: request to dequeue
 *
 * Dequeue @req and start the block layer timeout timer on it.  This
 * hands off the request to the driver.
 */
void blkdev_dequeue_request(struct request *req)
{
	elv_dequeue_request(req->q, req);

	/*
	 * We are now handing the request to the hardware, add the
	 * timeout handler.
	 */
	blk_add_timer(req);
}
EXPORT_SYMBOL(blkdev_dequeue_request);

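/**
 * __end_that_request_first - end I/O on a request
 * @req:      the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, and sets it up
 *     for the next range of segments (if any).
 *
 * Return:
 *     %0 - we are done with this request, call end_that_request_last()
 *     %1 - still buffers pending for this request
 */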
static int __end_that_request_first(struct request *req, int error,
				    int nr_bytes)
{
	int total_bytes, bio_nbytes, next_idx = 0;
	struct bio *bio;

	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);

	/*
	 * for a REQ_TYPE_BLOCK_PC request, we want to carry any eventual
	 * sense key with us all the way through
	 */
	if (!blk_pc_request(req))
		req->errors = 0;

	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
				req->rq_disk ? req->rq_disk->disk_name : "?",
				(unsigned long long)req->sector);
	}

	if (blk_fs_request(req) && req->rq_disk) {
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(req->rq_disk, req->sector);
		part_stat_add(cpu, part, sectors[rw], nr_bytes >> 9);
		part_stat_unlock();
	}

	total_bytes = bio_nbytes = 0;
	while ((bio = req->bio) != NULL) {
		int nbytes;

		/*
		 * For an empty barrier request, the low level driver must
		 * store a potential error location in ->sector. We pass
		 * that back up in ->bi_sector.
		 */
		if (blk_empty_barrier(req))
			bio->bi_sector = req->sector;

		if (nr_bytes >= bio->bi_size) {
			req->bio = bio->bi_next;
			nbytes = bio->bi_size;
			req_bio_endio(req, bio, nbytes, error);
			next_idx = 0;
			bio_nbytes = 0;
		} else {
			int idx = bio->bi_idx + next_idx;

			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
				blk_dump_rq_flags(req, "__end_that");
				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
				       __func__, bio->bi_idx, bio->bi_vcnt);
				break;
			}

			nbytes = bio_iovec_idx(bio, idx)->bv_len;
			BIO_BUG_ON(nbytes > bio->bi_size);

			/*
			 * not a complete bvec done
			 */
			if (unlikely(nbytes > nr_bytes)) {
				bio_nbytes += nr_bytes;
				total_bytes += nr_bytes;
				break;
			}

			/*
			 * advance to the next vector
			 */
			next_idx++;
			bio_nbytes += nbytes;
		}

		total_bytes += nbytes;
		nr_bytes -= nbytes;

		bio = req->bio;
		if (bio) {
			/*
			 * end more in this run, or just return 'not-done'
			 */
			if (unlikely(nr_bytes <= 0))
				break;
		}
	}

	/*
	 * completely done
	 */
	if (!req->bio)
		return 0;

	/*
	 * if the request wasn't completed, update state
	 */
	if (bio_nbytes) {
		req_bio_endio(req, bio, bio_nbytes, error);
		bio->bi_idx += next_idx;
		bio_iovec(bio)->bv_offset += nr_bytes;
		bio_iovec(bio)->bv_len -= nr_bytes;
	}

	blk_recalc_rq_sectors(req, total_bytes >> 9);
	blk_recalc_rq_segments(req);
	return 1;
}

/*
 * queue lock must be held
 */
static void end_that_request_last(struct request *req, int error)
{
	struct gendisk *disk = req->rq_disk;

	if (blk_rq_tagged(req))
		blk_queue_end_tag(req->q, req);

	if (blk_queued_rq(req))
		elv_dequeue_request(req->q, req);

	if (unlikely(laptop_mode) && blk_fs_request(req))
		laptop_io_completion();

	blk_delete_timer(req);

	/*
	 * Account IO completion.  bar_rq isn't accounted as a normal
	 * IO on queueing nor completion.  Accounting the containing
	 * request is enough.
	 */
	if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(disk, req->sector);

		part_stat_inc(cpu, part, ios[rw]);
		part_stat_add(cpu, part, ticks[rw], duration);
		part_round_stats(cpu, part);
		part_dec_in_flight(part);

		part_stat_unlock();
	}

	if (req->end_io)
		req->end_io(req, error);
	else {
		if (blk_bidi_rq(req))
			__blk_put_request(req->next_rq->q, req->next_rq);

		__blk_put_request(req->q, req);
	}
}

/**
 * blk_rq_bytes - Returns bytes left to complete in the entire request
 * @rq: the request being processed
 **/
unsigned int blk_rq_bytes(struct request *rq)
{
	if (blk_fs_request(rq))
		return rq->hard_nr_sectors << 9;

	return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_bytes);

/**
 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
 * @rq: the request being processed
 **/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
	if (blk_fs_request(rq))
		return rq->current_nr_sectors << 9;

	if (rq->bio)
		return rq->bio->bi_size;

	return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);

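/**
 * end_request - end I/O on the current segment of the request
 * @req:	the request being processed
 * @uptodate:	error value or %0/%1 uptodate flag
 *
 * Description:
 *     Ends I/O on the current segment of a request. If that is the only
 *     remaining segment, the request is also completed and freed.
 *
 *     This is a remnant of how older block drivers handled I/O completions.
 *     Modern drivers typically end I/O on the full request in one go and
 *     hence can use blk_end_request() or __blk_end_request() directly.
 */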
void end_request(struct request *req, int uptodate)
{
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;

	__blk_end_request(req, error, req->hard_cur_sectors << 9);
}
EXPORT_SYMBOL(end_request);

static int end_that_request_data(struct request *rq, int error,
				 unsigned int nr_bytes, unsigned int bidi_bytes)
{
	if (rq->bio) {
		if (__end_that_request_first(rq, error, nr_bytes))
			return 1;

		/* Bidi request must be completed as a whole */
		if (blk_bidi_rq(rq) &&
		    __end_that_request_first(rq->next_rq, error, bidi_bytes))
			return 1;
	}

	return 0;
}

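/**
 * blk_end_io - Generic end_io function to complete a request.
 * @rq:           the request being processed
 * @error:        %0 for success, < %0 for error
 * @nr_bytes:     number of bytes to complete @rq
 * @bidi_bytes:   number of bytes to complete @rq->next_rq
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non %0, this helper returns without
 *                completing the request.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - this request is not freed yet, it still has pending buffers.
 */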
static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes,
		      unsigned int bidi_bytes,
		      int (drv_callback)(struct request *))
{
	struct request_queue *q = rq->q;
	unsigned long flags = 0UL;

	if (end_that_request_data(rq, error, nr_bytes, bidi_bytes))
		return 1;

	/* Special feature for tricky drivers */
	if (drv_callback && drv_callback(rq))
		return 1;

	add_disk_randomness(rq->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	end_that_request_last(rq, error);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}

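/**
 * blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq.
 *     If @rq has leftover, sets it up for the next range of segments.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - still buffers pending for this request
 */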
int blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	return blk_end_io(rq, error, nr_bytes, 0, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_request);

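/**
 * __blk_end_request - Helper function for drivers to complete the request.
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Description:
 *     Must be called with the queue lock held, unlike blk_end_request(),
 *     which acquires the lock itself.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - still buffers pending for this request
 */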
int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
{
	if (rq->bio && __end_that_request_first(rq, error, nr_bytes))
		return 1;

	add_disk_randomness(rq->rq_disk);

	end_that_request_last(rq, error);

	return 0;
}
EXPORT_SYMBOL_GPL(__blk_end_request);

/**
 * blk_end_bidi_request - Helper function for drivers to complete bidi request.
 * @rq:         the bidi request being processed
 * @error:      %0 for success, < %0 for error
 * @nr_bytes:   number of bytes to complete @rq
 * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - still buffers pending for this request
 */
int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes,
			 unsigned int bidi_bytes)
{
	return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_bidi_request);

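/**
 * blk_update_request - Special helper function for request stacking drivers
 * @rq:       the request being processed
 * @error:    %0 for success, < %0 for error
 * @nr_bytes: number of bytes to complete @rq
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
 *     the request structure even if @rq has no leftover.
 *     If @rq has leftover, sets it up for the next range of segments.
 *     Intended for request stacking drivers that need to partially
 *     complete a request without freeing it.
 */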
void blk_update_request(struct request *rq, int error, unsigned int nr_bytes)
{
	if (!end_that_request_data(rq, error, nr_bytes, 0)) {
		/*
		 * These members are not updated in end_that_request_data()
		 * when all bios are completed.
		 * Update them so that the request stacking driver can find
		 * how many bytes remain in the request later.
		 */
		rq->nr_sectors = rq->hard_nr_sectors = 0;
		rq->current_nr_sectors = rq->hard_cur_sectors = 0;
	}
}
EXPORT_SYMBOL_GPL(blk_update_request);

/**
 * blk_end_request_callback - Special helper function for tricky drivers
 * @rq:           the request being processed
 * @error:        %0 for success, < %0 for error
 * @nr_bytes:     number of bytes to complete
 * @drv_callback: function called between completion of bios in the request
 *                and completion of the request.
 *                If the callback returns non %0, this helper returns without
 *                completing the request.
 *
 * Description:
 *     This is only for drivers that have a special requirement to hook into
 *     the completion path between bio completion and request completion.
 *     Ordinary drivers should use blk_end_request() or __blk_end_request()
 *     instead.
 *
 * Return:
 *     %0 - we are done with this request
 *     %1 - this request is not freed yet: it still has pending buffers or
 *          the driver doesn't want to finish it yet.
 */
int blk_end_request_callback(struct request *rq, int error,
			     unsigned int nr_bytes,
			     int (drv_callback)(struct request *))
{
	return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
}
EXPORT_SYMBOL_GPL(blk_end_request_callback);

void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
		     struct bio *bio)
{
	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
	rq->cmd_flags |= (bio->bi_rw & 3);

	if (bio_has_data(bio)) {
		rq->nr_phys_segments = bio_phys_segments(q, bio);
		rq->buffer = bio_data(bio);
	}
	rq->current_nr_sectors = bio_cur_sectors(bio);
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
	rq->data_len = bio->bi_size;

	rq->bio = rq->biotail = bio;

	if (bio->bi_bdev)
		rq->rq_disk = bio->bi_bdev->bd_disk;
}

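/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q: the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    Drivers that want to export their busy state set lld_busy_fn on the
 *    queue; if it is not set, this returns 0.
 *
 * Return:
 *    0 - Not busy (the request stacking driver should dispatch requests)
 *    1 - Busy (the request stacking driver should stop dispatching)
 */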
int blk_lld_busy(struct request_queue *q)
{
	if (q->lld_busy_fn)
		return q->lld_busy_fn(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

void kblockd_flush_work(struct work_struct *work)
{
	cancel_work_sync(work);
}
EXPORT_SYMBOL(kblockd_flush_work);

int __init blk_dev_init(void)
{
	kblockd_workqueue = create_workqueue("kblockd");
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	request_cachep = kmem_cache_create("blkdev_requests",
			sizeof(struct request), 0, SLAB_PANIC, NULL);

	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	return 0;
}