// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
 * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 *	-  July2000
 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
 */

/*
 * This handles all read/write requests to block devices
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

static DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
static struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
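
/*
 * Illustrative sketch (not part of this file): because the flag helpers
 * above are plain atomic bitops, the test-and-set form lets a caller make
 * sure a one-time action runs only once, e.g.:
 *
 *	if (!blk_queue_flag_test_and_set(QUEUE_FLAG_NOMERGES, q))
 *		pr_info("merging disabled on %s\n", q->disk->disk_name);
 *
 * QUEUE_FLAG_NOMERGES is just an example here; any queue_flags bit follows
 * the same pattern.
 */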

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

/**
 * blk_op_str - Return string XXX in REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format.  Useful in debugging and tracing bios or requests.  For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
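
/*
 * Illustrative sketch (not part of this file): blk_op_str() is handy in
 * driver debug output, e.g. when inspecting a bio:
 *
 *	pr_debug("%s: %s sector %llu\n",
 *		 bio->bi_bdev->bd_disk->disk_name,
 *		 blk_op_str(bio_op(bio)),
 *		 (unsigned long long)bio->bi_iter.bi_sector);
 */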

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_RESV_CONFLICT]	= { -EBADE,	"reservation conflict" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },
	[BLK_STS_OFFLINE]	= { -ENODEV,	"device offline" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* Command duration limit device-side timeout */
	[BLK_STS_DURATION_LIMIT]	= { -ETIME, "duration limit exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "<null>";
	return blk_errors[idx].name;
}
EXPORT_SYMBOL_GPL(blk_status_to_str);
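
/*
 * Illustrative sketch (not part of this file): a driver translates between
 * the two error spaces at the boundary where it receives an errno from a
 * lower layer but must complete block layer I/O; my_hw_submit() is a
 * hypothetical helper:
 *
 *	int err = my_hw_submit(rq);	// hypothetical driver helper
 *	if (err)
 *		blk_mq_end_request(rq, errno_to_blk_status(err));
 *
 * The reverse direction, reporting a blk_status_t to a caller that expects
 * an errno, goes through blk_status_to_errno().
 */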

/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     Some block drivers may leave memory allocated for these
 *     callbacks in the queue structure, and this function will cancel
 *     any of those callbacks that might be left and free them up.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head,
			struct request_queue, rcu_head);

	percpu_ref_exit(&q->q_usage_counter);
	kmem_cache_free(blk_requestq_cachep, q);
}

static void blk_free_queue(struct request_queue *q)
{
	blk_free_queue_stats(q->stats);
	if (queue_is_mq(q))
		blk_mq_release(q);

	ida_free(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue and frees it when the
 * refcount reaches 0.
 */
void blk_put_queue(struct request_queue *q)
{
	if (refcount_dec_and_test(&q->refs))
		blk_free_queue(q);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When the queue DYING flag is set, we need to block new req
	 * entering the queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);
	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}

/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * read pair of barrier in blk_freeze_queue_start(): we need
		 * to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the following wait may never
		 * return if the two reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}

int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * read pair of barrier in blk_freeze_queue_start(): we need
		 * to order reading the __PERCPU_REF_DEAD flag of
		 * .q_usage_counter against reading .mq_freeze_depth or the
		 * queue dying flag, otherwise the following wait may never
		 * return if the two reads are reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;

	q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
				  node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_id;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_tags, 0);

	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);

	refcount_set(&q->refs, 1);
	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	mutex_init(&q->rq_qos_mutex);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_stats;

	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_DEFAULT_RQ;

	return q;

fail_stats:
	blk_free_queue_stats(q->stats);
fail_id:
	ida_free(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}

/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Return: %true if the refcount was incremented, %false otherwise.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	refcount_inc(&q->refs);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);
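
/*
 * Illustrative sketch (not part of this file): code that stashes a queue
 * pointer beyond its caller's reference must balance blk_get_queue() with
 * blk_put_queue(); my_ctx is a hypothetical context structure:
 *
 *	if (blk_get_queue(q)) {
 *		my_ctx->q = q;
 *		...
 *		blk_put_queue(my_ctx->q);
 *	}
 *
 * blk_get_queue() can fail once the queue is dying, so the return value
 * must be checked.
 */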

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;

		if (bio->bi_bdev->bd_ro_warned)
			return;

		bio->bi_bdev->bd_ro_warned = true;
		/*
		 * Using an ioctl to set the underlying disk of a raid/dm
		 * device to read-only will trigger this.
		 */
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
	}
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size
 * of the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check a write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector) ||
	    !bio_zone_is_seq(bio))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static void __submit_bio(struct bio *bio)
{
	if (unlikely(!blk_crypto_bio_prep(&bio)))
		return;

	if (!bio->bi_bdev->bd_has_submit_bio) {
		blk_mq_submit_bio(bio);
	} else if (likely(bio_queue_enter(bio) == 0)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		disk->fops->submit_bio(bio);
		blk_queue_exit(disk->queue);
	}
}

/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some
 *    more bios through a recursive call to submit_bio_noacct.  If it did,
 *    we find a non-NULL value in bio_list and re-enter the loop from the
 *    top.
 *  - In this case we really did just take the bio off the top of the list
 *    (no pretending) and so remove it from bio_list, and call into
 *    ->submit_bio() again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for
		 * the same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;

	do {
		__submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
}

void submit_bio_noacct_nocheck(struct bio *bio)
{
	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/*
		 * Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem.  Use
	 * current->bio_list to collect a list of requests submitted by a
	 * ->submit_bio method while it is active, and then process them
	 * after it returned.
	 */
	if (current->bio_list)
		bio_list_add(&current->bio_list[0], bio);
	else if (!bio->bi_bdev->bd_has_submit_bio)
		__submit_bio_noacct_mq(bio);
	else
		__submit_bio_noacct(bio);
}

/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	blk_status_t status = BLK_STS_IOERR;

	might_sleep();

	/*
	 * For a REQ_NOWAIT based request, fail with BLK_STS_NOTSUPP
	 * if the queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	bio_check_ro(bio);
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bios early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf)) {
		if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
				 bio_op(bio) != REQ_OP_ZONE_APPEND))
			goto end_io;
		if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
			bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
			if (!bio_sectors(bio)) {
				status = BLK_STS_OK;
				goto end_io;
			}
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio_clear_polled(bio);

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	if (blk_throtl_bio(bio))
		return;
	submit_bio_noacct_nocheck(bio);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O to devices.  It is passed a fully set up
 * &struct bio that describes the I/O that needs to be done.  The bio will be
 * sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io()
 * has been called.
 */
void submit_bio(struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_READ) {
		task_io_account_read(bio->bi_iter.bi_size);
		count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
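
/*
 * Illustrative sketch (not part of this file): a typical asynchronous
 * caller allocates a bio, points it at a device and a completion handler,
 * and hands it off; my_end_io and my_ctx are hypothetical:
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		my_ctx_complete(ctx, blk_status_to_errno(bio->bi_status));
 *		bio_put(bio);
 *	}
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio->bi_private = ctx;
 *	bio->bi_end_io = my_end_io;
 *	submit_bio(bio);
 */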

/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on the queue associated with the bio.  Returns the
 * number of completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in an RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	struct block_device *bdev;
	struct request_queue *q;
	int ret = 0;

	bdev = READ_ONCE(bio->bi_bdev);
	if (!bdev)
		return 0;

	q = bdev_get_queue(bdev);
	if (cookie == BLK_QC_T_NONE ||
	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return 0;

	/*
	 * As the requests that require a zone lock are not plugged in the
	 * first place, directly accessing the plug instead of using
	 * blk_mq_plug() should not have any consequences during flushing for
	 * zoned devices.
	 */
	blk_flush_plug(current->plug, false);

	/*
	 * We need to be able to enter a frozen queue, similar to how
	 * timeouts also need to do that.  If that is blocked, then we can
	 * have pending IO when a queue freeze is started, and then the
	 * wait for the freeze to finish will wait for polled requests to
	 * timeout as the poller is prevented from entering the queue and
	 * completing them.  As long as we prevent new IO from being queued,
	 * that should be all that matters.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return 0;
	if (queue_is_mq(q)) {
		ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if (disk && disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);
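
/*
 * Illustrative sketch (not part of this file): a synchronous caller that
 * submitted a REQ_POLLED bio can spin on bio_poll() until its completion
 * handler fires; "done" is a hypothetical flag set by the bi_end_io
 * callback:
 *
 *	submit_bio(bio);
 *	while (!READ_ONCE(done))
 *		bio_poll(bio, NULL, 0);
 */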

/*
 * Helper to implement file_operations.iopoll.  Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses RCU to free bios, so the RCU read
	 * lock below is what makes the READ_ONCE() of kiocb->private safe:
	 * the submitter may complete and free the bio at any time, but as
	 * long as bios are freed through the RCU-deferred path the memory
	 * cannot be recycled while we poll on it.  Callers must therefore
	 * only use this helper for bios allocated from the bio cache (the
	 * REQ_ALLOC_CACHE flag), and must clear kiocb->private before the
	 * bio is released.
	 */
	rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio)
		ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);

void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time)
{
	part_stat_lock();
	update_io_ticks(bdev, start_time, false);
	part_stat_local_inc(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return start_time;
}
EXPORT_SYMBOL(bdev_start_io_acct);

/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
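
/*
 * Illustrative sketch (not part of this file): a bio based driver brackets
 * each bio it handles so the I/O shows up in diskstats; the completion
 * side here uses the bio_end_io_acct() wrapper from blkdev.h:
 *
 *	start = bio_start_io_acct(bio);
 *	...				// drive the I/O
 *	bio_end_io_acct(bio, start);
 *
 * If the bio was remapped to another device in between, the driver must
 * instead pass the original device to bio_end_io_acct_remapped().
 */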

void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(bdev, now, true);
	part_stat_inc(bdev, ios[sgrp]);
	part_stat_add(bdev, sectors[sgrp], sectors);
	part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(bdev, in_flight[op_is_write(op)]);
	part_stat_unlock();
}
EXPORT_SYMBOL(bdev_end_io_acct);

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
			      struct block_device *orig_bdev)
{
	bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.
 *    If the drivers want to export their busy state, they must implement
 *    the busy method in struct blk_mq_ops.
 *
 *    Basically, this function is used only by request stacking drivers
 *    to stop dispatching requests to underlying devices when underlying
 *    devices are busy.  This behavior helps more I/O merging on the queue
 *    of the request stacking driver and prevents I/O throughput regression
 *    on burst I/O load.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
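
/*
 * Illustrative sketch (not part of this file): block drivers can defer
 * small amounts of work to the shared kblockd workqueue instead of
 * creating their own; my_work_fn is hypothetical:
 *
 *	INIT_WORK(&my_dev->work, my_work_fn);
 *	kblockd_schedule_work(&my_dev->work);
 *
 * kblockd is created with WQ_MEM_RECLAIM | WQ_HIGHPRI (see blk_dev_init()
 * below), so it stays usable under memory pressure.
 */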

void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	plug->mq_list = NULL;
	plug->cached_rq = NULL;
	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->has_elevator = false;
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier.
	 */
	tsk->plug = plug;
}

/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until
 *   blk_finish_plug() is called.  However, the block layer may choose to
 *   submit requests before a call to blk_finish_plug() if the number of
 *   queued I/Os exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O
 *   is larger than %BLK_PLUG_FLUSH_SIZE.  The queued I/Os may also be
 *   submitted early if the task schedules (see below).
 *
 *   Tracking blk_plug inside the task_struct will help with auto-flushing
 *   the pending I/O should the task end up blocking between
 *   blk_start_plug() and blk_finish_plug().  This is important from a
 *   performance perspective, but also ensures that we don't deadlock.  For
 *   instance, if the task is blocking for a memory allocation, memory
 *   reclaim could end up wanting to free a page belonging to that request
 *   that is currently residing in our private plug.  By flushing the
 *   pending I/O when the process goes to sleep, we avoid this kind of
 *   deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
	blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);
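
/*
 * Illustrative sketch (not part of this file): batching several
 * submissions under one plug lets the block layer merge and dispatch
 * them together:
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */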

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
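
/*
 * Illustrative sketch (not part of this file): stacking drivers (md/raid
 * is the classic user) embed struct blk_plug_cb in a private structure and
 * let blk_check_plugged() allocate it once per plug; my_plug_cb and
 * my_unplug are hypothetical:
 *
 *	struct my_plug_cb {
 *		struct blk_plug_cb cb;
 *		struct bio_list pending;
 *	};
 *
 *	struct blk_plug_cb *cb = blk_check_plugged(my_unplug, my_dev,
 *						   sizeof(struct my_plug_cb));
 *	if (cb)
 *		bio_list_add(&container_of(cb, struct my_plug_cb, cb)->pending,
 *			     bio);
 *
 * my_unplug() is then invoked with the cb when the plug is flushed.
 */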

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule.  Since we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * on to them until the plug is finished.
	 */
	if (unlikely(!rq_list_empty(plug->cached_rq)))
		blk_mq_free_plug_rqs(plug);
}

/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
		__blk_flush_plug(plug, false);
		current->plug = NULL;
	}
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON((__force u32)REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}