// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the linux block-level IO handling code; everything
 * related to submission of block I/O requests down to the device drivers.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/fault-inject.h>
#include <linux/list_sort.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>

#define CREATE_TRACE_POINTS
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-rq-qos.h"

struct dentry *blk_debugfs_root;

EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);

DEFINE_IDA(blk_queue_ida);

/*
 * For queue allocation
 */
struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

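/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */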
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);

/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);

/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was already set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
	return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);

void blk_rq_init(struct request_queue *q, struct request *rq)
{
	memset(rq, 0, sizeof(*rq));

	INIT_LIST_HEAD(&rq->queuelist);
	rq->q = q;
	rq->__sector = (sector_t) -1;
	INIT_HLIST_NODE(&rq->hash);
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = BLK_MQ_NO_TAG;
	rq->internal_tag = BLK_MQ_NO_TAG;
	rq->start_time_ns = ktime_get_ns();
	rq->part = NULL;
	blk_crypto_rq_set_defaults(rq);
}
EXPORT_SYMBOL(blk_rq_init);

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(ZONE_RESET_ALL),
	REQ_OP_NAME(ZONE_OPEN),
	REQ_OP_NAME(ZONE_CLOSE),
	REQ_OP_NAME(ZONE_FINISH),
	REQ_OP_NAME(ZONE_APPEND),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

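/**
 * blk_op_str - Return string XXX in REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful in the debugging and tracing bio or request. For
 * invalid REQ_OP_XXX it returns string "UNKNOWN".
 */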
inline const char *blk_op_str(unsigned int op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);

static const struct {
	int		errno;
	const char	*name;
} blk_errors[] = {
	[BLK_STS_OK]		= { 0,		"" },
	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
	[BLK_STS_DEV_RESOURCE]	= { -EBUSY,	"device resource" },
	[BLK_STS_AGAIN]		= { -EAGAIN,	"nonblocking retry" },

	/* device mapper special case, should not leak out: */
	[BLK_STS_DM_REQUEUE]	= { -EREMCHG, "dm internal retry" },

	/* zone device specific errors */
	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },

	/* everything else not covered above: */
	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
};

blk_status_t errno_to_blk_status(int errno)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
		if (blk_errors[i].errno == errno)
			return (__force blk_status_t)i;
	}

	return BLK_STS_IOERR;
}
EXPORT_SYMBOL_GPL(errno_to_blk_status);

int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);

static void print_req_error(struct request *req, blk_status_t status,
		const char *caller)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return;

	printk_ratelimited(KERN_ERR
		"%s: %s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
		"phys_seg %u prio class %u\n",
		caller, blk_errors[idx].name,
		req->rq_disk ? req->rq_disk->disk_name : "?",
		blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
		req->cmd_flags & ~REQ_OP_MASK,
		req->nr_phys_segments,
		IOPRIO_PRIO_CLASS(req->ioprio));
}

static void req_bio_endio(struct request *rq, struct bio *bio,
			  unsigned int nbytes, blk_status_t error)
{
	if (error)
		bio->bi_status = error;

	if (unlikely(rq->rq_flags & RQF_QUIET))
		bio_set_flag(bio, BIO_QUIET);

	bio_advance(bio, nbytes);

	if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
		/*
		 * Partial zone append completions cannot be supported as the
		 * BIO fragments may end up not being written sequentially.
		 */
		if (bio->bi_iter.bi_size)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_iter.bi_sector = rq->__sector;
	}

	/* don't actually finish bio if it's part of flush sequence */
	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio_endio(bio);
}

void blk_dump_rq_flags(struct request *rq, char *msg)
{
	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
		rq->rq_disk ? rq->rq_disk->disk_name : "?",
		(unsigned long long) rq->cmd_flags);

	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
	       (unsigned long long)blk_rq_pos(rq),
	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
	       rq->bio, rq->biotail, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(blk_dump_rq_flags);

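/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     This function cancels the queue's timeout timer and flushes any
 *     pending timeout work so no such callbacks remain in flight.
 */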
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);
	cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);

/**
 * blk_set_pm_only - increment pm_only counter
 * @q: request queue pointer
 */
void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}
EXPORT_SYMBOL_GPL(blk_set_pm_only);

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only;

	pm_only = atomic_dec_return(&q->pm_only);
	WARN_ON_ONCE(pm_only < 0);
	if (pm_only == 0)
		wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);

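/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue kobject. When this reaches 0
 * we'll have blk_release_queue() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */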
void blk_put_queue(struct request_queue *q)
{
	kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);

void blk_set_queue_dying(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_DYING, q);

	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	blk_freeze_queue_start(q);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);

	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);

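/**
 * blk_cleanup_queue - shutdown a request queue
 * @q: request queue to shutdown
 *
 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
 * put it.  All future requests will be failed immediately with -ENODEV.
 *
 * Context: can sleep
 */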
void blk_cleanup_queue(struct request_queue *q)
{
	/* cannot be called from atomic context */
	might_sleep();

	WARN_ON_ONCE(blk_queue_registered(q));

	/* mark @q DYING, no new request or merges will be allowed afterwards */
	blk_set_queue_dying(q);

	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	/*
	 * Drain all requests queued before DYING marking. Set DEAD flag to
	 * prevent that blk_mq_run_hw_queues() accesses the hardware queues
	 * after draining finished.
	 */
	blk_freeze_queue(q);

	rq_qos_exit(q);

	blk_queue_flag_set(QUEUE_FLAG_DEAD, q);

	/* for synchronous bio-based driver finish in-flight integrity i/o */
	blk_flush_integrity();

	/* @q won't process any more request, flush async actions */
	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
	blk_sync_queue(q);

	if (queue_is_mq(q))
		blk_mq_exit_queue(q);

	/*
	 * In theory, request pool of sched_tags belongs to request queue.
	 * However, the current implementation requires tag_set for freeing
	 * requests, so free the pool of sched_tags before freeing the queue,
	 * while the queue's sysfs_lock is held.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		blk_mq_sched_free_requests(q);
	mutex_unlock(&q->sysfs_lock);

	percpu_ref_exit(&q->q_usage_counter);

	/* @q is and will stay empty, shutdown and put */
	blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);

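/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */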
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (true) {
		bool success = false;

		rcu_read_lock();
		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
			/*
			 * The code that increments the pm_only counter is
			 * responsible for ensuring that that counter is
			 * globally visible before the queue is unfrozen.
			 */
			if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
			    !blk_queue_pm_only(q)) {
				success = true;
			} else {
				percpu_ref_put(&q->q_usage_counter);
			}
		}
		rcu_read_unlock();

		if (success)
			return 0;

		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EBUSY;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();

		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	bool nowait = bio->bi_opf & REQ_NOWAIT;
	int ret;

	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
	if (unlikely(ret)) {
		if (nowait && !blk_queue_dying(q))
			bio_wouldblock_error(bio);
		else
			bio_io_error(bio);
	}

	return ret;
}

void blk_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->q_usage_counter);
}

static void blk_queue_usage_counter_release(struct percpu_ref *ref)
{
	struct request_queue *q =
		container_of(ref, struct request_queue, q_usage_counter);

	wake_up_all(&q->mq_freeze_wq);
}

static void blk_rq_timed_out_timer(struct timer_list *t)
{
	struct request_queue *q = from_timer(q, t, timeout);

	kblockd_schedule_work(&q->timeout_work);
}

static void blk_timeout_work(struct work_struct *work)
{
}

/**
 * blk_alloc_queue - construct a request queue
 * @node_id: NUMA node to allocate memory from
 *
 * Return: pointer to the allocated request_queue, or NULL if failed.
 */
struct request_queue *blk_alloc_queue(int node_id)
{
	struct request_queue *q;
	int ret;

	q = kmem_cache_alloc_node(blk_requestq_cachep,
				GFP_KERNEL | __GFP_ZERO, node_id);
	if (!q)
		return NULL;

	q->last_merge = NULL;

	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
	if (q->id < 0)
		goto fail_q;

	ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto fail_id;

	q->backing_dev_info = bdi_alloc(node_id);
	if (!q->backing_dev_info)
		goto fail_split;

	q->stats = blk_alloc_queue_stats();
	if (!q->stats)
		goto fail_stats;

	q->node = node_id;

	atomic_set(&q->nr_active_requests_shared_sbitmap, 0);

	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
#endif

	kobject_init(&q->kobj, &blk_queue_ktype);

	mutex_init(&q->debugfs_mutex);
	mutex_init(&q->sysfs_lock);
	mutex_init(&q->sysfs_dir_lock);
	spin_lock_init(&q->queue_lock);

	init_waitqueue_head(&q->mq_freeze_wq);
	mutex_init(&q->mq_freeze_lock);

	/*
	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
	 * See blk_register_queue() for details.
	 */
	if (percpu_ref_init(&q->q_usage_counter,
				blk_queue_usage_counter_release,
				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
		goto fail_bdi;

	if (blkcg_init_queue(q))
		goto fail_ref;

	blk_queue_dma_alignment(q, 511);
	blk_set_default_limits(&q->limits);
	q->nr_requests = BLKDEV_MAX_RQ;

	return q;

fail_ref:
	percpu_ref_exit(&q->q_usage_counter);
fail_bdi:
	blk_free_queue_stats(q->stats);
fail_stats:
	bdi_put(q->backing_dev_info);
fail_split:
	bioset_exit(&q->bio_split);
fail_id:
	ida_simple_remove(&blk_queue_ida, q->id);
fail_q:
	kmem_cache_free(blk_requestq_cachep, q);
	return NULL;
}
EXPORT_SYMBOL(blk_alloc_queue);

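/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject, unless the queue is
 * already dying.
 *
 * Context: Any context.
 */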
bool blk_get_queue(struct request_queue *q)
{
	if (likely(!blk_queue_dying(q))) {
		__blk_get_queue(q);
		return true;
	}

	return false;
}
EXPORT_SYMBOL(blk_get_queue);

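/**
 * blk_get_request - allocate a request
 * @q: request queue to allocate a request for
 * @op: operation (REQ_OP_*) and REQ_* flags, e.g. REQ_SYNC.
 * @flags: BLK_MQ_REQ_* flags, e.g. BLK_MQ_REQ_NOWAIT.
 */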
struct request *blk_get_request(struct request_queue *q, unsigned int op,
				blk_mq_req_flags_t flags)
{
	struct request *req;

	WARN_ON_ONCE(op & REQ_NOWAIT);
	WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));

	req = blk_mq_alloc_request(q, op, flags);
	if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
		q->mq_ops->initialize_rq_fn(req);

	return req;
}
EXPORT_SYMBOL(blk_get_request);

void blk_put_request(struct request *req)
{
	blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);

static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
	char b[BDEVNAME_SIZE];

	pr_info_ratelimited("attempt to access beyond end of device\n"
			    "%s: rw=%d, want=%llu, limit=%llu\n",
			    bio_devname(bio, b), bio->bi_opf,
			    bio_end_sector(bio), maxsector);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST

static DECLARE_FAULT_ATTR(fail_make_request);

static int __init setup_fail_make_request(char *str)
{
	return setup_fault_attr(&fail_make_request, str);
}
__setup("fail_make_request=", setup_fail_make_request);

static bool should_fail_request(struct block_device *part, unsigned int bytes)
{
	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}

static int __init fail_make_request_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
						NULL, &fail_make_request);

	return PTR_ERR_OR_ZERO(dir);
}

late_initcall(fail_make_request_debugfs);

#else /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}

#endif /* CONFIG_FAIL_MAKE_REQUEST */

static inline bool bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		char b[BDEVNAME_SIZE];

		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return false;

		WARN_ONCE(1,
		       "Trying to write to read-only block-device %s (partno %d)\n",
			bio_devname(bio, b), bio->bi_bdev->bd_partno);
		/* Older lvm-tools actually trigger this */
		return false;
	}

	return false;
}

static noinline int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);

/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors && maxsector &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		handle_bad_sector(bio, maxsector);
		return -EIO;
	}
	return 0;
}

/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;

	if (unlikely(should_fail_request(p, bio->bi_iter.bi_size)))
		return -EIO;
	if (bio_sectors(bio)) {
		bio->bi_iter.bi_sector += p->bd_start_sect;
		trace_block_bio_remap(bio, p->bd_dev,
				      bio->bi_iter.bi_sector -
				      p->bd_start_sect);
	}
	bio_set_flag(bio, BIO_REMAPPED);
	return 0;
}

/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	sector_t pos = bio->bi_iter.bi_sector;
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!blk_queue_is_zoned(q))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (pos & (blk_queue_zone_sectors(q) - 1) ||
	    !blk_queue_zone_is_seq(q, pos))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}

static noinline_for_stack bool submit_bio_checks(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev->bd_disk->queue;
	blk_status_t status = BLK_STS_IOERR;
	struct blk_plug *plug;

	might_sleep();

	plug = blk_mq_plug(q, bio);
	if (plug && plug->nowait)
		bio->bi_opf |= REQ_NOWAIT;

	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	if (unlikely(bio_check_ro(bio)))
		goto end_io;
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev->bd_partno && unlikely(blk_partition_remap(bio)))
			goto end_io;
	}

	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!bio_sectors(bio)) {
			status = BLK_STS_OK;
			goto end_io;
		}
	}

	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		bio->bi_opf &= ~REQ_HIPRI;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
		if (!blk_queue_discard(q))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!blk_queue_secure_erase(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_SAME:
		if (!q->limits.max_write_same_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!blk_queue_is_zoned(q))
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET_ALL:
		if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
			goto not_supported;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	default:
		break;
	}

	/*
	 * Various block parts want %current->io_context, so allocate it up
	 * front rather than dealing with lots of pain to allocate it only
	 * where needed. This may fail and the block layer knows how to live
	 * with it.
	 */
	if (unlikely(!current->io_context))
		create_task_io_context(current, GFP_ATOMIC, q->node);

	if (blk_throtl_bio(bio)) {
		blkcg_bio_issue_init(bio);
		return false;
	}

	blk_cgroup_bio_start(bio);
	blkcg_bio_issue_init(bio);

	if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/* Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
		bio_set_flag(bio, BIO_TRACE_COMPLETION);
	}
	return true;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
	return false;
}

static blk_qc_t __submit_bio(struct bio *bio)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	blk_qc_t ret = BLK_QC_T_NONE;

	if (blk_crypto_bio_prep(&bio)) {
		if (!disk->fops->submit_bio)
			return blk_mq_submit_bio(bio);
		ret = disk->fops->submit_bio(bio);
	}
	blk_queue_exit(disk->queue);
	return ret;
}

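/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL, so we have a list with
 *    a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added.  ->submit_bio() may indeed add some
 *    more bios through a recursive call to submit_bio_noacct.  If it did, we
 *    find a non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio off the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 *	->submit_bio, but that haven't been processed yet.
 */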
static blk_qc_t __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];
	blk_qc_t ret = BLK_QC_T_NONE;

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bio->bi_bdev->bd_disk->queue;
		struct bio_list lower, same;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		ret = __submit_bio(bio);

		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bio->bi_bdev->bd_disk->queue)
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);

		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
	return ret;
}

static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };
	blk_qc_t ret = BLK_QC_T_NONE;

	current->bio_list = bio_list;

	do {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (unlikely(bio_queue_enter(bio) != 0))
			continue;

		if (!blk_crypto_bio_prep(&bio)) {
			blk_queue_exit(disk->queue);
			ret = BLK_QC_T_NONE;
			continue;
		}

		ret = blk_mq_submit_bio(bio);
	} while ((bio = bio_list_pop(&bio_list[0])));

	current->bio_list = NULL;
	return ret;
}

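/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio:  The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers.  All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */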
blk_qc_t submit_bio_noacct(struct bio *bio)
{
	if (!submit_bio_checks(bio))
		return BLK_QC_T_NONE;

	/*
	 * We only want one ->submit_bio to be active at any time, else stack
	 * usage with stacked devices could be a problem.  Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list) {
		bio_list_add(&current->bio_list[0], bio);
		return BLK_QC_T_NONE;
	}

	if (!bio->bi_bdev->bd_disk->fops->submit_bio)
		return __submit_bio_noacct_mq(bio);
	return __submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);

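/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices.  It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done.  The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio.  The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */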
blk_qc_t submit_bio(struct bio *bio)
{
	if (blkcg_punt_bio_submit(bio))
		return BLK_QC_T_NONE;

	/*
	 * If it's a regular read/write or a barrier with data attached,
	 * go through the normal accounting stuff before submission.
	 */
	if (bio_has_data(bio)) {
		unsigned int count;

		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
			count = queue_logical_block_size(
					bio->bi_bdev->bd_disk->queue) >> 9;
		else
			count = bio_sectors(bio);

		if (op_is_write(bio_op(bio))) {
			count_vm_events(PGPGOUT, count);
		} else {
			task_io_account_read(bio->bi_iter.bi_size);
			count_vm_events(PGPGIN, count);
		}

		if (unlikely(block_dump)) {
			char b[BDEVNAME_SIZE];
			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
				current->comm, task_pid_nr(current),
				op_is_write(bio_op(bio)) ? "WRITE" : "READ",
				(unsigned long long)bio->bi_iter.bi_sector,
				bio_devname(bio, b), count);
		}
	}

	/*
	 * If we're reading data that is part of the userspace workingset, count
	 * submission time as memory stall.  When the device is congested, or
	 * the submitting cgroup IO-throttled, submission can be a significant
	 * part of overall IO time.
	 */
	if (unlikely(bio_op(bio) == REQ_OP_READ &&
	    bio_flagged(bio, BIO_WORKINGSET))) {
		unsigned long pflags;
		blk_qc_t ret;

		psi_memstall_enter(&pflags);
		ret = submit_bio_noacct(bio);
		psi_memstall_leave(&pflags);

		return ret;
	}

	return submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);

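/**
 * blk_cloned_rq_check_limits - Helper function to check a cloned request
 *                              for the new queue limits
 * @q:  the queue
 * @rq: the request being checked
 *
 * Description:
 *    @rq may have been made based on weaker limitations of upper-level queues
 *    in request stacking drivers, and it may violate the limitation of @q.
 *    Since the block layer and the underlying device driver trust @rq
 *    after it is inserted to @q, it should be checked against @q before
 *    the insertion using this generic function.
 */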
static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
				      struct request *rq)
{
	unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

	if (blk_rq_sectors(rq) > max_sectors) {
		/*
		 * SCSI device does not have a good way to return if
		 * Write Same/Zero is actually supported. If a device rejects
		 * a non-read/write command (discard, write same, etc.) the
		 * low-level device driver will set the relevant queue limit to
		 * 0 to prevent blk-lib from issuing more of the offending
		 * operations. Commands queued prior to the queue limit being
		 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
		 * errors being propagated to upper layers.
		 */
		if (max_sectors == 0)
			return BLK_STS_NOTSUPP;

		printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
			__func__, blk_rq_sectors(rq), max_sectors);
		return BLK_STS_IOERR;
	}

	/*
	 * The queue settings related to segment counting may differ from the
	 * original queue.
	 */
	rq->nr_phys_segments = blk_recalc_rq_segments(rq);
	if (rq->nr_phys_segments > queue_max_segments(q)) {
		printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
			__func__, rq->nr_phys_segments, queue_max_segments(q));
		return BLK_STS_IOERR;
	}

	return BLK_STS_OK;
}

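/**
 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
 * @q:  the queue to submit the request
 * @rq: the request being queued
 */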
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
	blk_status_t ret;

	ret = blk_cloned_rq_check_limits(q, rq);
	if (ret != BLK_STS_OK)
		return ret;

	if (rq->rq_disk &&
	    should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
		return BLK_STS_IOERR;

	if (blk_crypto_insert_cloned_request(rq))
		return BLK_STS_IOERR;

	if (blk_queue_io_stat(q))
		blk_account_io_start(rq);

	/*
	 * Since we have a scheduler attached on the top device,
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
	return blk_mq_request_issue_directly(rq, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

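/**
 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into area which need to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */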
unsigned int blk_rq_err_bytes(const struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	unsigned int bytes = 0;
	struct bio *bio;

	if (!(rq->rq_flags & RQF_MIXED_MERGE))
		return blk_rq_bytes(rq);

	/*
	 * Currently the only 'mixing' which can happen is between
	 * different fastfail types.  We can safely fail portions
	 * which have all the failfast bits that the first one has -
	 * the ones which are at least as eager to fail as the first
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}

	/* this could lead to infinite loop */
	BUG_ON(blk_rq_bytes(rq) && !bytes);
	return bytes;
}
EXPORT_SYMBOL_GPL(blk_rq_err_bytes);

static void update_io_ticks(struct block_device *part, unsigned long now,
		bool end)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->bd_stamp);
	if (unlikely(time_after(now, stamp))) {
		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
	}
	if (part->bd_partno) {
		part = bdev_whole(part);
		goto again;
	}
}

static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
	if (req->part && blk_do_io_stat(req)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
		part_stat_unlock();
	}
}

void blk_account_io_done(struct request *req, u64 now)
{
	/*
	 * Account IO completion.  flush_rq isn't accounted as a
	 * normal IO on queueing nor completion.  Accounting the
	 * containing request is enough.
	 */
	if (req->part && blk_do_io_stat(req) &&
	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
		const int sgrp = op_stat_group(req_op(req));

		part_stat_lock();
		update_io_ticks(req->part, jiffies, true);
		part_stat_inc(req->part, ios[sgrp]);
		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
		part_stat_unlock();
	}
}

void blk_account_io_start(struct request *rq)
{
	if (!blk_do_io_stat(rq))
		return;

	/* passthrough requests can hold bios that do not have ->bi_bdev set */
	if (rq->bio && rq->bio->bi_bdev)
		rq->part = rq->bio->bi_bdev;
	else
		rq->part = rq->rq_disk->part0;

	part_stat_lock();
	update_io_ticks(rq->part, jiffies, false);
	part_stat_unlock();
}

static unsigned long __part_start_io_acct(struct block_device *part,
					  unsigned int sectors, unsigned int op)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);

	part_stat_lock();
	update_io_ticks(part, now, false);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_stat_local_inc(part, in_flight[op_is_write(op)]);
	part_stat_unlock();

	return now;
}

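/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio:	bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */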
unsigned long bio_start_io_acct(struct bio *bio)
{
	return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);

unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
				 unsigned int op)
{
	return __part_start_io_acct(disk->part0, sectors, op);
}
EXPORT_SYMBOL(disk_start_io_acct);

static void __part_end_io_acct(struct block_device *part, unsigned int op,
			       unsigned long start_time)
{
	const int sgrp = op_stat_group(op);
	unsigned long now = READ_ONCE(jiffies);
	unsigned long duration = now - start_time;

	part_stat_lock();
	update_io_ticks(part, now, true);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_local_dec(part, in_flight[op_is_write(op)]);
	part_stat_unlock();
}

void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev)
{
	__part_end_io_acct(orig_bdev, bio_op(bio), start_time);
}
EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped);

void disk_end_io_acct(struct gendisk *disk, unsigned int op,
		      unsigned long start_time)
{
	__part_end_io_acct(disk->part0, op, start_time);
}
EXPORT_SYMBOL(disk_end_io_acct);

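/*
 * Steal bios from a request and add them to a bio list.
 * The request must not have been partially completed before.
 */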
void blk_steal_bios(struct bio_list *list, struct request *rq)
{
	if (rq->bio) {
		if (list->tail)
			list->tail->bi_next = rq->bio;
		else
			list->head = rq->bio;
		list->tail = rq->biotail;

		rq->bio = NULL;
		rq->biotail = NULL;
	}

	rq->__data_len = 0;
}
EXPORT_SYMBOL_GPL(blk_steal_bios);

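/**
 * blk_update_request - Complete multiple bytes without completing the request
 * @req:      the request being processed
 * @error:    block status code
 * @nr_bytes: number of bytes to complete for @req
 *
 * Description:
 *     Ends I/O on a number of bytes attached to @req, but doesn't complete
 *     the request structure even if @req doesn't have leftover.
 *     If @req has leftover, sets it up for the next range of segments.
 *
 *     Passing the result of blk_rq_bytes() as @nr_bytes guarantees
 *     %false return from this function.
 *
 * Return:
 *     %false - this request doesn't have any more data
 *     %true  - this request has more data
 */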
bool blk_update_request(struct request *req, blk_status_t error,
		unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

	if (!req->bio)
		return false;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
	    error == BLK_STS_OK)
		req->q->integrity.profile->complete_fn(req, nr_bytes);
#endif

	if (unlikely(error && !blk_rq_is_passthrough(req) &&
		     !(req->rq_flags & RQF_QUIET)))
		print_req_error(req, error, __func__);

	blk_account_io_completion(req, nr_bytes);

	total_bytes = 0;
	while (req->bio) {
		struct bio *bio = req->bio;
		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

		if (bio_bytes == bio->bi_iter.bi_size)
			req->bio = bio->bi_next;

		/* Completion has already been traced */
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
		req_bio_endio(req, bio, bio_bytes, error);

		total_bytes += bio_bytes;
		nr_bytes -= bio_bytes;

		if (!nr_bytes)
			break;
	}

	/*
	 * completely done
	 */
	if (!req->bio) {
		/*
		 * Reset counters so that the request stacking driver
		 * can find how many bytes remain in the request
		 * later.
		 */
		req->__data_len = 0;
		return false;
	}

	req->__data_len -= total_bytes;

	/* update sector only for requests with clear definition of sector */
	if (!blk_rq_is_passthrough(req))
		req->__sector += total_bytes >> 9;

	/* mixed attributes always follow the first bio */
	if (req->rq_flags & RQF_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	if (!(req->rq_flags & RQF_SPECIAL_PAYLOAD)) {
		/*
		 * If total number of sectors is less than the first segment
		 * size, something has gone terribly wrong.
		 */
		if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
			blk_dump_rq_flags(req, "request botched");
			req->__data_len = blk_rq_cur_bytes(req);
		}

		/* recalculate the number of segments */
		req->nr_phys_segments = blk_recalc_rq_segments(req);
	}

	return true;
}
EXPORT_SYMBOL_GPL(blk_update_request);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
/**
 * rq_flush_dcache_pages - Helper function to flush all pages in a request
 * @rq: the request to be flushed
 *
 * Description:
 *     Flush all pages in @rq.
 */
void rq_flush_dcache_pages(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, rq, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
#endif

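/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *    Check if underlying low-level drivers of a device are busy.  This is
 *    used only by request stacking drivers to stop dispatching requests to
 *    underlying devices when those devices are busy, which helps I/O merging
 *    on the stacking driver's queue.
 *
 * Return:
 *    0 - Not busy (The request stacking driver should dispatch request)
 *    1 - Busy (The request stacking driver should stop dispatching request)
 */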
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);

/**
 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
 * @rq: the clone request to be cleaned up
 *
 * Description:
 *     Free all bios in @rq for a cloned request.
 */
void blk_rq_unprep_clone(struct request *rq)
{
	struct bio *bio;

	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;

		bio_put(bio);
	}
}
EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);

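/**
 * blk_rq_prep_clone - Helper function to setup clone request
 * @rq: the request to be setup
 * @rq_src: original request to be cloned
 * @bs: bio_set that bios for clone are allocated from
 * @gfp_mask: memory allocation mask for bio
 * @bio_ctr: setup function to be called for each clone bio.
 *           Returns %0 for success, non %0 for failure.
 * @data: private data to be passed to @bio_ctr
 *
 * Description:
 *     Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
 *     Also, pages which the original bios are pointing to are not copied
 *     and the cloned bios just point to the same pages.
 *     So cloned bios must be completed before original bios, which means
 *     the caller must complete @rq before @rq_src.
 */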
int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
		      struct bio_set *bs, gfp_t gfp_mask,
		      int (*bio_ctr)(struct bio *, struct bio *, void *),
		      void *data)
{
	struct bio *bio, *bio_src;

	if (!bs)
		bs = &fs_bio_set;

	__rq_for_each_bio(bio_src, rq_src) {
		bio = bio_clone_fast(bio_src, gfp_mask, bs);
		if (!bio)
			goto free_and_out;

		if (bio_ctr && bio_ctr(bio, bio_src, data))
			goto free_and_out;

		if (rq->bio) {
			rq->biotail->bi_next = bio;
			rq->biotail = bio;
		} else {
			rq->bio = rq->biotail = bio;
		}
		bio = NULL;
	}

	/* Copy attributes of the original request to the clone request. */
	rq->__sector = blk_rq_pos(rq_src);
	rq->__data_len = blk_rq_bytes(rq_src);
	if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
		rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
		rq->special_vec = rq_src->special_vec;
	}
	rq->nr_phys_segments = rq_src->nr_phys_segments;
	rq->ioprio = rq_src->ioprio;

	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
		goto free_and_out;

	return 0;

free_and_out:
	if (bio)
		bio_put(bio);
	blk_rq_unprep_clone(rq);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(blk_rq_prep_clone);

int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
				unsigned long delay)
{
	return mod_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_mod_delayed_work_on);

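/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug:	The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch.  The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called.
 *
 *   Tracking blk_plug inside the task_struct also allows the pending I/O to
 *   be flushed automatically should the task block between blk_start_plug()
 *   and blk_finish_plug().  This matters both for performance and to avoid
 *   deadlocks, e.g. when memory reclaim waits on a page whose I/O is still
 *   sitting in the private plug.
 */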
void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);
	plug->rq_count = 0;
	plug->multiple_queues = false;
	plug->nowait = false;

	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
EXPORT_SYMBOL(blk_start_plug);

static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
	LIST_HEAD(callbacks);

	while (!list_empty(&plug->cb_list)) {
		list_splice_init(&plug->cb_list, &callbacks);

		while (!list_empty(&callbacks)) {
			struct blk_plug_cb *cb = list_first_entry(&callbacks,
							  struct blk_plug_cb,
							  list);
			list_del(&cb->list);
			cb->callback(cb, from_schedule);
		}
	}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
				      int size)
{
	struct blk_plug *plug = current->plug;
	struct blk_plug_cb *cb;

	if (!plug)
		return NULL;

	list_for_each_entry(cb, &plug->cb_list, list)
		if (cb->callback == unplug && cb->data == data)
			return cb;

	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
		cb->data = data;
		cb->callback = unplug;
		list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
	flush_plug_callbacks(plug, from_schedule);

	if (!list_empty(&plug->mq_list))
		blk_mq_flush_plug_list(plug, from_schedule);
}

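/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug:	The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete.  This function
 * must be paired with an initial call to blk_start_plug().  The intent
 * is to allow the block layer to optimize I/O submission.  See the
 * documentation for blk_start_plug() for more information.
 */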
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);

void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
		io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct request, cmd_flags));
	BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
			sizeof_field(struct bio, bi_opf));

	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");

	blk_requestq_cachep = kmem_cache_create("request_queue",
			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);

	blk_debugfs_root = debugfs_create_dir("block", NULL);

	return 0;
}