// SPDX-License-Identifier: GPL-2.0
/*
 *  gendisk handling
 */
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/badblocks.h>

#include "blk.h"

static struct kobject *block_depr;

/* for extended dynamic devt allocation, currently only one major is used */
#define NR_EXT_DEVT		(1 << MINORBITS)
static DEFINE_IDA(ext_devt_ida);

static void disk_check_events(struct disk_events *ev,
			      unsigned int *clearing_ptr);
static void disk_alloc_events(struct gendisk *disk);
static void disk_add_events(struct gendisk *disk);
static void disk_del_events(struct gendisk *disk);
static void disk_release_events(struct gendisk *disk);

void set_capacity(struct gendisk *disk, sector_t sectors)
{
	struct block_device *bdev = disk->part0;

	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	spin_unlock(&bdev->bd_size_lock);
}
EXPORT_SYMBOL(set_capacity);

/*
 * Set disk capacity and notify if the size is not currently zero and will not
 * be set to zero.  Returns true if a uevent was sent, otherwise false.
 */
bool set_capacity_and_notify(struct gendisk *disk, sector_t size)
{
	sector_t capacity = get_capacity(disk);
	char *envp[] = { "RESIZE=1", NULL };

	set_capacity(disk, size);

	/*
	 * Only print a message and send a uevent if the size actually changed
	 * and the gendisk is user visible, i.e. registered and not hidden.
	 */
	if (size == capacity ||
	    (disk->flags & (GENHD_FL_UP | GENHD_FL_HIDDEN)) != GENHD_FL_UP)
		return false;

	pr_info("%s: detected capacity change from %lld to %lld\n",
		disk->disk_name, capacity, size);

	/*
	 * Historically we did not send a uevent for changes to/from an empty
	 * device.
	 */
	if (!capacity || !size)
		return false;
	kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
	return true;
}
EXPORT_SYMBOL_GPL(set_capacity_and_notify);
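
/*
 * Illustrative sketch of how a block driver might use the helper above when
 * the device reports a new size.  The sketch_* names are hypothetical and not
 * part of this file:
 *
 *	static void sketch_update_size(struct sketch_dev *dev, sector_t new)
 *	{
 *		if (set_capacity_and_notify(dev->disk, new))
 *			;	// userspace was notified via a RESIZE uevent
 *	}
 */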

/*
 * Format the device name of @hd/@partno into @buf (which must be at least
 * BDEVNAME_SIZE bytes) and return @buf for convenience.
 */
char *disk_name(struct gendisk *hd, int partno, char *buf)
{
	if (!partno)
		snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
	else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
		snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
	else
		snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);

	return buf;
}

const char *bdevname(struct block_device *bdev, char *buf)
{
	return disk_name(bdev->bd_disk, bdev->bd_partno, buf);
}
EXPORT_SYMBOL(bdevname);
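
/*
 * Typical (illustrative) use from a driver's error path; the message text is
 * an assumption, not taken from this file:
 *
 *	char b[BDEVNAME_SIZE];
 *
 *	pr_warn("I/O error on %s\n", bdevname(bdev, b));
 */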

static void part_stat_read_all(struct block_device *part,
			       struct disk_stats *stat)
{
	int cpu;

	memset(stat, 0, sizeof(struct disk_stats));
	for_each_possible_cpu(cpu) {
		struct disk_stats *ptr = per_cpu_ptr(part->bd_stats, cpu);
		int group;

		for (group = 0; group < NR_STAT_GROUPS; group++) {
			stat->nsecs[group] += ptr->nsecs[group];
			stat->sectors[group] += ptr->sectors[group];
			stat->ios[group] += ptr->ios[group];
			stat->merges[group] += ptr->merges[group];
		}

		stat->io_ticks += ptr->io_ticks;
	}
}

static unsigned int part_in_flight(struct block_device *part)
{
	unsigned int inflight = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
			    part_stat_local_read_cpu(part, in_flight[1], cpu);
	}
	if ((int)inflight < 0)
		inflight = 0;

	return inflight;
}

static void part_in_flight_rw(struct block_device *part,
			      unsigned int inflight[2])
{
	int cpu;

	inflight[0] = 0;
	inflight[1] = 0;
	for_each_possible_cpu(cpu) {
		inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
		inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
	}
	if ((int)inflight[0] < 0)
		inflight[0] = 0;
	if ((int)inflight[1] < 0)
		inflight[1] = 0;
}

/*
 * Block device major number registration, exported via /proc/devices.
 */
#define BLKDEV_MAJOR_HASH_SIZE 255
static struct blk_major_name {
	struct blk_major_name *next;
	int major;
	char name[16];
	void (*probe)(dev_t devt);
} *major_names[BLKDEV_MAJOR_HASH_SIZE];
static DEFINE_MUTEX(major_names_lock);

/* index in the major_names array */
static inline int major_to_index(unsigned major)
{
	return major % BLKDEV_MAJOR_HASH_SIZE;
}

#ifdef CONFIG_PROC_FS
void blkdev_show(struct seq_file *seqf, off_t offset)
{
	struct blk_major_name *dp;

	mutex_lock(&major_names_lock);
	for (dp = major_names[major_to_index(offset)]; dp; dp = dp->next)
		if (dp->major == offset)
			seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
	mutex_unlock(&major_names_lock);
}
#endif

/**
 * __register_blkdev - register a new block device
 *
 * @major: the requested major device number [1..BLKDEV_MAJOR_MAX-1]. If
 *         @major = 0, try to allocate any unused major number.
 * @name: the name of the new block device as a zero terminated string
 * @probe: callback that is called on access to any minor number of @major
 *
 * The @name must be unique within the system.
 *
 * The return value depends on the @major input parameter:
 *
 *  - if a major device number was requested in range [1..BLKDEV_MAJOR_MAX-1]
 *    then the function returns zero on success, or a negative error code
 *  - if any unused major number was requested with @major = 0 parameter
 *    then the return value is the allocated major number in range
 *    [1..BLKDEV_MAJOR_MAX-1] or a negative error code otherwise
 *
 * See Documentation/admin-guide/devices.txt for the list of allocated
 * major numbers.
 *
 * Use register_blkdev instead for any new code.
 */
int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt))
{
	struct blk_major_name **n, *p;
	int index, ret = 0;

	mutex_lock(&major_names_lock);

	/* pick an unused major if the caller asked for a dynamic one */
	if (major == 0) {
		for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
			if (major_names[index] == NULL)
				break;
		}

		if (index == 0) {
			printk("%s: failed to get major for %s\n",
			       __func__, name);
			ret = -EBUSY;
			goto out;
		}
		major = index;
		ret = major;
	}

	if (major >= BLKDEV_MAJOR_MAX) {
		pr_err("%s: major requested (%u) is greater than the maximum (%u) for %s\n",
		       __func__, major, BLKDEV_MAJOR_MAX-1, name);

		ret = -EINVAL;
		goto out;
	}

	p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
	if (p == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	p->major = major;
	p->probe = probe;
	strlcpy(p->name, name, sizeof(p->name));
	p->next = NULL;
	index = major_to_index(major);

	for (n = &major_names[index]; *n; n = &(*n)->next) {
		if ((*n)->major == major)
			break;
	}
	if (!*n)
		*n = p;
	else
		ret = -EBUSY;

	if (ret < 0) {
		printk("register_blkdev: cannot get major %u for %s\n",
		       major, name);
		kfree(p);
	}
out:
	mutex_unlock(&major_names_lock);
	return ret;
}
EXPORT_SYMBOL(__register_blkdev);
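
/*
 * Illustrative sketch of dynamic major registration from a driver's init
 * path, using the register_blkdev() wrapper (no probe callback).  The
 * "sketchblk" name is hypothetical, not part of this file:
 *
 *	int major = register_blkdev(0, "sketchblk");
 *
 *	if (major < 0)
 *		return major;
 *	...
 *	unregister_blkdev(major, "sketchblk");
 */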

void unregister_blkdev(unsigned int major, const char *name)
{
	struct blk_major_name **n;
	struct blk_major_name *p = NULL;
	int index = major_to_index(major);

	mutex_lock(&major_names_lock);
	for (n = &major_names[index]; *n; n = &(*n)->next)
		if ((*n)->major == major)
			break;
	if (!*n || strcmp((*n)->name, name)) {
		WARN_ON(1);
	} else {
		p = *n;
		*n = p->next;
	}
	mutex_unlock(&major_names_lock);
	kfree(p);
}

EXPORT_SYMBOL(unregister_blkdev);

/**
 * blk_mangle_minor - scatter minor numbers apart
 * @minor: minor number to mangle
 *
 * Scatter consecutively allocated @minor numbers apart if the kernel is
 * built with CONFIG_DEBUG_BLOCK_EXT_DEVT.  Mangling twice gives the
 * original value.
 *
 * RETURNS:
 * Mangled value.
 */
static int blk_mangle_minor(int minor)
{
#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
	int i;

	for (i = 0; i < MINORBITS / 2; i++) {
		int low = minor & (1 << i);
		int high = minor & (1 << (MINORBITS - 1 - i));
		int distance = MINORBITS - 1 - 2 * i;

		minor ^= low | high;
		low <<= distance;
		high >>= distance;
		minor |= low | high;
	}
#endif
	return minor;
}

/**
 * blk_alloc_devt - allocate a dev_t for a block device
 * @bdev: block device to allocate dev_t for
 * @devt: out parameter for resulting dev_t
 *
 * Allocate a dev_t for the block device.  Devices within the disk's minor
 * range use the consecutive major/minor space; anything beyond it gets a
 * dynamically allocated minor under BLOCK_EXT_MAJOR.
 *
 * RETURNS:
 * 0 on success, allocated dev_t is returned in *@devt.  -errno on failure.
 */
int blk_alloc_devt(struct block_device *bdev, dev_t *devt)
{
	struct gendisk *disk = bdev->bd_disk;
	int idx;

	/* in consecutive minor range? */
	if (bdev->bd_partno < disk->minors) {
		*devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno);
		return 0;
	}

	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
	if (idx < 0)
		return idx == -ENOSPC ? -EBUSY : idx;

	*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
	return 0;
}

/**
 * blk_free_devt - free a dev_t
 * @devt: dev_t to free
 *
 * Free @devt which was allocated using blk_alloc_devt().
 */
void blk_free_devt(dev_t devt)
{
	if (MAJOR(devt) == BLOCK_EXT_MAJOR)
		ida_free(&ext_devt_ida, blk_mangle_minor(MINOR(devt)));
}

static char *bdevt_str(dev_t devt, char *buf)
{
	if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
		char tbuf[BDEVT_SIZE];
		snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
		snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
	} else
		snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));

	return buf;
}

void disk_uevent(struct gendisk *disk, enum kobject_action action)
{
	struct block_device *part;
	unsigned long idx;

	rcu_read_lock();
	xa_for_each(&disk->part_tbl, idx, part) {
		if (bdev_is_partition(part) && !bdev_nr_sectors(part))
			continue;
		if (!kobject_get_unless_zero(&part->bd_device.kobj))
			continue;

		rcu_read_unlock();
		kobject_uevent(bdev_kobj(part), action);
		put_device(&part->bd_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(disk_uevent);

static void disk_scan_partitions(struct gendisk *disk)
{
	struct block_device *bdev;

	if (!get_capacity(disk) || !disk_part_scan_enabled(disk))
		return;

	set_bit(GD_NEED_PART_SCAN, &disk->state);
	bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL);
	if (!IS_ERR(bdev))
		blkdev_put(bdev, FMODE_READ);
}

static void register_disk(struct device *parent, struct gendisk *disk,
			  const struct attribute_group **groups)
{
	struct device *ddev = disk_to_dev(disk);
	int err;

	ddev->parent = parent;

	dev_set_name(ddev, "%s", disk->disk_name);

	/* delay uevents, until we scanned partition table */
	dev_set_uevent_suppress(ddev, 1);

	if (groups) {
		WARN_ON(ddev->groups);
		ddev->groups = groups;
	}
	if (device_add(ddev))
		return;
	if (!sysfs_deprecated) {
		err = sysfs_create_link(block_depr, &ddev->kobj,
					kobject_name(&ddev->kobj));
		if (err) {
			device_del(ddev);
			return;
		}
	}

	/*
	 * avoid probable deadlock caused by allocating memory with
	 * GFP_KERNEL in runtime_resume callback of its all ancestor
	 * devices
	 */
	pm_runtime_set_memalloc_noio(ddev, true);

	disk->part0->bd_holder_dir =
		kobject_create_and_add("holders", &ddev->kobj);
	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);

	if (disk->flags & GENHD_FL_HIDDEN)
		return;

	disk_scan_partitions(disk);

	/* announce the disk and partitions after the partition scan */
	dev_set_uevent_suppress(ddev, 0);
	disk_uevent(disk, KOBJ_ADD);

	if (disk->queue->backing_dev_info->dev) {
		err = sysfs_create_link(&ddev->kobj,
			  &disk->queue->backing_dev_info->dev->kobj,
			  "bdi");
		WARN_ON(err);
	}
}

/**
 * device_add_disk - add disk information to kernel list
 * @parent: parent device for the disk
 * @disk: per-device partitioning information
 * @groups: Additional per-device sysfs groups
 *
 * This function registers the partitioning information in @disk
 * with the kernel.
 *
 * FIXME: error handling
 */
static void __device_add_disk(struct device *parent, struct gendisk *disk,
			      const struct attribute_group **groups,
			      bool register_queue)
{
	dev_t devt;
	int retval;

	/*
	 * The disk queue should now be all set with enough information about
	 * the device for the elevator code to pick an adequate default
	 * elevator if one is needed, that is, for devices requesting queue
	 * registration.
	 */
	if (register_queue)
		elevator_init_mq(disk->queue);

	/*
	 * minors == 0 indicates to use ext devt from part0 and should be
	 * accompanied with EXT_DEVT flag.  Make sure all parameters make
	 * sense.
	 */
	WARN_ON(disk->minors && !(disk->major || disk->first_minor));
	WARN_ON(!disk->minors &&
		!(disk->flags & (GENHD_FL_EXT_DEVT | GENHD_FL_HIDDEN)));

	disk->flags |= GENHD_FL_UP;

	retval = blk_alloc_devt(disk->part0, &devt);
	if (retval) {
		WARN_ON(1);
		return;
	}
	disk->major = MAJOR(devt);
	disk->first_minor = MINOR(devt);

	disk_alloc_events(disk);

	if (disk->flags & GENHD_FL_HIDDEN) {
		/*
		 * Don't let hidden disks show up in /proc/partitions,
		 * and don't bother scanning for partitions either.
		 */
		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		disk->flags |= GENHD_FL_NO_PART_SCAN;
	} else {
		struct backing_dev_info *bdi = disk->queue->backing_dev_info;
		struct device *dev = disk_to_dev(disk);
		int ret;

		/* Register BDI before referencing it from bdev */
		dev->devt = devt;
		ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
		WARN_ON(ret);
		bdi_set_owner(bdi, dev);
		bdev_add(disk->part0, devt);
	}
	register_disk(parent, disk, groups);
	if (register_queue)
		blk_register_queue(disk);

	/*
	 * Take an extra ref on queue which will be put on disk_release()
	 * so that it sticks around as long as @disk is there.
	 */
	WARN_ON_ONCE(!blk_get_queue(disk->queue));

	disk_add_events(disk);
	blk_integrity_add(disk);
}

void device_add_disk(struct device *parent, struct gendisk *disk,
		     const struct attribute_group **groups)
{
	__device_add_disk(parent, disk, groups, true);
}
EXPORT_SYMBOL(device_add_disk);
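
/*
 * Minimal, illustrative sketch of the usual driver-side lifecycle around the
 * helpers above for this kernel version.  The sketch_* identifiers are
 * hypothetical and not part of this file:
 *
 *	disk = alloc_disk(1);
 *	disk->major = sketch_major;
 *	disk->first_minor = 0;
 *	disk->fops = &sketch_fops;
 *	disk->queue = sketch_queue;		// request_queue set up elsewhere
 *	set_capacity(disk, nr_sectors);
 *	device_add_disk(parent, disk, NULL);
 *	...
 *	del_gendisk(disk);
 *	put_disk(disk);
 */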

void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
{
	__device_add_disk(parent, disk, NULL, false);
}
EXPORT_SYMBOL(device_add_disk_no_queue_reg);

/**
 * del_gendisk - remove the gendisk
 * @disk: the struct gendisk to remove
 *
 * Removes the gendisk and all its associated resources.  This deletes the
 * partitions associated with the gendisk, and unregisters the associated
 * request_queue.
 *
 * This is the counter to the respective __device_add_disk() call.
 *
 * The final removal of the struct gendisk happens when its refcount reaches 0
 * with put_disk(), which should be called after del_gendisk(), if
 * __device_add_disk() was used.
 *
 * Drivers exist which depend on the release of the gendisk to be synchronous,
 * it should not be deferred.
 *
 * Context: can sleep
 */
void del_gendisk(struct gendisk *disk)
{
	might_sleep();

	if (WARN_ON_ONCE(!disk->queue))
		return;

	blk_integrity_del(disk);
	disk_del_events(disk);

	mutex_lock(&disk->part0->bd_mutex);
	disk->flags &= ~GENHD_FL_UP;
	blk_drop_partitions(disk);
	mutex_unlock(&disk->part0->bd_mutex);

	fsync_bdev(disk->part0);
	__invalidate_device(disk->part0, true);

	/*
	 * Unhash the bdev inode for this device so that it can't be looked
	 * up any more even if openers still hold references to it.
	 */
	remove_inode_hash(disk->part0->bd_inode);

	set_capacity(disk, 0);

	if (!(disk->flags & GENHD_FL_HIDDEN)) {
		sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");

		/*
		 * Unregister bdi before releasing device numbers (as they can
		 * get reused and we'd get clashes in sysfs).
		 */
		bdi_unregister(disk->queue->backing_dev_info);
	}

	blk_unregister_queue(disk);

	kobject_put(disk->part0->bd_holder_dir);
	kobject_put(disk->slave_dir);

	part_stat_set_all(disk->part0, 0);
	disk->part0->bd_stamp = 0;
	if (!sysfs_deprecated)
		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
	device_del(disk_to_dev(disk));
}
EXPORT_SYMBOL(del_gendisk);

static ssize_t disk_badblocks_show(struct device *dev,
				   struct device_attribute *attr,
				   char *page)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->bb)
		return sprintf(page, "\n");

	return badblocks_show(disk->bb, page, 0);
}

static ssize_t disk_badblocks_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t len)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->bb)
		return -ENXIO;

	return badblocks_store(disk->bb, page, len, 0);
}

void blk_request_module(dev_t devt)
{
	unsigned int major = MAJOR(devt);
	struct blk_major_name **n;

	mutex_lock(&major_names_lock);
	for (n = &major_names[major_to_index(major)]; *n; n = &(*n)->next) {
		if ((*n)->major == major && (*n)->probe) {
			(*n)->probe(devt);
			mutex_unlock(&major_names_lock);
			return;
		}
	}
	mutex_unlock(&major_names_lock);

	if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
		/* Make old-style 2.4 aliases work */
		request_module("block-major-%d", MAJOR(devt));
}

/**
 * bdget_disk - do bdget() by gendisk and partition number
 * @disk: gendisk of interest
 * @partno: partition number
 *
 * Find partition @partno from @disk and grab a reference on it.
 *
 * RETURNS:
 * Resulting block_device on success, NULL on failure.
 */
struct block_device *bdget_disk(struct gendisk *disk, int partno)
{
	struct block_device *bdev = NULL;

	rcu_read_lock();
	bdev = xa_load(&disk->part_tbl, partno);
	if (bdev && !bdgrab(bdev))
		bdev = NULL;
	rcu_read_unlock();

	return bdev;
}

/*
 * print a full list of all partitions - intended for places where the root
 * filesystem can't be mounted and thus to give the victim some idea of what
 * went wrong
 */
void __init printk_all_partitions(void)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);
		struct block_device *part;
		char name_buf[BDEVNAME_SIZE];
		char devt_buf[BDEVT_SIZE];
		unsigned long idx;

		/*
		 * Don't show empty devices or things that have been
		 * suppressed
		 */
		if (get_capacity(disk) == 0 ||
		    (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
			continue;

		/*
		 * Note, unlike /proc/partitions, the numbers are shown in
		 * hex - the same format as the root= option takes.
		 */
		rcu_read_lock();
		xa_for_each(&disk->part_tbl, idx, part) {
			if (!bdev_nr_sectors(part))
				continue;
			printk("%s%s %10llu %s %s",
			       bdev_is_partition(part) ? " " : "",
			       bdevt_str(part->bd_dev, devt_buf),
			       bdev_nr_sectors(part) >> 1,
			       disk_name(disk, part->bd_partno, name_buf),
			       part->bd_meta_info ?
					part->bd_meta_info->uuid : "");
			if (bdev_is_partition(part))
				printk("\n");
			else if (dev->parent && dev->parent->driver)
				printk(" driver: %s\n",
				       dev->parent->driver->name);
			else
				printk(" (driver?)\n");
		}
		rcu_read_unlock();
	}
	class_dev_iter_exit(&iter);
}

#ifdef CONFIG_PROC_FS
/* iterator */
static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
{
	loff_t skip = *pos;
	struct class_dev_iter *iter;
	struct device *dev;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return ERR_PTR(-ENOMEM);

	seqf->private = iter;
	class_dev_iter_init(iter, &block_class, NULL, &disk_type);
	do {
		dev = class_dev_iter_next(iter);
		if (!dev)
			return NULL;
	} while (skip--);

	return dev_to_disk(dev);
}

static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
{
	struct device *dev;

	(*pos)++;
	dev = class_dev_iter_next(seqf->private);
	if (dev)
		return dev_to_disk(dev);

	return NULL;
}

static void disk_seqf_stop(struct seq_file *seqf, void *v)
{
	struct class_dev_iter *iter = seqf->private;

	/* stop is called even after start failed :-( */
	if (iter) {
		class_dev_iter_exit(iter);
		kfree(iter);
		seqf->private = NULL;
	}
}

static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
{
	void *p;

	p = disk_seqf_start(seqf, pos);
	if (!IS_ERR_OR_NULL(p) && !*pos)
		seq_puts(seqf, "major minor #blocks name\n\n");
	return p;
}

static int show_partition(struct seq_file *seqf, void *v)
{
	struct gendisk *sgp = v;
	struct block_device *part;
	unsigned long idx;
	char buf[BDEVNAME_SIZE];

	/* Don't show non-partitionable removable devices or empty devices */
	if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
				   (sgp->flags & GENHD_FL_REMOVABLE)))
		return 0;
	if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
		return 0;

	rcu_read_lock();
	xa_for_each(&sgp->part_tbl, idx, part) {
		if (!bdev_nr_sectors(part))
			continue;
		seq_printf(seqf, "%4d %7d %10llu %s\n",
			   MAJOR(part->bd_dev), MINOR(part->bd_dev),
			   bdev_nr_sectors(part) >> 1,
			   disk_name(sgp, part->bd_partno, buf));
	}
	rcu_read_unlock();
	return 0;
}

static const struct seq_operations partitions_op = {
	.start	= show_partition_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= show_partition
};
#endif

static int __init genhd_device_init(void)
{
	int error;

	block_class.dev_kobj = sysfs_dev_block_kobj;
	error = class_register(&block_class);
	if (unlikely(error))
		return error;
	blk_dev_init();

	register_blkdev(BLOCK_EXT_MAJOR, "blkext");

	/* create top-level block dir */
	if (!sysfs_deprecated)
		block_depr = kobject_create_and_add("block", NULL);
	return 0;
}

subsys_initcall(genhd_device_init);

static ssize_t disk_range_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", disk->minors);
}

static ssize_t disk_ext_range_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", disk_max_parts(disk));
}

static ssize_t disk_removable_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n",
		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
}

static ssize_t disk_hidden_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n",
		       (disk->flags & GENHD_FL_HIDDEN ? 1 : 0));
}

static ssize_t disk_ro_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
}

ssize_t part_size_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", bdev_nr_sectors(dev_to_bdev(dev)));
}

ssize_t part_stat_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev->bd_disk->queue;
	struct disk_stats stat;
	unsigned int inflight;

	part_stat_read_all(bdev, &stat);
	if (queue_is_mq(q))
		inflight = blk_mq_in_flight(q, bdev);
	else
		inflight = part_in_flight(bdev);

	return sprintf(buf,
		"%8lu %8lu %8llu %8u "
		"%8lu %8lu %8llu %8u "
		"%8u %8u %8u "
		"%8lu %8lu %8llu %8u "
		"%8lu %8u"
		"\n",
		stat.ios[STAT_READ],
		stat.merges[STAT_READ],
		(unsigned long long)stat.sectors[STAT_READ],
		(unsigned int)div_u64(stat.nsecs[STAT_READ], NSEC_PER_MSEC),
		stat.ios[STAT_WRITE],
		stat.merges[STAT_WRITE],
		(unsigned long long)stat.sectors[STAT_WRITE],
		(unsigned int)div_u64(stat.nsecs[STAT_WRITE], NSEC_PER_MSEC),
		inflight,
		jiffies_to_msecs(stat.io_ticks),
		(unsigned int)div_u64(stat.nsecs[STAT_READ] +
				      stat.nsecs[STAT_WRITE] +
				      stat.nsecs[STAT_DISCARD] +
				      stat.nsecs[STAT_FLUSH],
				      NSEC_PER_MSEC),
		stat.ios[STAT_DISCARD],
		stat.merges[STAT_DISCARD],
		(unsigned long long)stat.sectors[STAT_DISCARD],
		(unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
		stat.ios[STAT_FLUSH],
		(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
}

ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct block_device *bdev = dev_to_bdev(dev);
	struct request_queue *q = bdev->bd_disk->queue;
	unsigned int inflight[2];

	if (queue_is_mq(q))
		blk_mq_in_flight_rw(q, bdev, inflight);
	else
		part_in_flight_rw(bdev, inflight);

	return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
}

static ssize_t disk_capability_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%x\n", disk->flags);
}

static ssize_t disk_alignment_offset_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
}

static ssize_t disk_discard_alignment_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
}

static DEVICE_ATTR(range, 0444, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL);
static DEVICE_ATTR(hidden, 0444, disk_hidden_show, NULL);
static DEVICE_ATTR(ro, 0444, disk_ro_show, NULL);
static DEVICE_ATTR(size, 0444, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, 0444, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL);
static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL);
static DEVICE_ATTR(stat, 0444, part_stat_show, NULL);
static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL);
static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store);

#ifdef CONFIG_FAIL_MAKE_REQUEST
ssize_t part_fail_show(struct device *dev,
		       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail);
}

ssize_t part_fail_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int i;

	if (count > 0 && sscanf(buf, "%d", &i) > 0)
		dev_to_bdev(dev)->bd_make_it_fail = i;

	return count;
}

static struct device_attribute dev_attr_fail =
	__ATTR(make-it-fail, 0644, part_fail_show, part_fail_store);
#endif

#ifdef CONFIG_FAIL_IO_TIMEOUT
static struct device_attribute dev_attr_fail_timeout =
	__ATTR(io-timeout-fail, 0644, part_timeout_show, part_timeout_store);
#endif

static struct attribute *disk_attrs[] = {
	&dev_attr_range.attr,
	&dev_attr_ext_range.attr,
	&dev_attr_removable.attr,
	&dev_attr_hidden.attr,
	&dev_attr_ro.attr,
	&dev_attr_size.attr,
	&dev_attr_alignment_offset.attr,
	&dev_attr_discard_alignment.attr,
	&dev_attr_capability.attr,
	&dev_attr_stat.attr,
	&dev_attr_inflight.attr,
	&dev_attr_badblocks.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
	&dev_attr_fail.attr,
#endif
#ifdef CONFIG_FAIL_IO_TIMEOUT
	&dev_attr_fail_timeout.attr,
#endif
	NULL
};

static umode_t disk_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct gendisk *disk = dev_to_disk(dev);

	if (a == &dev_attr_badblocks.attr && !disk->bb)
		return 0;
	return a->mode;
}

static struct attribute_group disk_attr_group = {
	.attrs = disk_attrs,
	.is_visible = disk_visible,
};

static const struct attribute_group *disk_attr_groups[] = {
	&disk_attr_group,
	NULL
};

/**
 * disk_release - releases all allocated resources of the gendisk
 * @dev: the device representing this disk
 *
 * This function releases all allocated resources of the gendisk.
 *
 * Drivers which used __device_add_disk() have a gendisk with a request_queue
 * assigned.  Since the request_queue sits on top of the gendisk for these
 * drivers we also call blk_put_queue() for them, and we expect the
 * request_queue refcount to reach 0 at this point, and so the request_queue
 * will also be freed prior to the disk.
 *
 * Context: can sleep
 */
static void disk_release(struct device *dev)
{
	struct gendisk *disk = dev_to_disk(dev);

	might_sleep();

	blk_free_devt(dev->devt);
	disk_release_events(disk);
	kfree(disk->random);
	xa_destroy(&disk->part_tbl);
	if (disk->queue)
		blk_put_queue(disk->queue);
	bdput(disk->part0);	/* frees the disk */
}
struct class block_class = {
	.name		= "block",
};

static char *block_devnode(struct device *dev, umode_t *mode,
			   kuid_t *uid, kgid_t *gid)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (disk->fops->devnode)
		return disk->fops->devnode(disk, mode);
	return NULL;
}

const struct device_type disk_type = {
	.name		= "disk",
	.groups		= disk_attr_groups,
	.release	= disk_release,
	.devnode	= block_devnode,
};

#ifdef CONFIG_PROC_FS
/*
 * aggregate disk stat collector.  Uses the same stats that the sysfs
 * entries do, above, but makes them available through one seq_file.
 *
 * The output looks suspiciously like /proc/partitions with a bunch of
 * extra fields.
 */
static int diskstats_show(struct seq_file *seqf, void *v)
{
	struct gendisk *gp = v;
	struct block_device *hd;
	char buf[BDEVNAME_SIZE];
	unsigned int inflight;
	struct disk_stats stat;
	unsigned long idx;

	rcu_read_lock();
	xa_for_each(&gp->part_tbl, idx, hd) {
		if (bdev_is_partition(hd) && !bdev_nr_sectors(hd))
			continue;
		part_stat_read_all(hd, &stat);
		if (queue_is_mq(gp->queue))
			inflight = blk_mq_in_flight(gp->queue, hd);
		else
			inflight = part_in_flight(hd);

		seq_printf(seqf, "%4d %7d %s "
			   "%lu %lu %lu %u "
			   "%lu %lu %lu %u "
			   "%u %u %u "
			   "%lu %lu %lu %u "
			   "%lu %u"
			   "\n",
			   MAJOR(hd->bd_dev), MINOR(hd->bd_dev),
			   disk_name(gp, hd->bd_partno, buf),
			   stat.ios[STAT_READ],
			   stat.merges[STAT_READ],
			   stat.sectors[STAT_READ],
			   (unsigned int)div_u64(stat.nsecs[STAT_READ],
						 NSEC_PER_MSEC),
			   stat.ios[STAT_WRITE],
			   stat.merges[STAT_WRITE],
			   stat.sectors[STAT_WRITE],
			   (unsigned int)div_u64(stat.nsecs[STAT_WRITE],
						 NSEC_PER_MSEC),
			   inflight,
			   jiffies_to_msecs(stat.io_ticks),
			   (unsigned int)div_u64(stat.nsecs[STAT_READ] +
						 stat.nsecs[STAT_WRITE] +
						 stat.nsecs[STAT_DISCARD] +
						 stat.nsecs[STAT_FLUSH],
						 NSEC_PER_MSEC),
			   stat.ios[STAT_DISCARD],
			   stat.merges[STAT_DISCARD],
			   stat.sectors[STAT_DISCARD],
			   (unsigned int)div_u64(stat.nsecs[STAT_DISCARD],
						 NSEC_PER_MSEC),
			   stat.ios[STAT_FLUSH],
			   (unsigned int)div_u64(stat.nsecs[STAT_FLUSH],
						 NSEC_PER_MSEC)
			);
	}
	rcu_read_unlock();

	return 0;
}

static const struct seq_operations diskstats_op = {
	.start	= disk_seqf_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= diskstats_show
};

static int __init proc_genhd_init(void)
{
	proc_create_seq("diskstats", 0, NULL, &diskstats_op);
	proc_create_seq("partitions", 0, NULL, &partitions_op);
	return 0;
}
module_init(proc_genhd_init);
#endif

dev_t blk_lookup_devt(const char *name, int partno)
{
	dev_t devt = MKDEV(0, 0);
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
	while ((dev = class_dev_iter_next(&iter))) {
		struct gendisk *disk = dev_to_disk(dev);
		struct block_device *part;

		if (strcmp(dev_name(dev), name))
			continue;

		if (partno < disk->minors) {
			/* We need to return the right devno, even
			 * if the partition doesn't exist yet.
			 */
			devt = MKDEV(MAJOR(dev->devt),
				     MINOR(dev->devt) + partno);
			break;
		}
		part = bdget_disk(disk, partno);
		if (part) {
			devt = part->bd_dev;
			bdput(part);
			break;
		}
	}
	class_dev_iter_exit(&iter);
	return devt;
}

struct gendisk *__alloc_disk_node(int minors, int node_id)
{
	struct gendisk *disk;

	if (minors > DISK_MAX_PARTS) {
		printk(KERN_ERR
		       "block: can't allocate more than %d partitions\n",
		       DISK_MAX_PARTS);
		minors = DISK_MAX_PARTS;
	}

	disk = kzalloc_node(sizeof(struct gendisk), GFP_KERNEL, node_id);
	if (!disk)
		return NULL;

	disk->part0 = bdev_alloc(disk, 0);
	if (!disk->part0)
		goto out_free_disk;

	disk->node_id = node_id;
	xa_init(&disk->part_tbl);
	if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
		goto out_destroy_part_tbl;

	disk->minors = minors;
	rand_initialize_disk(disk);
	disk_to_dev(disk)->class = &block_class;
	disk_to_dev(disk)->type = &disk_type;
	device_initialize(disk_to_dev(disk));
	return disk;

out_destroy_part_tbl:
	xa_destroy(&disk->part_tbl);
	bdput(disk->part0);
out_free_disk:
	kfree(disk);
	return NULL;
}
EXPORT_SYMBOL(__alloc_disk_node);

/**
 * put_disk - decrements the gendisk refcount
 * @disk: the struct gendisk to decrement the refcount for
 *
 * This decrements the refcount for the struct gendisk.  When this reaches 0
 * we'll have disk_release() called.
 *
 * Context: Any context, but the last reference must not be dropped from
 *          atomic context.
 */
void put_disk(struct gendisk *disk)
{
	if (disk)
		put_device(disk_to_dev(disk));
}
EXPORT_SYMBOL(put_disk);

static void set_disk_ro_uevent(struct gendisk *gd, int ro)
{
	char event[] = "DISK_RO=1";
	char *envp[] = { event, NULL };

	if (!ro)
		event[8] = '0';
	kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
}

/**
 * set_disk_ro - set a gendisk read-only
 * @disk:	gendisk to operate on
 * @read_only:	%true to set the disk read-only, %false set the disk read/write
 *
 * This function is used to indicate whether a given disk device should have
 * its read-only flag set.  set_disk_ro() is typically used by device drivers
 * to indicate whether the underlying physical device is write-protected.
 */
void set_disk_ro(struct gendisk *disk, bool read_only)
{
	if (read_only) {
		if (test_and_set_bit(GD_READ_ONLY, &disk->state))
			return;
	} else {
		if (!test_and_clear_bit(GD_READ_ONLY, &disk->state))
			return;
	}
	set_disk_ro_uevent(disk, read_only);
}
EXPORT_SYMBOL(set_disk_ro);
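
/*
 * Illustrative sketch: a driver that discovers its media is write protected
 * could simply do (the sketch_* helper is hypothetical, not from this file):
 *
 *	set_disk_ro(disk, sketch_media_is_write_protected(dev));
 */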

int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}
EXPORT_SYMBOL(bdev_read_only);

/*
 * Disk events - monitor disk events like media change and eject request.
 */
struct disk_events {
	struct list_head	node;		/* entry in global disk_events list */
	struct gendisk		*disk;		/* the associated disk */
	spinlock_t		lock;

	struct mutex		block_mutex;	/* protects blocking */
	int			block;		/* event blocking depth */
	unsigned int		pending;	/* events already sent out */
	unsigned int		clearing;	/* events being cleared */

	long			poll_msecs;	/* interval, -1 for default */
	struct delayed_work	dwork;
};

static const char *disk_events_strs[] = {
	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "media_change",
	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "eject_request",
};

static char *disk_uevents[] = {
	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "DISK_MEDIA_CHANGE=1",
	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "DISK_EJECT_REQUEST=1",
};

/* list of all disk_events */
static DEFINE_MUTEX(disk_events_mutex);
static LIST_HEAD(disk_events);

/* disk event polling is disabled by default */
static unsigned long disk_events_dfl_poll_msecs;

static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
{
	struct disk_events *ev = disk->ev;
	long intv_msecs = 0;

	/*
	 * If device-specific poll interval is set, always use it.  If
	 * the default is being used, poll if the POLL flag is set.
	 */
	if (ev->poll_msecs >= 0)
		intv_msecs = ev->poll_msecs;
	else if (disk->event_flags & DISK_EVENT_FLAG_POLL)
		intv_msecs = disk_events_dfl_poll_msecs;

	return msecs_to_jiffies(intv_msecs);
}

/**
 * disk_block_events - block and flush disk event checking
 * @disk: disk to block events for
 *
 * On return from this function, it is guaranteed that event checking
 * isn't in progress and won't happen until unblocked by
 * disk_unblock_events().  Events blocking is counted and the actual
 * unblocking happens after the matching number of unblocks are done.
 *
 * Note that this intentionally does not block event checking from
 * disk_clear_events().
 *
 * CONTEXT:
 * Might sleep.
 */
void disk_block_events(struct gendisk *disk)
{
	struct disk_events *ev = disk->ev;
	unsigned long flags;
	bool cancel;

	if (!ev)
		return;

	/*
	 * Outer mutex ensures that the first blocker completes canceling
	 * the event work before further blockers are allowed to finish.
	 */
	mutex_lock(&ev->block_mutex);

	spin_lock_irqsave(&ev->lock, flags);
	cancel = !ev->block++;
	spin_unlock_irqrestore(&ev->lock, flags);

	if (cancel)
		cancel_delayed_work_sync(&disk->ev->dwork);

	mutex_unlock(&ev->block_mutex);
}

static void __disk_unblock_events(struct gendisk *disk, bool check_now)
{
	struct disk_events *ev = disk->ev;
	unsigned long intv;
	unsigned long flags;

	spin_lock_irqsave(&ev->lock, flags);

	if (WARN_ON_ONCE(ev->block <= 0))
		goto out_unlock;

	if (--ev->block)
		goto out_unlock;

	intv = disk_events_poll_jiffies(disk);
	if (check_now)
		queue_delayed_work(system_freezable_power_efficient_wq,
				   &ev->dwork, 0);
	else if (intv)
		queue_delayed_work(system_freezable_power_efficient_wq,
				   &ev->dwork, intv);
out_unlock:
	spin_unlock_irqrestore(&ev->lock, flags);
}

/**
 * disk_unblock_events - unblock disk event checking
 * @disk: disk to unblock events for
 *
 * Undo disk_block_events().  When the block count reaches zero, it
 * starts events polling if configured.
 *
 * CONTEXT:
 * Don't care.  Safe to call from irq context.
 */
void disk_unblock_events(struct gendisk *disk)
{
	if (disk->ev)
		__disk_unblock_events(disk, false);
}

/**
 * disk_flush_events - schedule immediate event checking and flushing
 * @disk: disk to check and flush events for
 * @mask: events to flush
 *
 * Schedule immediate event checking on @disk if not blocked.  Events in
 * @mask are scheduled to be cleared from the driver.  Note that this
 * doesn't clear the events from @disk->ev.
 *
 * CONTEXT:
 * If @mask is non-zero must be called with bdev->bd_mutex held.
 */
void disk_flush_events(struct gendisk *disk, unsigned int mask)
{
	struct disk_events *ev = disk->ev;

	if (!ev)
		return;

	spin_lock_irq(&ev->lock);
	ev->clearing |= mask;
	if (!ev->block)
		mod_delayed_work(system_freezable_power_efficient_wq,
				 &ev->dwork, 0);
	spin_unlock_irq(&ev->lock);
}

/**
 * disk_clear_events - synchronously check, clear and return pending events
 * @disk: disk to fetch and clear events from
 * @mask: mask of events to be fetched and cleared
 *
 * Disk events are synchronously checked and pending events in @mask
 * are cleared and returned.  This ignores the block count.
 *
 * CONTEXT:
 * Might sleep.
 */
static unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
{
	struct disk_events *ev = disk->ev;
	unsigned int pending;
	unsigned int clearing = mask;

	if (!ev)
		return 0;

	disk_block_events(disk);

	/*
	 * store the union of mask and ev->clearing on the stack so that the
	 * race with disk_flush_events does not cause ambiguity (ev->clearing
	 * can still be modified even if events are blocked).
	 */
	spin_lock_irq(&ev->lock);
	clearing |= ev->clearing;
	ev->clearing = 0;
	spin_unlock_irq(&ev->lock);

	disk_check_events(ev, &clearing);
	/*
	 * if ev->clearing is not 0, the disk_flush_events got called in the
	 * middle of this function, so we want to run the workfn without delay.
	 */
	__disk_unblock_events(disk, ev->clearing ? true : false);

	/* then, fetch and clear pending events */
	spin_lock_irq(&ev->lock);
	pending = ev->pending & mask;
	ev->pending &= ~mask;
	spin_unlock_irq(&ev->lock);
	WARN_ON_ONCE(clearing & mask);

	return pending;
}

/**
 * bdev_check_media_change - check if a removable media has been changed
 * @bdev: block device to check
 *
 * Check whether a removable media has been changed, and attempt to free all
 * dentries and inodes and invalidate all block device page cache entries in
 * that case.
 *
 * Returns %true if the block device changed, or %false if not.
 */
bool bdev_check_media_change(struct block_device *bdev)
{
	unsigned int events;

	events = disk_clear_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE |
				   DISK_EVENT_EJECT_REQUEST);
	if (!(events & DISK_EVENT_MEDIA_CHANGE))
		return false;

	if (__invalidate_device(bdev, true))
		pr_warn("VFS: busy inodes on changed media %s\n",
			bdev->bd_disk->disk_name);
	set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
	return true;
}
EXPORT_SYMBOL(bdev_check_media_change);
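
/*
 * Illustrative sketch of a removable-media driver's ->open() using the
 * helper above (the sketch_* names are hypothetical, not from this file):
 *
 *	static int sketch_open(struct block_device *bdev, fmode_t mode)
 *	{
 *		if (bdev_check_media_change(bdev))
 *			sketch_revalidate(bdev->bd_disk);
 *		return 0;
 *	}
 */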

/*
 * Separate this part out so that a different pointer for clearing_ptr can be
 * passed in for disk_clear_events.
 */
static void disk_events_workfn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);

	disk_check_events(ev, &ev->clearing);
}

static void disk_check_events(struct disk_events *ev,
			      unsigned int *clearing_ptr)
{
	struct gendisk *disk = ev->disk;
	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
	unsigned int clearing = *clearing_ptr;
	unsigned int events;
	unsigned long intv;
	int nr_events = 0, i;

	/* check events */
	events = disk->fops->check_events(disk, clearing);

	/* accumulate pending events and schedule next poll if necessary */
	spin_lock_irq(&ev->lock);

	events &= ~ev->pending;
	ev->pending |= events;
	*clearing_ptr &= ~clearing;

	intv = disk_events_poll_jiffies(disk);
	if (!ev->block && intv)
		queue_delayed_work(system_freezable_power_efficient_wq,
				   &ev->dwork, intv);

	spin_unlock_irq(&ev->lock);

	/*
	 * Tell userland about new events.  Only the events listed in
	 * @disk->events are reported, and only if DISK_EVENT_FLAG_UEVENT
	 * is set.  Otherwise, events are processed internally but never
	 * get reported to userland.
	 */
	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
		if ((events & disk->events & (1 << i)) &&
		    (disk->event_flags & DISK_EVENT_FLAG_UEVENT))
			envp[nr_events++] = disk_uevents[i];

	if (nr_events)
		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
}

/*
 * A disk events enabled device has the following sysfs nodes under
 * its /sys/block/X/ directory.
 *
 * events		: list of all supported events
 * events_async		: list of events which can be detected w/o polling
 *			  (always empty, only for backwards compatibility)
 * events_poll_msecs	: polling interval, 0: disable, -1: system default
 */
static ssize_t __disk_events_show(unsigned int events, char *buf)
{
	const char *delim = "";
	ssize_t pos = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
		if (events & (1 << i)) {
			pos += sprintf(buf + pos, "%s%s",
				       delim, disk_events_strs[i]);
			delim = " ";
		}
	if (pos)
		pos += sprintf(buf + pos, "\n");
	return pos;
}

static ssize_t disk_events_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!(disk->event_flags & DISK_EVENT_FLAG_UEVENT))
		return 0;

	return __disk_events_show(disk->events, buf);
}

static ssize_t disk_events_async_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return 0;
}

static ssize_t disk_events_poll_msecs_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	if (!disk->ev)
		return sprintf(buf, "-1\n");

	return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
}

static ssize_t disk_events_poll_msecs_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	long intv;

	if (!count || !sscanf(buf, "%ld", &intv))
		return -EINVAL;

	if (intv < 0 && intv != -1)
		return -EINVAL;

	if (!disk->ev)
		return -ENODEV;

	disk_block_events(disk);
	disk->ev->poll_msecs = intv;
	__disk_unblock_events(disk, true);

	return count;
}

static const DEVICE_ATTR(events, 0444, disk_events_show, NULL);
static const DEVICE_ATTR(events_async, 0444, disk_events_async_show, NULL);
static const DEVICE_ATTR(events_poll_msecs, 0644,
			 disk_events_poll_msecs_show,
			 disk_events_poll_msecs_store);

static const struct attribute *disk_events_attrs[] = {
	&dev_attr_events.attr,
	&dev_attr_events_async.attr,
	&dev_attr_events_poll_msecs.attr,
	NULL,
};

/*
 * The default polling interval can be specified by the kernel
 * parameter block.events_dfl_poll_msecs which defaults to 0
 * (disable).  This can also be modified runtime by writing to
 * /sys/module/block/parameters/events_dfl_poll_msecs.
 */
static int disk_events_set_dfl_poll_msecs(const char *val,
					  const struct kernel_param *kp)
{
	struct disk_events *ev;
	int ret;

	ret = param_set_ulong(val, kp);
	if (ret < 0)
		return ret;

	mutex_lock(&disk_events_mutex);

	list_for_each_entry(ev, &disk_events, node)
		disk_flush_events(ev->disk, 0);

	mutex_unlock(&disk_events_mutex);

	return 0;
}

static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
	.set	= disk_events_set_dfl_poll_msecs,
	.get	= param_get_ulong,
};

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"block."

module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
		&disk_events_dfl_poll_msecs, 0644);

/*
 * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
 */
static void disk_alloc_events(struct gendisk *disk)
{
	struct disk_events *ev;

	if (!disk->fops->check_events || !disk->events)
		return;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		pr_warn("%s: failed to initialize events\n", disk->disk_name);
		return;
	}

	INIT_LIST_HEAD(&ev->node);
	ev->disk = disk;
	spin_lock_init(&ev->lock);
	mutex_init(&ev->block_mutex);
	ev->block = 1;
	ev->poll_msecs = -1;
	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);

	disk->ev = ev;
}

static void disk_add_events(struct gendisk *disk)
{
	/* FIXME: error handling */
	if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
		pr_warn("%s: failed to create sysfs files for events\n",
			disk->disk_name);

	if (!disk->ev)
		return;

	mutex_lock(&disk_events_mutex);
	list_add_tail(&disk->ev->node, &disk_events);
	mutex_unlock(&disk_events_mutex);

	/*
	 * Block count is initialized to 1 and the following initial
	 * unblock kicks it into action.
	 */
	__disk_unblock_events(disk, true);
}

static void disk_del_events(struct gendisk *disk)
{
	if (disk->ev) {
		disk_block_events(disk);

		mutex_lock(&disk_events_mutex);
		list_del_init(&disk->ev->node);
		mutex_unlock(&disk_events_mutex);
	}

	sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}

static void disk_release_events(struct gendisk *disk)
{
	/* the block count should be 1 from disk_del_events() */
	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
	kfree(disk->ev);
}