// SPDX-License-Identifier: GPL-2.0-only
/*
 * Ram backed block device driver.
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/debugfs.h>

#include <linux/uaccess.h>

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
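/*
 * Each brd device keeps the contents of the block device in a radix tree
 * of pages, indexed by page offset into the device (sector >>
 * PAGE_SECTORS_SHIFT).  Pages are allocated lazily on first write; pages
 * that were never written read back as zeroes.
 */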
struct brd_device {
	int			brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and the lock that protects it. This is the
	 * contents of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
	u64			brd_nr_pages;
};
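/*
 * Look up and return a brd's page for a given sector, or NULL if it has
 * not been allocated yet.
 */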
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages are never deleted while the device is in
	 * use, so no further locking or refcounting is needed here.  The
	 * rcu_read_lock() guards the radix tree walk itself against
	 * concurrent inserts.
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}
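/*
 * Look up and return a brd's page for a given sector.  If one does not
 * exist, allocate a zeroed page, insert it, and return it.  Returns NULL
 * only on allocation failure.
 */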
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page->index = idx;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		/* Lost a race with a concurrent insert: use the winner's page. */
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	} else {
		brd->brd_nr_pages++;
	}
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}
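/*
 * Free all backing store pages and the radix tree.  Must only be called
 * when there are no other users of the device.
 */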
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * Removing a large ramdisk frees a lot of pages in one go,
		 * so give the scheduler a chance to run to avoid stalling
		 * the CPU.
		 */
		cond_resched();

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible; once it returns a short batch,
		 * the tree is empty and we are done.
		 */
	} while (nr_pages == FREE_BATCH);
}
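/*
 * copy_to_brd_setup must be called before copy_to_brd; it pre-allocates
 * the backing page(s) for the range and may sleep.
 */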
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOSPC;
	if (copy < n) {
		/* The range straddles a page boundary; set up the second page too. */
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOSPC;
	}
	return 0;
}
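/*
 * Copy n bytes from src to the brd starting at sector.  Does not sleep,
 * so the backing pages must already exist (see copy_to_brd_setup()).
 */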
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst);

	if (copy < n) {
		/* The range crosses a page boundary; copy the remainder. */
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page);
		memcpy(dst, src, copy);
		kunmap_atomic(dst);
	}
}
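/*
 * Copy n bytes to dst from the brd starting at sector.  Does not sleep.
 * Sectors whose backing page was never allocated read back as zeroes.
 */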
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page);
			memcpy(dst, src, copy);
			kunmap_atomic(src);
		} else
			memset(dst, 0, copy);
	}
}
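/*
 * Process a single segment of a bio.
 */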
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (op_is_write(op)) {
		/*
		 * Pre-allocate the backing pages before taking the atomic
		 * kmap below, since the allocation may sleep.
		 */
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!op_is_write(op)) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem);

out:
	return err;
}

static blk_qc_t brd_submit_bio(struct bio *bio)
{
	struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
	sector_t sector = bio->bi_iter.bi_sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;
		int err;

		/* Don't support un-aligned buffers */
		WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
				(len & (SECTOR_SIZE - 1)));

		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;
io_error:
	bio_io_error(bio);
	return BLK_QC_T_NONE;
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err;

	if (PageTransHuge(page))
		return -ENOTSUPP;
	err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
	page_endio(page, op_is_write(op), err);
	return err;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		brd_submit_bio,
	.rw_page =		brd_rw_page,
};

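/*
 * Module parameters and kernel interface.
 *
 * Example (values are illustrative only): loading with
 *
 *	modprobe brd rd_nr=4 rd_size=1048576
 *
 * creates /dev/ram0 .. /dev/ram3, each 1 GiB in size.
 */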
static int rd_nr = CONFIG_BLK_DEV_RAM_COUNT;
module_param(rd_nr, int, 0444);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");

unsigned long rd_size = CONFIG_BLK_DEV_RAM_SIZE;
module_param(rd_size, ulong, 0444);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");

static int max_part = 1;
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Number of minors to reserve between devices");

MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot option (non-modular): ramdisk_size=<kbytes> */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
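/*
 * Global list of all brd devices, protected by brd_devices_mutex.
 */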
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;

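/*
 * Allocate and initialise one brd device (queue, gendisk, page tree).
 * The disk is not registered here; the caller calls add_disk().
 */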
static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;
	char buf[DISK_NAME_LEN];

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!brd->brd_queue)
		goto out_free_dev;

	/* Expose each disk's page count under debugfs ramdisk_pages/ */
	snprintf(buf, DISK_NAME_LEN, "ram%d", i);
	if (!IS_ERR_OR_NULL(brd_debugfs_dir))
		debugfs_create_u64(buf, 0444, brd_debugfs_dir,
				&brd->brd_nr_pages);

	/*
	 * Advertise a PAGE_SIZE physical block size: the backing store is
	 * managed in whole pages, and this encourages partitioning tools
	 * to align partitions accordingly.
	 */
	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
	disk = brd->brd_disk = alloc_disk(max_part);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i * max_part;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->flags		= GENHD_FL_EXT_DEVT;
	strlcpy(disk->disk_name, buf, DISK_NAME_LEN);
	set_capacity(disk, rd_size * 2);	/* rd_size is in KiB; capacity in 512-byte sectors */

	/* Tell the block layer that this is not a rotational device */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

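/*
 * Called by the block layer when a device node with RAMDISK_MAJOR is
 * opened but no matching disk exists yet; instantiates the device on
 * demand.
 */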
static void brd_probe(dev_t dev)
{
	struct brd_device *brd;
	int i = MINOR(dev) / max_part;

	mutex_lock(&brd_devices_mutex);
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out_unlock;
	}

	brd = brd_alloc(i);
	if (brd) {
		brd->brd_disk->queue = brd->brd_queue;
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}

out_unlock:
	mutex_unlock(&brd_devices_mutex);
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static inline void brd_check_and_reset_par(void)
{
	if (unlikely(!max_part))
		max_part = 1;

	/*
	 * Make sure 'max_part' divides (1U << MINORBITS) exactly, otherwise
	 * it is possible to get the same dev_t for two different partitions;
	 * if it does not, round it up to the next power of two.
	 */
	if ((1U << MINORBITS) % max_part != 0)
		max_part = 1UL << fls(max_part);

	if (max_part > DISK_MAX_PARTS) {
		pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
			DISK_MAX_PARTS, DISK_MAX_PARTS);
		max_part = DISK_MAX_PARTS;
	}
}

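/*
 * brd devices come into existence in two ways: rd_nr of them are created
 * at load time in brd_init(), and further ones are created on demand when
 * an unclaimed /dev/ram<N> node is opened (see brd_probe(), installed by
 * __register_blkdev() below).  For example (illustrative), after
 * "mknod /dev/ram12 b 1 12", opening that node should create ram12.
 */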
static int __init brd_init(void)
{
	struct brd_device *brd, *next;
	int i;

	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
		return -EIO;

	brd_check_and_reset_par();

	brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);

	mutex_lock(&brd_devices_mutex);
	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list) {
		/*
		 * Associate with the queue just before adding the disk, to
		 * avoid complicating the failure path.
		 */
		brd->brd_disk->queue = brd->brd_queue;
		add_disk(brd->brd_disk);
	}
	mutex_unlock(&brd_devices_mutex);

	pr_info("brd: module loaded\n");
	return 0;

out_free:
	debugfs_remove_recursive(brd_debugfs_dir);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	mutex_unlock(&brd_devices_mutex);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");
	return -ENOMEM;
}

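/*
 * Tear down every device and release the major number on module unload.
 */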
static void __exit brd_exit(void)
{
	struct brd_device *brd, *next;

	debugfs_remove_recursive(brd_debugfs_dir);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
}

module_init(brd_init);
module_exit(brd_exit);