/*
 * mtdblock.c: caching read/erase/writeback block device emulation
 * access to MTD devices
 *
 * (C) Nicolas Pitre <nico@cam.org> et al.
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#define MAJOR_NR MTD_BLOCK_MAJOR
#define DEVICE_NAME "mtdblock"
#define DEVICE_REQUEST mtdblock_request
#define DEVICE_NR(device) (device)
#define DEVICE_ON(device)
#define DEVICE_OFF(device)
#define DEVICE_NO_RANDOM
#include <linux/blk.h>

#ifndef QUEUE_EMPTY
#define QUEUE_EMPTY (!CURRENT)
#endif
#if LINUX_VERSION_CODE < 0x20300
#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].plug_tq.sync)
#else
#define QUEUE_PLUGGED (blk_dev[MAJOR_NR].request_queue.plugged)
#endif

#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>
static void mtd_notify_add(struct mtd_info *mtd);
static void mtd_notify_remove(struct mtd_info *mtd);
static struct mtd_notifier notifier = {
	mtd_notify_add,
	mtd_notify_remove,
	NULL
};
static devfs_handle_t devfs_dir_handle = NULL;
static devfs_handle_t devfs_rw_handle[MAX_MTD_DEVICES];
#endif

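/*
 * Per-device state: the underlying MTD device, an open count, and a
 * one-erase-block write-back cache protected by cache_sem.
 */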
static struct mtdblk_dev {
	struct mtd_info *mtd;
	int count;
	struct semaphore cache_sem;
	unsigned char *cache_data;
	unsigned long cache_offset;
	unsigned int cache_size;
	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];

static spinlock_t mtdblks_lock;

static int mtd_sizes[MAX_MTD_DEVICES];
static int mtd_blksizes[MAX_MTD_DEVICES];

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,14)
#define BLK_INC_USE_COUNT MOD_INC_USE_COUNT
#define BLK_DEC_USE_COUNT MOD_DEC_USE_COUNT
#else
#define BLK_INC_USE_COUNT do {} while(0)
#define BLK_DEC_USE_COUNT do {} while(0)
#endif

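/*
 * Cache strategy: flash erase blocks are typically much larger than the
 * 512-byte sectors the block layer deals in, so writes are implemented
 * as read-modify-write on whole erase blocks.  To avoid erasing flash
 * more often than necessary, one erase block is cached in RAM and only
 * written back (erase + program) when a different block is needed.
 */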
static void erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}

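/*
 * Erase the region [pos, pos+len) and then write buf into it, sleeping
 * until the erase completes.
 */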
static int erase_write(struct mtd_info *mtd, unsigned long pos,
		       int len, const char *buf)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	size_t retlen;
	int ret;

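	/* First, erase the affected region. */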
	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = erase_callback;
	erase.addr = pos;
	erase.len = len;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = MTD_ERASE(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk(KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
		       "on \"%s\" failed\n",
		       pos, len, mtd->name);
		return ret;
	}

	schedule();
	remove_wait_queue(&wait_q, &wait);

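	/* Next, write the data to flash. */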
	ret = MTD_WRITE(mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}

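/*
 * Flush the cached erase block back to flash if it holds unwritten data.
 */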
static int write_cached_data(struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
	      "at 0x%lx, size 0x%x\n", mtd->name,
	      mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write(mtd, mtdblk->cache_offset,
			  mtdblk->cache_size, mtdblk->cache_data);
	if (ret)
		return ret;

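	/*
	 * Mark the cache empty rather than clean: the flash contents can
	 * be changed by other means without our knowledge, so the cached
	 * copy cannot be trusted after write-back.
	 */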
	mtdblk->cache_state = STATE_EMPTY;
	return 0;
}

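/*
 * Write len bytes at pos through the erase-block cache, flushing and
 * refilling the cache as the write crosses block boundaries.
 */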
static int do_cached_write(struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
	      mtd->name, pos, len);

	if (!sect_size)
		return MTD_WRITE(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		if (size == sect_size) {
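			/*
			 * Whole-sector write: no need to involve the cache,
			 * which may still be useful for other partial writes.
			 */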
			ret = erase_write(mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
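			/* Partial sector: go through the cache. */
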
			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
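				/* Fill the cache with the current sector. */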
				mtdblk->cache_state = STATE_EMPTY;
				ret = MTD_READ(mtd, sect_start, sect_size,
					       &retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

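			/* Write into our local cache and mark it dirty. */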
			memcpy(mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}

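/*
 * Read len bytes at pos, serving from the cached erase block when it
 * holds the requested range.
 */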
static int do_cached_read(struct mtdblk_dev *mtdblk, unsigned long pos,
			  int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
	      mtd->name, pos, len);

	if (!sect_size)
		return MTD_READ(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

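		/*
		 * Serve from the cache if it holds this sector; otherwise
		 * read straight from flash.
		 */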
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy(buf, mtdblk->cache_data + offset, size);
		} else {
			ret = MTD_READ(mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}

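/*
 * Open: look up the MTD device for this minor, allocating the per-device
 * state and the erase-block cache on first open.
 */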
static int mtdblock_open(struct inode *inode, struct file *file)
{
	struct mtdblk_dev *mtdblk;
	struct mtd_info *mtd;
	int dev;

	DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_open\n");

	if (!inode)
		return -EINVAL;

	dev = MINOR(inode->i_rdev);
	if (dev >= MAX_MTD_DEVICES)
		return -EINVAL;

	BLK_INC_USE_COUNT;

	mtd = get_mtd_device(NULL, dev);
	if (!mtd) {
		BLK_DEC_USE_COUNT;
		return -ENODEV;
	}
	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		BLK_DEC_USE_COUNT;
		return -ENODEV;
	}

	spin_lock(&mtdblks_lock);

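	/* If the device is already open, just bump the use count. */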
	if (mtdblks[dev]) {
		mtdblks[dev]->count++;
		spin_unlock(&mtdblks_lock);
		put_mtd_device(mtd);
		return 0;
	}

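	/*
	 * First open: drop the spinlock while allocating, since the
	 * allocations may sleep.  Another opener may race with us, so the
	 * table is rechecked after the lock is retaken below.
	 */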
	spin_unlock(&mtdblks_lock);

	mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
	if (!mtdblk) {
		put_mtd_device(mtd);
		BLK_DEC_USE_COUNT;
		return -ENOMEM;
	}
	memset(mtdblk, 0, sizeof(*mtdblk));
	mtdblk->count = 1;
	mtdblk->mtd = mtd;

	init_MUTEX(&mtdblk->cache_sem);
	mtdblk->cache_state = STATE_EMPTY;
	if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM &&
	    mtdblk->mtd->erasesize) {
		mtdblk->cache_size = mtdblk->mtd->erasesize;
		mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
		if (!mtdblk->cache_data) {
			put_mtd_device(mtdblk->mtd);
			kfree(mtdblk);
			BLK_DEC_USE_COUNT;
			return -ENOMEM;
		}
	}

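	/* Retake the lock and check whether someone beat us to it. */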
	spin_lock(&mtdblks_lock);

	if (mtdblks[dev]) {
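		/* Lost the race: use the existing device, discard ours. */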
		mtdblks[dev]->count++;
		spin_unlock(&mtdblks_lock);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
		return 0;
	}

	mtdblks[dev] = mtdblk;
	mtd_sizes[dev] = mtdblk->mtd->size/1024;
	if (mtdblk->mtd->erasesize)
		mtd_blksizes[dev] = mtdblk->mtd->erasesize;
	if (mtd_blksizes[dev] > PAGE_SIZE)
		mtd_blksizes[dev] = PAGE_SIZE;
	set_device_ro(inode->i_rdev, !(mtdblk->mtd->flags & MTD_WRITEABLE));

	spin_unlock(&mtdblks_lock);

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	return 0;
}

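/*
 * Release: flush the cache, and on last close free the per-device state.
 */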
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
	int dev;
	struct mtdblk_dev *mtdblk;
	DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");

	if (inode == NULL)
		release_return(-ENODEV);

	dev = MINOR(inode->i_rdev);
	mtdblk = mtdblks[dev];

	down(&mtdblk->cache_sem);
	write_cached_data(mtdblk);
	up(&mtdblk->cache_sem);

	spin_lock(&mtdblks_lock);
	if (!--mtdblk->count) {
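		/* That was the last usage: free the device. */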
		mtdblks[dev] = NULL;
		spin_unlock(&mtdblks_lock);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		put_mtd_device(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
	} else {
		spin_unlock(&mtdblks_lock);
	}

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	BLK_DEC_USE_COUNT;
	release_return(0);
}

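/*
 * This is a special request function: it runs in mtdblockd's process
 * context so the MTD operations may sleep.  The io_request_lock is held
 * on entry and on exit; it is dropped around each transfer.
 */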
static void handle_mtdblock_request(void)
{
	struct request *req;
	struct mtdblk_dev *mtdblk;
	unsigned int res;

	for (;;) {
		INIT_REQUEST;
		req = CURRENT;
		spin_unlock_irq(&io_request_lock);

		if (MINOR(req->rq_dev) >= MAX_MTD_DEVICES)
			panic("%s: minor out of bounds", __FUNCTION__);

		mtdblk = mtdblks[MINOR(req->rq_dev)];
		res = 0;

		if ((req->sector + req->current_nr_sectors) > (mtdblk->mtd->size >> 9))
			goto end_req;

		switch (req->cmd)
		{
			int err;

		case READ:
			down(&mtdblk->cache_sem);
			err = do_cached_read(mtdblk, req->sector << 9,
					     req->current_nr_sectors << 9,
					     req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;

		case WRITE:
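			/* Refuse writes to read-only devices. */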
			if (!(mtdblk->mtd->flags & MTD_WRITEABLE))
				break;

			down(&mtdblk->cache_sem);
			err = do_cached_write(mtdblk, req->sector << 9,
					      req->current_nr_sectors << 9,
					      req->buffer);
			up(&mtdblk->cache_sem);
			if (!err)
				res = 1;
			break;
		}

end_req:
		spin_lock_irq(&io_request_lock);
		end_request(res);
	}
}

static volatile int leaving = 0;
static DECLARE_MUTEX_LOCKED(thread_sem);
static DECLARE_WAIT_QUEUE_HEAD(thr_wq);

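/*
 * mtdblockd: kernel thread that services the request queue in process
 * context, since flash reads, erases, and writes may sleep.
 */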
int mtdblock_thread(void *dummy)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

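	/* We may run when memory is low, so allow use of the reserves. */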
	tsk->flags |= PF_MEMALLOC;
	strcpy(tsk->comm, "mtdblockd");
	spin_lock_irq(&tsk->sigmask_lock);
	sigfillset(&tsk->blocked);
	recalc_sigpending(tsk);
	spin_unlock_irq(&tsk->sigmask_lock);
	daemonize();

	while (!leaving) {
		add_wait_queue(&thr_wq, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irq(&io_request_lock);
		if (QUEUE_EMPTY || QUEUE_PLUGGED) {
			spin_unlock_irq(&io_request_lock);
			schedule();
			remove_wait_queue(&thr_wq, &wait);
		} else {
			remove_wait_queue(&thr_wq, &wait);
			set_current_state(TASK_RUNNING);
			handle_mtdblock_request();
			spin_unlock_irq(&io_request_lock);
		}
	}

	up(&thread_sem);
	return 0;
}

#if LINUX_VERSION_CODE < 0x20300
#define RQFUNC_ARG void
#else
#define RQFUNC_ARG request_queue_t *q
#endif

static void mtdblock_request(RQFUNC_ARG)
{
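	/* Don't do anything here except wake mtdblockd. */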
	wake_up(&thr_wq);
}

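/*
 * ioctl: report the device size, and flush buffers and the write cache
 * on request.
 */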
static int mtdblock_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtdblk_dev *mtdblk;

	mtdblk = mtdblks[MINOR(inode->i_rdev)];

#ifdef PARANOIA
	if (!mtdblk)
		BUG();
#endif

	switch (cmd) {
	case BLKGETSIZE:
		return put_user((mtdblk->mtd->size >> 9), (unsigned long *) arg);

#ifdef BLKGETSIZE64
	case BLKGETSIZE64:
		return put_user((u64)mtdblk->mtd->size, (u64 *)arg);
#endif

	case BLKFLSBUF:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
#endif
		fsync_dev(inode->i_rdev);
		invalidate_buffers(inode->i_rdev);
		down(&mtdblk->cache_sem);
		write_cached_data(mtdblk);
		up(&mtdblk->cache_sem);
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		return 0;

	default:
		return -EINVAL;
	}
}

#if LINUX_VERSION_CODE < 0x20326
static struct file_operations mtd_fops =
{
	open: mtdblock_open,
	ioctl: mtdblock_ioctl,
	release: mtdblock_release,
	read: block_read,
	write: block_write
};
#else
static struct block_device_operations mtd_fops =
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14)
	owner: THIS_MODULE,
#endif
	open: mtdblock_open,
	release: mtdblock_release,
	ioctl: mtdblock_ioctl
};
#endif

#ifdef CONFIG_DEVFS_FS

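/*
 * Notification that an MTD device appeared or vanished: create or remove
 * the matching devfs block node.
 */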
static void mtd_notify_add(struct mtd_info *mtd)
{
	char name[8];

	if (!mtd || mtd->type == MTD_ABSENT)
		return;

	sprintf(name, "%d", mtd->index);
	devfs_rw_handle[mtd->index] = devfs_register(devfs_dir_handle, name,
			DEVFS_FL_DEFAULT, MTD_BLOCK_MAJOR, mtd->index,
			S_IFBLK | S_IRUGO | S_IWUGO,
			&mtd_fops, NULL);
}

static void mtd_notify_remove(struct mtd_info *mtd)
{
	if (!mtd || mtd->type == MTD_ABSENT)
		return;

	devfs_unregister(devfs_rw_handle[mtd->index]);
}
#endif

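/*
 * Module init: register the block major, set up the request queue, and
 * start the mtdblockd service thread.
 */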
int __init init_mtdblock(void)
{
	int i;

	spin_lock_init(&mtdblks_lock);
#ifdef CONFIG_DEVFS_FS
	if (devfs_register_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME, &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}

	devfs_dir_handle = devfs_mk_dir(NULL, DEVICE_NAME, NULL);
	register_mtd_user(&notifier);
#else
	if (register_blkdev(MAJOR_NR, DEVICE_NAME, &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_BLOCK_MAJOR);
		return -EAGAIN;
	}
#endif

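	/* Real device sizes are filled in at open() time. */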
	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		mtd_sizes[i] = 0;
		mtd_blksizes[i] = BLOCK_SIZE;
	}
	init_waitqueue_head(&thr_wq);

	blksize_size[MAJOR_NR] = mtd_blksizes;
	blk_size[MAJOR_NR] = mtd_sizes;

	blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
	kernel_thread(mtdblock_thread, NULL, CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
	return 0;
}

static void __exit cleanup_mtdblock(void)
{
	leaving = 1;
	wake_up(&thr_wq);
	down(&thread_sem);
#ifdef CONFIG_DEVFS_FS
	unregister_mtd_user(&notifier);
	devfs_unregister(devfs_dir_handle);
	devfs_unregister_blkdev(MTD_BLOCK_MAJOR, DEVICE_NAME);
#else
	unregister_blkdev(MAJOR_NR, DEVICE_NAME);
#endif
	blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
	blksize_size[MAJOR_NR] = NULL;
	blk_size[MAJOR_NR] = NULL;
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");