linux-old/drivers/mtd/mtdblock_ro.c
/*
 * $Id: mtdblock_ro.c,v 1.12 2001/11/20 11:42:33 dwmw2 Exp $
 *
 * Read-only version of the mtdblock device, without the
 * read/erase/modify/writeback stuff
 */

#ifdef MTDBLOCK_DEBUG
#define DEBUGLVL debug
#endif


#include <linux/module.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#define MAJOR_NR MTD_BLOCK_MAJOR
#define DEVICE_NAME "mtdblock"
#define DEVICE_REQUEST mtdblock_request
#define DEVICE_NR(device) (device)
#define DEVICE_ON(device)
#define DEVICE_OFF(device)
#define DEVICE_NO_RANDOM
#include <linux/blk.h>

#if LINUX_VERSION_CODE < 0x20300
#define RQFUNC_ARG void
#define blkdev_dequeue_request(req) do {CURRENT = req->next;} while (0)
#else
#define RQFUNC_ARG request_queue_t *q
#endif

#ifdef MTDBLOCK_DEBUG
static int debug = MTDBLOCK_DEBUG;
MODULE_PARM(debug, "i");
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,14)
#define BLK_INC_USE_COUNT MOD_INC_USE_COUNT
#define BLK_DEC_USE_COUNT MOD_DEC_USE_COUNT
#else
#define BLK_INC_USE_COUNT do {} while(0)
#define BLK_DEC_USE_COUNT do {} while(0)
#endif

static int mtd_sizes[MAX_MTD_DEVICES];

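/*
 * open(): look up the MTD device behind the minor number, refuse absent
 * devices, take a use-count reference, and record the device size in
 * 512-byte sectors so the block layer can see it through blk_size[].
 */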
static int mtdblock_open(struct inode *inode, struct file *file)
{
        struct mtd_info *mtd = NULL;

        int dev;

        DEBUG(1,"mtdblock_open\n");

        if (inode == 0)
                return -EINVAL;

        dev = MINOR(inode->i_rdev);

        mtd = get_mtd_device(NULL, dev);
        if (!mtd)
                return -EINVAL;
        if (MTD_ABSENT == mtd->type) {
                put_mtd_device(mtd);
                return -EINVAL;
        }

        BLK_INC_USE_COUNT;

        mtd_sizes[dev] = mtd->size>>9;

        DEBUG(1, "ok\n");

        return 0;
}

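/*
 * release(): sync the underlying MTD device if it provides a sync hook,
 * then drop the reference taken in open() and the use count.
 */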
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
        int dev;
        struct mtd_info *mtd;

        DEBUG(1, "mtdblock_release\n");

        if (inode == NULL)
                release_return(-ENODEV);

        dev = MINOR(inode->i_rdev);
        mtd = __get_mtd_device(NULL, dev);

        if (!mtd) {
                printk(KERN_WARNING "MTD device is absent on mtd_release!\n");
                BLK_DEC_USE_COUNT;
                release_return(-ENODEV);
        }

        if (mtd->sync)
                mtd->sync(mtd);

        put_mtd_device(mtd);

        DEBUG(1, "ok\n");

        BLK_DEC_USE_COUNT;
        release_return(0);
}


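/*
 * Request handler: loop pulling requests off the queue (INIT_REQUEST
 * returns when the queue is empty), validate the minor number and the
 * requested range against the device size, then service the transfer
 * with MTD_READ/MTD_WRITE.  On 2.2+ kernels io_request_lock is dropped
 * around the (potentially slow) MTD I/O and re-taken before
 * end_request().  WRITEs are only honoured for MTD_CAP_RAM devices;
 * everything else is treated as read-only and the write fails.
 */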
static void mtdblock_request(RQFUNC_ARG)
{
   struct request *current_request;
   unsigned int res = 0;
   struct mtd_info *mtd;

   while (1)
   {
      /* Grab the Request and unlink it from the request list, INIT_REQUEST
         will execute a return if we are done. */
      INIT_REQUEST;
      current_request = CURRENT;

      if (MINOR(current_request->rq_dev) >= MAX_MTD_DEVICES)
      {
         printk("mtd: Unsupported device!\n");
         end_request(0);
         continue;
      }

      // Grab our MTD structure

      mtd = __get_mtd_device(NULL, MINOR(current_request->rq_dev));
      if (!mtd) {
              printk("MTD device %d doesn't appear to exist any more\n", CURRENT_DEV);
              end_request(0);
              continue;   /* no such device: don't dereference a NULL mtd below */
      }

      if (current_request->sector << 9 > mtd->size ||
          (current_request->sector + current_request->current_nr_sectors) << 9 > mtd->size)
      {
         printk("mtd: Attempt to read past end of device!\n");
         printk("size: %x, sector: %lx, nr_sectors %lx\n", mtd->size,
                current_request->sector, current_request->current_nr_sectors);
         end_request(0);
         continue;
      }

      /* Remove the request we are handling from the request list so nobody messes
         with it */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
      /* Now drop the lock that the ll_rw_blk functions grabbed for us
         and process the request. This is necessary due to the extreme time
         we spend processing it. */
      spin_unlock_irq(&io_request_lock);
#endif

      // Handle the request
      switch (current_request->cmd)
      {
         size_t retlen;

         case READ:
         if (MTD_READ(mtd,current_request->sector<<9,
                      current_request->current_nr_sectors << 9,
                      &retlen, current_request->buffer) == 0)
            res = 1;
         else
            res = 0;
         break;

         case WRITE:

         /* printk("mtdblock_request WRITE sector=%d(%d)\n",current_request->sector,
                current_request->current_nr_sectors);
         */

         // Read only device
         if ((mtd->flags & MTD_CAP_RAM) == 0)
         {
            res = 0;
            break;
         }

         // Do the write
         if (MTD_WRITE(mtd,current_request->sector<<9,
                       current_request->current_nr_sectors << 9,
                       &retlen, current_request->buffer) == 0)
            res = 1;
         else
            res = 0;
         break;

         // Shouldn't happen
         default:
         printk("mtd: unknown request\n");
         break;
      }

      // Grab the lock and re-thread the item onto the linked list
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
      spin_lock_irq(&io_request_lock);
#endif
      end_request(res);
   }
}


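/*
 * ioctl(): BLKGETSIZE returns the device size in 512-byte sectors,
 * BLKGETSIZE64 (where available) the size in bytes, and BLKFLSBUF
 * flushes and invalidates the buffer cache for the device before
 * calling the MTD sync hook.  Anything else is -ENOTTY.
 */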
static int mtdblock_ioctl(struct inode * inode, struct file * file,
                      unsigned int cmd, unsigned long arg)
{
        struct mtd_info *mtd;

        mtd = __get_mtd_device(NULL, MINOR(inode->i_rdev));

        if (!mtd) return -EINVAL;

        switch (cmd) {
        case BLKGETSIZE:   /* Return device size */
                return put_user((mtd->size >> 9), (unsigned long *) arg);

#ifdef BLKGETSIZE64
        case BLKGETSIZE64:
                return put_user((u64)mtd->size, (u64 *)arg);
#endif

        case BLKFLSBUF:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
                if(!capable(CAP_SYS_ADMIN))  return -EACCES;
#endif
                fsync_dev(inode->i_rdev);
                invalidate_buffers(inode->i_rdev);
                if (mtd->sync)
                        mtd->sync(mtd);
                return 0;

        default:
                return -ENOTTY;
        }
}

#if LINUX_VERSION_CODE < 0x20326
static struct file_operations mtd_fops =
{
        open: mtdblock_open,
        ioctl: mtdblock_ioctl,
        release: mtdblock_release,
        read: block_read,
        write: block_write
};
#else
static struct block_device_operations mtd_fops =
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14)
        owner: THIS_MODULE,
#endif
        open: mtdblock_open,
        release: mtdblock_release,
        ioctl: mtdblock_ioctl
};
#endif

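/*
 * Module init: register the mtdblock major, publish the (initially zero)
 * per-device sizes through blk_size[], leave blksize_size[] NULL so the
 * block size defaults to BLOCK_SIZE, and attach mtdblock_request to the
 * default request queue for this major.
 */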
int __init init_mtdblock(void)
{
        int i;

        if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
                printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
                       MTD_BLOCK_MAJOR);
                return -EAGAIN;
        }

        /* We fill it in at open() time. */
        for (i=0; i< MAX_MTD_DEVICES; i++) {
                mtd_sizes[i] = 0;
        }

        /* Allow the block size to default to BLOCK_SIZE. */
        blksize_size[MAJOR_NR] = NULL;
        blk_size[MAJOR_NR] = mtd_sizes;

        blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request);
        return 0;
}

static void __exit cleanup_mtdblock(void)
{
        unregister_blkdev(MAJOR_NR,DEVICE_NAME);
        blk_size[MAJOR_NR] = NULL;
        blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR));
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
MODULE_DESCRIPTION("Simple read-only block device emulation access to MTD devices");