linux-old/drivers/mtd/devices/blkmtd.c
/*
 * $Id: blkmtd.c,v 1.17 2003/01/24 13:00:24 dwmw2 Exp $
 *
 * blkmtd.c - use a block device as a fake MTD
 *
 * Author: Simon Evans <spse@secret.org.uk>
 *
 * Copyright (C) 2001,2002 Simon Evans
 *
 * Licence: GPL
 *
 * How it works:
 *      The driver uses raw I/O to read/write the device and the page
 *      cache to cache access. Writes update the page cache with the
 *      new data, mark it dirty and add the page to a kiobuf.
 *      When the kiobuf becomes full, or the next entry refers to an earlier
 *      block than those already in the kiobuf, the kiobuf is flushed to
 *      disk. This keeps writes ordered and gives a small and simple
 *      outgoing write cache.
 *
 *      It can be loaded Read-Only to prevent erases and writes to the
 *      medium.
 *
 */
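/*
 * Example (illustrative only; the device path below is an assumption,
 * not part of this driver): loading the module with
 *
 *      modprobe blkmtd device=/dev/loop0 erasesz=128 ro=0
 *
 * emulates an MTD on /dev/loop0 with a 128KiB erase size. Passing ro=1
 * registers the device read-only so erases and writes return errors.
 */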

#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/iobuf.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>

#ifdef CONFIG_MTD_DEBUG
#ifdef CONFIG_PROC_FS
#  include <linux/proc_fs.h>
#  define BLKMTD_PROC_DEBUG
   static struct proc_dir_entry *blkmtd_proc;
#endif
#endif


#define err(format, arg...) printk(KERN_ERR "blkmtd: " format "\n" , ## arg)
#define info(format, arg...) printk(KERN_INFO "blkmtd: " format "\n" , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "blkmtd: " format "\n" , ## arg)
#define crit(format, arg...) printk(KERN_CRIT "blkmtd: " format "\n" , ## arg)


/* Default erase size in KiB, always make it a multiple of PAGE_SIZE */
#define CONFIG_MTD_BLKDEV_ERASESIZE (128 << 10) /* 128KiB */
#define VERSION "1.10"

/* Info for the block device */
struct blkmtd_dev {
        struct list_head list;
        struct block_device *binding;
        struct mtd_info mtd_info;
        struct kiobuf *rd_buf, *wr_buf;
        long iobuf_locks;
        struct semaphore wrbuf_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);


static void blkmtd_sync(struct mtd_info *mtd);

#define MAX_DEVICES 4

/* Module parameters passed by insmod/modprobe */
char *device[MAX_DEVICES];    /* the block device to use */
int erasesz[MAX_DEVICES];     /* optional default erase size */
int ro[MAX_DEVICES];          /* optional read only flag */
int sync;


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Simon Evans <spse@secret.org.uk>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");
MODULE_PARM(device, "1-4s");
MODULE_PARM_DESC(device, "block device to use");
MODULE_PARM(erasesz, "1-4i");
MODULE_PARM_DESC(erasesz, "optional erase size to use in KiB. eg 4=4KiB.");
MODULE_PARM(ro, "1-4i");
MODULE_PARM_DESC(ro, "1=Read only, writes and erases cause errors");
MODULE_PARM(sync, "i");
MODULE_PARM_DESC(sync, "1=Synchronous writes");
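
/*
 * Example (illustrative, hypothetical device names): up to four devices
 * can be given as comma separated lists, e.g.
 *
 *      insmod blkmtd device=/dev/hda3,/dev/hdb1 erasesz=64,128 ro=0,1
 *
 * which would create one writable MTD with 64KiB erase blocks and one
 * read-only MTD with 128KiB erase blocks.
 */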


/**
 * read_pages - read in pages via the page cache
 * @dev: device to read from
 * @pagenrs: list of page numbers wanted
 * @pagelst: storage for struct page * pointers
 * @pages: count of pages wanted
 *
 * Read pages, getting them from the page cache if available,
 * else reading them in from disk. pagelst must be preallocated
 * to hold the page count.
 */
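/*
 * For illustration (hypothetical values): to read device pages 5, 6 and 7
 * a caller fills pagenrs[] = {5, 6, 7}, provides a pagelst[] able to hold
 * three struct page pointers and calls read_pages(dev, pagenrs, pagelst, 3).
 * Pages already up to date in the page cache are not re-read from disk.
 */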
static int read_pages(struct blkmtd_dev *dev, int pagenrs[], struct page **pagelst, int pages)
{
        kdev_t kdev;
        struct page *page;
        int cnt = 0;
        struct kiobuf *iobuf;
        int err = 0;

        if(!dev) {
                err("read_pages: PANIC dev == NULL");
                return -EIO;
        }
        kdev = to_kdev_t(dev->binding->bd_dev);

        DEBUG(2, "read_pages: reading %d pages\n", pages);
        if(test_and_set_bit(0, &dev->iobuf_locks)) {
                err = alloc_kiovec(1, &iobuf);
                if (err) {
                        crit("cant allocate kiobuf");
                        return -ENOMEM;
                }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
                iobuf->blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), GFP_KERNEL);
                if(iobuf->blocks == NULL) {
                        crit("cant allocate iobuf blocks");
                        free_kiovec(1, &iobuf);
                        return -ENOMEM;
                }
#endif
        } else {
                iobuf = dev->rd_buf;
        }

        iobuf->nr_pages = 0;
        iobuf->length = 0;
        iobuf->offset = 0;
        iobuf->locked = 1;

        for(cnt = 0; cnt < pages; cnt++) {
                page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenrs[cnt]);
                pagelst[cnt] = page;
                if(!PageUptodate(page)) {
                        iobuf->blocks[iobuf->nr_pages] = pagenrs[cnt];
                        iobuf->maplist[iobuf->nr_pages++] = page;
                }
        }

        if(iobuf->nr_pages) {
                iobuf->length = iobuf->nr_pages << PAGE_SHIFT;
                err = brw_kiovec(READ, 1, &iobuf, kdev, iobuf->blocks, PAGE_SIZE);
                DEBUG(3, "blkmtd: read_pages: finished, err = %d\n", err);
                if(err < 0) {
                        while(pages--) {
                                ClearPageUptodate(pagelst[pages]);
                                unlock_page(pagelst[pages]);
                                page_cache_release(pagelst[pages]);
                        }
                } else {
                        while(iobuf->nr_pages--) {
                                SetPageUptodate(iobuf->maplist[iobuf->nr_pages]);
                        }
                        err = 0;
                }
        }


        if(iobuf != dev->rd_buf) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
                kfree(iobuf->blocks);
#endif
                free_kiovec(1, &iobuf);
        } else {
                clear_bit(0, &dev->iobuf_locks);
        }
        DEBUG(2, "read_pages: done, err = %d\n", err);
        return err;
}


/**
 * commit_pages - commit pages in the writeout kiobuf to disk
 * @dev: device to write to
 *
 * If the current dev has pages in the dev->wr_buf kiobuf,
 * they are written to disk using brw_kiovec()
 */
static int commit_pages(struct blkmtd_dev *dev)
{
        struct kiobuf *iobuf = dev->wr_buf;
        kdev_t kdev = to_kdev_t(dev->binding->bd_dev);
        int err = 0;

        iobuf->length = iobuf->nr_pages << PAGE_SHIFT;
        iobuf->locked = 1;
        if(iobuf->length) {
                int i;
                DEBUG(2, "blkmtd: commit_pages: nrpages = %d\n", iobuf->nr_pages);
                /* Check all the pages are dirty and lock them */
                for(i = 0; i < iobuf->nr_pages; i++) {
                        struct page *page = iobuf->maplist[i];
                        BUG_ON(!PageDirty(page));
                        lock_page(page);
                }
                err = brw_kiovec(WRITE, 1, &iobuf, kdev, iobuf->blocks, PAGE_SIZE);
                DEBUG(3, "commit_pages: committed %d pages err = %d\n", iobuf->nr_pages, err);
                while(iobuf->nr_pages) {
                        struct page *page = iobuf->maplist[--iobuf->nr_pages];
                        ClearPageDirty(page);
                        SetPageUptodate(page);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        DEBUG(2, "blkmtd: commit_pages: end, err = %d\n", err);
        iobuf->offset = 0;
        iobuf->nr_pages = 0;
        iobuf->length = 0;
        return err;
}


/**
 * write_pages - write block of data to device via the page cache
 * @dev: device to write to
 * @buf: data source or NULL if erase (output is set to 0xff)
 * @to: offset into output device
 * @len: amount of data to write
 * @retlen: amount of data written
 *
 * Grab pages from the page cache and fill them with the source data.
 * Non page aligned start and end result in a readin of the page and
 * part of the page being modified. Pages are added to the wr_buf kiobuf
 * until this becomes full or the next page written to has a lower pagenr
 * than the current max pagenr in the kiobuf.
 */
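/*
 * Worked example (assuming a 4096 byte PAGE_SIZE): a write of len = 10000
 * at to = 6000 gives pagenr = 1, offset = 1904, so start_len = 4096 - 1904
 * = 2192. The remaining 7808 bytes split into end_len = 7808 & ~PAGE_MASK
 * = 3712 and one whole page (len = 4096). pagecnt is therefore 3 and only
 * the two partial pages (readpages = 2) need to be read in first.
 */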
static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
                    size_t len, int *retlen)
{
        int pagenr, offset;
        size_t start_len = 0, end_len;
        int pagecnt = 0;
        struct kiobuf *iobuf = dev->wr_buf;
        int err = 0;
        struct page *pagelst[2];
        int pagenrs[2];
        int readpages = 0;
        int ignorepage = -1;

        pagenr = to >> PAGE_SHIFT;
        offset = to & ~PAGE_MASK;

        DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %d pagenr = %d offset = %d\n",
              buf, (long)to, len, pagenr, offset);

        *retlen = 0;
        /* see if we have to do a partial write at the start */
        if(offset) {
                start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
                len -= start_len;
        }

        /* calculate the length of the other two regions */
        end_len = len & ~PAGE_MASK;
        len -= end_len;

        if(start_len) {
                pagenrs[0] = pagenr;
                readpages++;
                pagecnt++;
        }
        if(len)
                pagecnt += len >> PAGE_SHIFT;
        if(end_len) {
                pagenrs[readpages] = pagenr + pagecnt;
                readpages++;
                pagecnt++;
        }

        DEBUG(3, "blkmtd: write: start_len = %d len = %d end_len = %d pagecnt = %d\n",
              start_len, len, end_len, pagecnt);

        down(&dev->wrbuf_mutex);

        if(iobuf->nr_pages && ((pagenr <= iobuf->blocks[iobuf->nr_pages-1])
                               || (iobuf->nr_pages + pagecnt) >= KIO_STATIC_PAGES)) {

                if((pagenr == iobuf->blocks[iobuf->nr_pages-1])
                   && ((iobuf->nr_pages + pagecnt) < KIO_STATIC_PAGES)) {
                        iobuf->nr_pages--;
                        ignorepage = pagenr;
                } else {
                        DEBUG(3, "blkmtd: doing writeout pagenr = %d max_pagenr = %ld pagecnt = %d idx = %d\n",
                              pagenr, iobuf->blocks[iobuf->nr_pages-1],
                              pagecnt, iobuf->nr_pages);
                        commit_pages(dev);
                }
        }

        if(readpages) {
                err = read_pages(dev, pagenrs, pagelst, readpages);
                if(err < 0)
                        goto readin_err;
        }

        if(start_len) {
                /* do partial start region */
                struct page *page;

                DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n",
                      pagenr, start_len, offset);
                page = pagelst[0];
                BUG_ON(!buf);
                if(PageDirty(page) && pagenr != ignorepage) {
                        err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d ignorepage = %d",
                            to, start_len, len, end_len, pagenr, ignorepage);
                        BUG();
                }
                memcpy(page_address(page)+offset, buf, start_len);
                SetPageDirty(page);
                SetPageUptodate(page);
                unlock_page(page);
                buf += start_len;
                *retlen = start_len;
                err = 0;
                iobuf->blocks[iobuf->nr_pages] = pagenr++;
                iobuf->maplist[iobuf->nr_pages] = page;
                iobuf->nr_pages++;
        }

        /* Now do the main loop to a page aligned, n page sized output */
        if(len) {
                int pagesc = len >> PAGE_SHIFT;
                DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
                      pagenr, pagesc);
                while(pagesc) {
                        struct page *page;

                        /* see if page is in the page cache */
                        DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
                        page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenr);
                        if(!page) {
                                warn("write: cant grab cache page %d", pagenr);
                                err = -ENOMEM;
                                goto write_err;
                        }
                        if(PageDirty(page) && pagenr != ignorepage) {
                                BUG();
                        }
                        if(!buf) {
                                memset(page_address(page), 0xff, PAGE_SIZE);
                        } else {
                                memcpy(page_address(page), buf, PAGE_SIZE);
                                buf += PAGE_SIZE;
                        }
                        iobuf->blocks[iobuf->nr_pages] = pagenr++;
                        iobuf->maplist[iobuf->nr_pages] = page;
                        iobuf->nr_pages++;
                        SetPageDirty(page);
                        SetPageUptodate(page);
                        unlock_page(page);
                        pagesc--;
                        *retlen += PAGE_SIZE;
                }
        }

        if(end_len) {
                /* do the third region */
                struct page *page;
                DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n",
                      pagenr, end_len);
                page = pagelst[readpages-1];
                BUG_ON(!buf);
                if(PageDirty(page) && pagenr != ignorepage) {
                        err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d ignorepage = %d",
                            to, start_len, len, end_len, pagenr, ignorepage);
                        BUG();
                }
                memcpy(page_address(page), buf, end_len);
                SetPageDirty(page);
                SetPageUptodate(page);
                unlock_page(page);
                DEBUG(3, "blkmtd: write: writing out partial end\n");
                *retlen += end_len;
                err = 0;
                iobuf->blocks[iobuf->nr_pages] = pagenr;
                iobuf->maplist[iobuf->nr_pages] = page;
                iobuf->nr_pages++;
        }

        DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);

        if(sync) {
write_err:
                commit_pages(dev);
        }

readin_err:
        up(&dev->wrbuf_mutex);
        return err;
}


/* erase a specified part of the device */
static int blkmtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct blkmtd_dev *dev = mtd->priv;
        struct mtd_erase_region_info *einfo = mtd->eraseregions;
        int numregions = mtd->numeraseregions;
        size_t from;
        u_long len;
        int err = -EIO;
        int retlen;

        /* check readonly */
        if(!dev->wr_buf) {
                err("error: mtd%d trying to erase readonly device %s",
                    mtd->index, mtd->name);
                instr->state = MTD_ERASE_FAILED;
                goto erase_callback;
        }

        instr->state = MTD_ERASING;
        from = instr->addr;
        len = instr->len;

        /* check erase region has valid start and length */
        DEBUG(2, "blkmtd: erase: dev = `%s' from = 0x%x len = 0x%lx\n",
              bdevname(dev->binding->bd_dev), from, len);
        while(numregions) {
                DEBUG(3, "blkmtd: checking erase region = 0x%08X size = 0x%X num = 0x%x\n",
                      einfo->offset, einfo->erasesize, einfo->numblocks);
                if(from >= einfo->offset
                   && from < einfo->offset + (einfo->erasesize * einfo->numblocks)) {
                        if(len == einfo->erasesize
                           && ( (from - einfo->offset) % einfo->erasesize == 0))
                                break;
                }
                numregions--;
                einfo++;
        }

        if(!numregions) {
                /* Not a valid erase block */
                err("erase: invalid erase request 0x%lX @ 0x%08X", len, from);
                instr->state = MTD_ERASE_FAILED;
                err = -EIO;
        }

        if(instr->state != MTD_ERASE_FAILED) {
                /* do the erase */
                DEBUG(3, "Doing erase from = %d len = %ld\n", from, len);
                err = write_pages(dev, NULL, from, len, &retlen);
                if(err < 0) {
                        err("erase failed err = %d", err);
                        instr->state = MTD_ERASE_FAILED;
                } else {
                        instr->state = MTD_ERASE_DONE;
                        err = 0;
                }
        }

        DEBUG(3, "blkmtd: erase: checking callback\n");
 erase_callback:
        if (instr->callback) {
                (*(instr->callback))(instr);
        }
        DEBUG(2, "blkmtd: erase: finished (err = %d)\n", err);
        return err;
}


/* read a range of the data via the page cache */
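/*
 * For illustration (assuming a 4096 byte PAGE_SIZE): a read of len = 9000
 * at from = 5000 gives pagenr = 1, offset = 904 and therefore
 * pages = (904 + 9000 + 4095) >> PAGE_SHIFT = 3, i.e. device pages 1, 2
 * and 3 are fetched via read_pages() and the requested bytes copied out.
 */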
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                       size_t *retlen, u_char *buf)
{
        struct blkmtd_dev *dev = mtd->priv;
        int err = 0;
        int offset;
        int pagenr, pages;
        struct page **pagelst;
        int *pagenrs;
        int i;

        *retlen = 0;

        DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
              bdevname(dev->binding->bd_dev), (long int)from, len, buf);

        pagenr = from >> PAGE_SHIFT;
        offset = from - (pagenr << PAGE_SHIFT);

        pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
        DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
              pagenr, offset, pages);

        pagelst = kmalloc(sizeof(struct page *) * pages, GFP_KERNEL);
        if(!pagelst)
                return -ENOMEM;
        pagenrs = kmalloc(sizeof(int) * pages, GFP_KERNEL);
        if(!pagenrs) {
                kfree(pagelst);
                return -ENOMEM;
        }
        for(i = 0; i < pages; i++)
                pagenrs[i] = pagenr+i;

        err = read_pages(dev, pagenrs, pagelst, pages);
        if(err)
                goto readerr;

        pagenr = 0;
        while(pages) {
                struct page *page;
                int cpylen;

                DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
                page = pagelst[pagenr];

                cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
                if(offset+cpylen > PAGE_SIZE)
                        cpylen = PAGE_SIZE-offset;

                memcpy(buf + *retlen, page_address(page) + offset, cpylen);
                offset = 0;
                len -= cpylen;
                *retlen += cpylen;
                pagenr++;
                pages--;
                unlock_page(page);
                if(!PageDirty(page))
                        page_cache_release(page);
        }

 readerr:
        kfree(pagelst);
        kfree(pagenrs);
        DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", *retlen, err);
        return err;
}


/* write data to the underlying device */
static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
                        size_t *retlen, const u_char *buf)
{
        struct blkmtd_dev *dev = mtd->priv;
        int err;

        *retlen = 0;
        if(!len)
                return 0;

        DEBUG(2, "blkmtd: write: dev = `%s' to = %ld len = %d buf = %p\n",
              bdevname(dev->binding->bd_dev), (long int)to, len, buf);

        /* handle readonly and out of range numbers */

        if(!dev->wr_buf) {
                err("error: trying to write to a readonly device %s", mtd->name);
                return -EROFS;
        }

        if(to >= mtd->size) {
                return -ENOSPC;
        }

        if(to + len > mtd->size) {
                len = (mtd->size - to);
        }

        err = write_pages(dev, buf, to, len, retlen);
        if(err < 0)
                *retlen = 0;
        else
                err = 0;
        DEBUG(2, "blkmtd: write: end, err = %d\n", err);
        return err;
}


/* sync the device - wait until the write queue is empty */
static void blkmtd_sync(struct mtd_info *mtd)
{
        struct blkmtd_dev *dev = mtd->priv;
        struct kiobuf *iobuf = dev->wr_buf;

        DEBUG(2, "blkmtd: sync: called\n");
        if(iobuf == NULL)
                return;

        DEBUG(3, "blkmtd: kiovec: length = %d nr_pages = %d\n",
              iobuf->length, iobuf->nr_pages);
        down(&dev->wrbuf_mutex);
        if(iobuf->nr_pages)
                commit_pages(dev);
        up(&dev->wrbuf_mutex);
}


#ifdef BLKMTD_PROC_DEBUG
/* procfs stuff */
static int blkmtd_proc_read(char *page, char **start, off_t off,
                            int count, int *eof, void *data)
{
        int len;
        struct list_head *temp1, *temp2;

        MOD_INC_USE_COUNT;

        /* Count the size of the page lists */

        len = sprintf(page, "dev\twr_idx\tmax_idx\tnrpages\tclean\tdirty\tlocked\tlru\n");
        list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
                struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
                                                    list);
                struct list_head *temp;
                struct page *pagei;

                int clean = 0, dirty = 0, locked = 0, lru = 0;
                /* Count the size of the page lists */
                list_for_each(temp, &dev->binding->bd_inode->i_mapping->clean_pages) {
                        pagei = list_entry(temp, struct page, list);
                        clean++;
                        if(PageLocked(pagei))
                                locked++;
                        if(PageDirty(pagei))
                                dirty++;
                        if(PageLRU(pagei))
                                lru++;
                }
                list_for_each(temp, &dev->binding->bd_inode->i_mapping->dirty_pages) {
                        pagei = list_entry(temp, struct page, list);
                        if(PageLocked(pagei))
                                locked++;
                        if(PageDirty(pagei))
                                dirty++;
                        if(PageLRU(pagei))
                                lru++;
                }
                list_for_each(temp, &dev->binding->bd_inode->i_mapping->locked_pages) {
                        pagei = list_entry(temp, struct page, list);
                        if(PageLocked(pagei))
                                locked++;
                        if(PageDirty(pagei))
                                dirty++;
                        if(PageLRU(pagei))
                                lru++;
                }

                len += sprintf(page+len, "mtd%d:\t%ld\t%d\t%ld\t%d\t%d\t%d\t%d\n",
                               dev->mtd_info.index,
                               (dev->wr_buf && dev->wr_buf->nr_pages) ?
                               dev->wr_buf->blocks[dev->wr_buf->nr_pages-1] : 0,
                               (dev->wr_buf) ? dev->wr_buf->nr_pages : 0,
                               dev->binding->bd_inode->i_mapping->nrpages,
                               clean, dirty, locked, lru);
        }

        if(len <= count)
                *eof = 1;

        MOD_DEC_USE_COUNT;
        return len;
}
#endif


static void free_device(struct blkmtd_dev *dev)
{
        DEBUG(2, "blkmtd: free_device() dev = %p\n", dev);
        if(dev) {
                del_mtd_device(&dev->mtd_info);
                info("mtd%d: [%s] removed", dev->mtd_info.index,
                     dev->mtd_info.name + strlen("blkmtd: "));
                if(dev->mtd_info.eraseregions)
                        kfree(dev->mtd_info.eraseregions);
                if(dev->mtd_info.name)
                        kfree(dev->mtd_info.name);

                if(dev->rd_buf) {
                        dev->rd_buf->locked = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
                        if(dev->rd_buf->blocks)
                                kfree(dev->rd_buf->blocks);
#endif
                        free_kiovec(1, &dev->rd_buf);
                }
                if(dev->wr_buf) {
                        dev->wr_buf->locked = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
                        if(dev->wr_buf->blocks)
                                kfree(dev->wr_buf->blocks);
#endif
                        free_kiovec(1, &dev->wr_buf);
                }

                if(dev->binding) {
                        kdev_t kdev = to_kdev_t(dev->binding->bd_dev);
                        invalidate_inode_pages(dev->binding->bd_inode);
                        set_blocksize(kdev, 1 << 10);
                        blkdev_put(dev->binding, BDEV_RAW);
                }
                kfree(dev);
        }
}


/* For a given size and initial erase size, calculate the number
 * and size of each erase region. Goes round the loop twice:
 * once to find out how many regions are needed, then allocates the space,
 * then round the loop again to fill it in.
 */
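/*
 * Worked example (illustrative sizes): with erase_size = 128KiB and
 * total_size = 320KiB the first pass finds two regions, the space is
 * allocated, and the second pass fills in:
 *      region 0: offset 0,      erasesize 128KiB, 2 blocks (256KiB)
 *      region 1: offset 256KiB, erasesize  64KiB, 1 block
 */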
static struct mtd_erase_region_info *calc_erase_regions(
        size_t erase_size, size_t total_size, int *regions)
{
        struct mtd_erase_region_info *info = NULL;

        DEBUG(2, "calc_erase_regions, es = %d size = %d regions = %d\n",
              erase_size, total_size, *regions);
        /* Make any user specified erasesize be a power of 2
           and at least PAGE_SIZE */
        if(erase_size) {
                int es = erase_size;
                erase_size = 1;
                while(es != 1) {
                        es >>= 1;
                        erase_size <<= 1;
                }
                if(erase_size < PAGE_SIZE)
                        erase_size = PAGE_SIZE;
        } else {
                erase_size = CONFIG_MTD_BLKDEV_ERASESIZE;
        }

        *regions = 0;

        do {
                int tot_size = total_size;
                int er_size = erase_size;
                int count = 0, offset = 0, regcnt = 0;

                while(tot_size) {
                        count = tot_size / er_size;
                        if(count) {
                                tot_size = tot_size % er_size;
                                if(info) {
                                        DEBUG(2, "adding to erase info off=%d er=%d cnt=%d\n",
                                              offset, er_size, count);
                                        (info+regcnt)->offset = offset;
                                        (info+regcnt)->erasesize = er_size;
                                        (info+regcnt)->numblocks = count;
                                        (*regions)++;
                                }
                                regcnt++;
                                offset += (count * er_size);
                        }
                        while(er_size > tot_size)
                                er_size >>= 1;
                }
                if(info == NULL) {
                        info = kmalloc(regcnt * sizeof(struct mtd_erase_region_info), GFP_KERNEL);
                        if(!info)
                                break;
                }
        } while(!(*regions));
        DEBUG(2, "calc_erase_regions done, es = %d size = %d regions = %d\n",
              erase_size, total_size, *regions);
        return info;
}


extern kdev_t name_to_kdev_t(char *line) __init;


static struct blkmtd_dev *add_device(char *devname, int readonly, int erase_size)
{
        int maj, min;
        kdev_t kdev;
        int mode;
        struct blkmtd_dev *dev;

#ifdef MODULE
        struct file *file = NULL;
        struct inode *inode;
#endif

        if(!devname)
                return NULL;

        /* Get a handle on the device */
        mode = (readonly) ? O_RDONLY : O_RDWR;

#ifdef MODULE

        file = filp_open(devname, mode, 0);
        if(IS_ERR(file)) {
                err("error: cant open device %s", devname);
                DEBUG(2, "blkmtd: filp_open returned %ld\n", PTR_ERR(file));
                return NULL;
        }

        /* determine if this is a block device and
         * if so get its major and minor numbers
         */
        inode = file->f_dentry->d_inode;
        if(!S_ISBLK(inode->i_mode)) {
                err("%s not a block device", devname);
                filp_close(file, NULL);
                return NULL;
        }
        kdev = inode->i_rdev;
        filp_close(file, NULL);
#else
        kdev = name_to_kdev_t(devname);
#endif  /* MODULE */

        if(!kdev) {
                err("bad block device: `%s'", devname);
                return NULL;
        }

        maj = MAJOR(kdev);
        min = MINOR(kdev);
        DEBUG(1, "blkmtd: found a block device major = %d, minor = %d\n",
              maj, min);

        if(maj == MTD_BLOCK_MAJOR) {
                err("attempting to use an MTD device as a block device");
                return NULL;
        }

        DEBUG(1, "blkmtd: devname = %s\n", bdevname(kdev));

        dev = kmalloc(sizeof(struct blkmtd_dev), GFP_KERNEL);
        if(dev == NULL)
                return NULL;

        memset(dev, 0, sizeof(struct blkmtd_dev));
        if(alloc_kiovec(1, &dev->rd_buf)) {
                err("cant allocate read iobuf");
                goto devinit_err;
        }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
        dev->rd_buf->blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), GFP_KERNEL);
        if(dev->rd_buf->blocks == NULL) {
                crit("cant allocate rd_buf blocks");
                goto devinit_err;
        }
#endif

        if(!readonly) {
                if(alloc_kiovec(1, &dev->wr_buf)) {
                        err("cant allocate kiobuf - readonly enabled");

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4)
                } else {
                        dev->wr_buf->blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), GFP_KERNEL);
                        if(dev->wr_buf->blocks == NULL) {
                                crit("cant allocate wr_buf blocks - readonly enabled");
                                /* fall back to read-only: drop the write kiobuf */
                                free_kiovec(1, &dev->wr_buf);
                                dev->wr_buf = NULL;
                        }
#endif
                }
                if(dev->wr_buf)
                        init_MUTEX(&dev->wrbuf_mutex);
        }

        /* get the block device */
        dev->binding = bdget(kdev_t_to_nr(MKDEV(maj, min)));
        if(blkdev_get(dev->binding, mode, 0, BDEV_RAW))
                goto devinit_err;

        if(set_blocksize(kdev, PAGE_SIZE)) {
                err("cant set block size to PAGE_SIZE on %s", bdevname(kdev));
                goto devinit_err;
        }

        dev->mtd_info.size = dev->binding->bd_inode->i_size & PAGE_MASK;

        /* Setup the MTD structure */
        /* make the name include the block device name */
        dev->mtd_info.name = kmalloc(sizeof("blkmtd: ") + strlen(devname), GFP_KERNEL);
        if(dev->mtd_info.name == NULL)
                goto devinit_err;

        sprintf(dev->mtd_info.name, "blkmtd: %s", devname);
        dev->mtd_info.eraseregions = calc_erase_regions(erase_size, dev->mtd_info.size,
                                                        &dev->mtd_info.numeraseregions);
        if(dev->mtd_info.eraseregions == NULL)
                goto devinit_err;

        dev->mtd_info.erasesize = dev->mtd_info.eraseregions->erasesize;
        DEBUG(1, "blkmtd: init: found %d erase regions\n",
              dev->mtd_info.numeraseregions);

        if(readonly) {
                dev->mtd_info.type = MTD_ROM;
                dev->mtd_info.flags = MTD_CAP_ROM;
        } else {
                dev->mtd_info.type = MTD_RAM;
                dev->mtd_info.flags = MTD_CAP_RAM;
        }
        dev->mtd_info.erase = blkmtd_erase;
        dev->mtd_info.read = blkmtd_read;
        dev->mtd_info.write = blkmtd_write;
        dev->mtd_info.sync = blkmtd_sync;
        dev->mtd_info.point = 0;
        dev->mtd_info.unpoint = 0;
        dev->mtd_info.priv = dev;
        dev->mtd_info.module = THIS_MODULE;

        list_add(&dev->list, &blkmtd_device_list);
        if (add_mtd_device(&dev->mtd_info)) {
                /* Device didn't get added, so free the entry */
                list_del(&dev->list);
                free_device(dev);
                return NULL;
        } else {
                info("mtd%d: [%s] erase_size = %dKiB %s",
                     dev->mtd_info.index, dev->mtd_info.name + strlen("blkmtd: "),
                     dev->mtd_info.erasesize >> 10,
                     (dev->wr_buf) ? "" : "(read-only)");
        }

        return dev;

 devinit_err:
        free_device(dev);
        return NULL;
}


/* Cleanup and exit - sync the device and kill off the kernel thread */
static void __devexit cleanup_blkmtd(void)
{
        struct list_head *temp1, *temp2;
#ifdef BLKMTD_PROC_DEBUG
        if(blkmtd_proc) {
                remove_proc_entry("blkmtd_debug", NULL);
        }
#endif

        /* Remove the MTD devices */
        list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
                struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
                                                    list);
                blkmtd_sync(&dev->mtd_info);
                free_device(dev);
        }
}

#ifndef MODULE

/* Handle kernel boot params */


static int __init param_blkmtd_device(char *str)
{
        int i;

        for(i = 0; i < MAX_DEVICES; i++) {
                device[i] = str;
                DEBUG(2, "blkmtd: device setup: %d = %s\n", i, device[i]);
                strsep(&str, ",");
        }
        return 1;
}


static int __init param_blkmtd_erasesz(char *str)
{
        int i;
        for(i = 0; i < MAX_DEVICES; i++) {
                char *val = strsep(&str, ",");
                if(val)
                        erasesz[i] = simple_strtoul(val, NULL, 0);
                DEBUG(2, "blkmtd: erasesz setup: %d = %d\n", i, erasesz[i]);
        }

        return 1;
}


static int __init param_blkmtd_ro(char *str)
{
        int i;
        for(i = 0; i < MAX_DEVICES; i++) {
                char *val = strsep(&str, ",");
                if(val)
                        ro[i] = simple_strtoul(val, NULL, 0);
                DEBUG(2, "blkmtd: ro setup: %d = %d\n", i, ro[i]);
        }

        return 1;
}


static int __init param_blkmtd_sync(char *str)
{
        if(str[0] == '1')
                sync = 1;
        return 1;
}

__setup("blkmtd_device=", param_blkmtd_device);
__setup("blkmtd_erasesz=", param_blkmtd_erasesz);
__setup("blkmtd_ro=", param_blkmtd_ro);
__setup("blkmtd_sync=", param_blkmtd_sync);

#endif


/* Startup */
static int __init init_blkmtd(void)
{
        int i;

        /* Check args - device[0] is the bare minimum */
        if(!device[0]) {
                err("error: missing `device' name");
                return -EINVAL;
        }

        for(i = 0; i < MAX_DEVICES; i++)
                add_device(device[i], ro[i], erasesz[i] << 10);

        if(list_empty(&blkmtd_device_list))
                goto init_err;

        info("version " VERSION);

#ifdef BLKMTD_PROC_DEBUG
        /* create proc entry */
        DEBUG(2, "Creating /proc/blkmtd_debug\n");
        blkmtd_proc = create_proc_read_entry("blkmtd_debug", 0444,
                                             NULL, blkmtd_proc_read, NULL);
        if(blkmtd_proc == NULL) {
                err("Cant create /proc/blkmtd_debug");
        } else {
                blkmtd_proc->owner = THIS_MODULE;
        }
#endif

        if(!list_empty(&blkmtd_device_list))
                /* Everything is ok if we got here */
                return 0;

 init_err:
        return -EINVAL;
}

module_init(init_blkmtd);
module_exit(cleanup_blkmtd);