linux/drivers/mtd/mtd_blkdevs.c
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
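
/*
 * Usage sketch (editor's addition, compiled out): how a translation
 * layer plugs into this interface, modeled on the in-tree mtdblock_ro
 * driver.  The "myflash" names are hypothetical, and the example
 * assumes the mtd->read() driver method of this kernel generation.
 */
#if 0
static int myflash_readsect(struct mtd_blktrans_dev *dev,
			    unsigned long block, char *buf)
{
	size_t retlen;

	/* Read one tr->blksize (here 512-byte) sector; any non-zero
	   return is reported to the block layer as -EIO. */
	if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
		return 1;
	return 0;
}

static void myflash_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mtd = mtd;
	dev->devnum = mtd->index;
	dev->size = mtd->size >> 9;	/* capacity in 512-byte sectors */
	dev->tr = tr;

	if (add_mtd_blktrans_dev(dev))	/* takes ownership on success */
		kfree(dev);
}

static void myflash_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}

static struct mtd_blktrans_ops myflash_tr = {
	.name		= "myflash",
	.major		= 0,		/* 0: let register_blkdev() choose */
	.part_bits	= 0,
	.blksize	= 512,
	.readsect	= myflash_readsect,
	/* no .writesect: add_mtd_blktrans_dev() marks the disk read-only */
	.add_mtd	= myflash_add_mtd,
	.remove_dev	= myflash_remove_dev,
	.owner		= THIS_MODULE,
};

static int __init myflash_init(void)
{
	return register_mtd_blktrans(&myflash_tr);
}

static void __exit myflash_exit(void)
{
	deregister_mtd_blktrans(&myflash_tr);
}

module_init(myflash_init);
module_exit(myflash_exit);
#endif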

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

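/*
 * Last-reference destructor for a translation-layer device: tear down
 * the request queue and gendisk, unlink the device and free it.
 */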
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}

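/*
 * Look up the mtd_blktrans_dev behind a gendisk and take a reference,
 * or return NULL if the disk has already been torn down.  Paired with
 * blktrans_dev_put() below.
 */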
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
	struct mtd_blktrans_dev *dev;

	mutex_lock(&blktrans_ref_mutex);
	dev = disk->private_data;

	if (!dev)
		goto unlock;
	kref_get(&dev->ref);
unlock:
	mutex_unlock(&blktrans_ref_mutex);
	return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}

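/*
 * Serve the current segment of one filesystem request by looping over
 * the translation layer's per-sector callbacks.  Returns 0 on success
 * or -EIO, which the caller feeds back to the block layer.
 */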
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	buf = req->buffer;

	if (req->cmd_type != REQ_TYPE_FS)
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	if (req->cmd_flags & REQ_DISCARD)
		return tr->discard(dev, block, nsect);

	switch (rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		rq_flush_dcache_pages(req);
		return 0;
	case WRITE:
		if (!tr->writesect)
			return -EIO;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;
	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}

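/*
 * mtd_blktrans_cease_background() - should background work yield?
 * Translation layers call this from their background() callback; a
 * non-zero return means new requests are pending (or the thread is
 * being stopped) and background work should be paused.
 */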
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	if (kthread_should_stop())
		return 1;

	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

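/*
 * Per-device worker thread: pulls requests off the queue, dropping the
 * queue lock around the actual transfer, and runs the optional
 * background() callback once per idle period.
 */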
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_dev *dev = arg;
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;
	int background_done = 0;

	spin_lock_irq(rq->queue_lock);

	while (!kthread_should_stop()) {
		int res;

		dev->bg_stop = false;
		if (!req && !(req = blk_fetch_request(rq))) {
			if (tr->background && !background_done) {
				spin_unlock_irq(rq->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(rq->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;

		background_done = 0;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}

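/*
 * Request-queue callback: fail everything with -ENODEV if the device
 * has gone away, otherwise interrupt background work and kick the
 * worker thread.
 */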
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev;
	struct request *req = NULL;

	dev = rq->queuedata;

	if (!dev)
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
	else {
		dev->bg_stop = true;
		wake_up_process(dev->thread);
	}
}

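/*
 * Block-device open: on first open, pin the translation layer module
 * and the underlying MTD device, and invoke the layer's open() hook.
 */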
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS; /* FIXME: busy loop! -arnd */

	mutex_lock(&dev->lock);

	if (dev->open)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

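/*
 * Block-device release: on last close, drop the references taken in
 * blktrans_open() and invoke the layer's release() hook.
 */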
static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
	int ret = 0;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	if (dev->mtd) {
		ret = dev->tr->release ? dev->tr->release(dev) : 0;
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);

	if (!dev->mtd)
		goto unlock;

	switch (cmd) {
	case BLKFLSBUF:
		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		break;
	default:
		ret = -ENOTTY;
	}
unlock:
	mutex_unlock(&dev->lock);
	blktrans_dev_put(dev);
	return ret;
}

static const struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

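/*
 * Register one translation-layer device: pick a free device number (or
 * honour the requested one), allocate the gendisk and request queue,
 * start the worker thread and publish the disk.  Must be called with
 * mtd_table_mutex held.
 */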
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	set_capacity(gd, (new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);

	if (tr->discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
		new->rq->limits.max_discard_sectors = UINT_MAX;
	}

	gd->queue = new->rq;

	/* Create processing thread */
	/* TODO: workqueue ? */
	new->thread = kthread_run(mtd_blktrans_thread, new,
			"%s%d", tr->name, new->mtd->index);
	if (IS_ERR(new->thread)) {
		ret = PTR_ERR(new->thread);
		goto error4;
	}
	gd->driverfs_dev = &new->mtd->dev;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					 new->disk_attributes);
		WARN_ON(ret);
	}
	return 0;
error4:
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	return ret;
}

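/*
 * Tear down a translation-layer device: remove the disk, stop the
 * worker thread, fail any queued requests and detach the MTD device.
 * Must be called with mtd_table_mutex held; the final kref_put() frees
 * the device once the last opener is gone.
 */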
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Stop the thread */
	kthread_stop(old->thread);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	old->rq->queuedata = NULL;
	blk_start_queue(old->rq);
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* If the device is currently open, tell the trans driver to close it,
	   then put the mtd device, and don't touch it again */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	blktrans_dev_put(old);
	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

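/*
 * Register a translation layer: grab a block major for it and attach
 * it to every MTD device already present.  MTD devices added later
 * reach it through the notifier above.
 */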
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from biting
	   us. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	if (ret)
		tr->major = ret;	/* register_blkdev() picked a major for us */

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}

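/*
 * Unregister a translation layer: remove all of its devices, then give
 * back the block major.
 */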
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

 596
 597static void __exit mtd_blktrans_exit(void)
 598{
 599        /* No race here -- if someone's currently in register_mtd_blktrans
 600           we're screwed anyway. */
 601        if (blktrans_notifier.list.next)
 602                unregister_mtd_user(&blktrans_notifier);
 603}
 604
 605module_exit(mtd_blktrans_exit);
 606
 607EXPORT_SYMBOL_GPL(register_mtd_blktrans);
 608EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
 609EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
 610EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
 611
 612MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
 613MODULE_LICENSE("GPL");
 614MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
 615