linux/drivers/mtd/mtdchar.c
   1/*
   2 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 *
  18 */
  19
  20#include <linux/device.h>
  21#include <linux/fs.h>
  22#include <linux/mm.h>
  23#include <linux/err.h>
  24#include <linux/init.h>
  25#include <linux/kernel.h>
  26#include <linux/module.h>
  27#include <linux/slab.h>
  28#include <linux/sched.h>
  29#include <linux/mutex.h>
  30#include <linux/backing-dev.h>
  31#include <linux/compat.h>
  32#include <linux/mount.h>
  33#include <linux/blkpg.h>
  34#include <linux/magic.h>
  35#include <linux/major.h>
  36#include <linux/mtd/mtd.h>
  37#include <linux/mtd/partitions.h>
  38#include <linux/mtd/map.h>
  39
  40#include <asm/uaccess.h>
  41
  42#include "mtdcore.h"
  43
  44static DEFINE_MUTEX(mtd_mutex);
  45
  46/*
  47 * Data structure to hold the pointer to the mtd device as well
   48 * as the mode information for the various use cases.
  49 */
  50struct mtd_file_info {
  51        struct mtd_info *mtd;
  52        struct inode *ino;
  53        enum mtd_file_modes mode;
  54};
  55
  56static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
  57{
  58        struct mtd_file_info *mfi = file->private_data;
  59        return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
  60}
  61
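/*
 * A tiny internal pseudo filesystem ("mtd_inodefs", defined at the bottom
 * of this file) exists solely to provide one in-kernel inode per MTD
 * device.  Every open of the same device shares that inode's i_mapping,
 * so mmap() and the VM code see a single, consistent backing_dev_info.
 */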
  62static int count;
  63static struct vfsmount *mnt;
  64static struct file_system_type mtd_inodefs_type;
  65
  66static int mtdchar_open(struct inode *inode, struct file *file)
  67{
  68        int minor = iminor(inode);
  69        int devnum = minor >> 1;
  70        int ret = 0;
  71        struct mtd_info *mtd;
  72        struct mtd_file_info *mfi;
  73        struct inode *mtd_ino;
  74
  75        pr_debug("MTD_open\n");
  76
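        /*
         * Each MTD device owns two character-device minors: minor 2*N is
         * the read/write node for device N and minor 2*N+1 is its
         * read-only node, hence devnum = minor >> 1 above and the
         * (minor & 1) check below.
         */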
  77        /* You can't open the RO devices RW */
  78        if ((file->f_mode & FMODE_WRITE) && (minor & 1))
  79                return -EACCES;
  80
  81        ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
  82        if (ret)
  83                return ret;
  84
  85        mutex_lock(&mtd_mutex);
  86        mtd = get_mtd_device(NULL, devnum);
  87
  88        if (IS_ERR(mtd)) {
  89                ret = PTR_ERR(mtd);
  90                goto out;
  91        }
  92
  93        if (mtd->type == MTD_ABSENT) {
  94                ret = -ENODEV;
  95                goto out1;
  96        }
  97
  98        mtd_ino = iget_locked(mnt->mnt_sb, devnum);
  99        if (!mtd_ino) {
 100                ret = -ENOMEM;
 101                goto out1;
 102        }
 103        if (mtd_ino->i_state & I_NEW) {
 104                mtd_ino->i_private = mtd;
 105                mtd_ino->i_mode = S_IFCHR;
 106                mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
 107                unlock_new_inode(mtd_ino);
 108        }
 109        file->f_mapping = mtd_ino->i_mapping;
 110
 111        /* You can't open it RW if it's not a writeable device */
 112        if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
 113                ret = -EACCES;
 114                goto out2;
 115        }
 116
 117        mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
 118        if (!mfi) {
 119                ret = -ENOMEM;
 120                goto out2;
 121        }
 122        mfi->ino = mtd_ino;
 123        mfi->mtd = mtd;
 124        file->private_data = mfi;
 125        mutex_unlock(&mtd_mutex);
 126        return 0;
 127
 128out2:
 129        iput(mtd_ino);
 130out1:
 131        put_mtd_device(mtd);
 132out:
 133        mutex_unlock(&mtd_mutex);
 134        simple_release_fs(&mnt, &count);
 135        return ret;
 136} /* mtdchar_open */
 137
 138/*====================================================================*/
 139
 140static int mtdchar_close(struct inode *inode, struct file *file)
 141{
 142        struct mtd_file_info *mfi = file->private_data;
 143        struct mtd_info *mtd = mfi->mtd;
 144
 145        pr_debug("MTD_close\n");
 146
 147        /* Only sync if opened RW */
 148        if ((file->f_mode & FMODE_WRITE))
 149                mtd_sync(mtd);
 150
 151        iput(mfi->ino);
 152
 153        put_mtd_device(mtd);
 154        file->private_data = NULL;
 155        kfree(mfi);
 156        simple_release_fs(&mnt, &count);
 157
 158        return 0;
 159} /* mtdchar_close */
 160
 161/* Back in June 2001, dwmw2 wrote:
 162 *
 163 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 164 *   userspace buffer down and use it directly with readv/writev.
 165 *
 166 * The implementation below, using mtd_kmalloc_up_to, mitigates
  167 * allocation failures when the system is under memory pressure or
  168 * when memory is highly fragmented, at the cost of reducing the
  169 * performance of the requested transfer due to a smaller buffer size.
 170 *
 171 * A more complex but more memory-efficient implementation based on
 172 * get_user_pages and iovecs to cover extents of those pages is a
 173 * longer-term goal, as intimated by dwmw2 above. However, for the
 174 * write case, this requires yet more complex head and tail transfer
 175 * handling when those head and tail offsets and sizes are such that
 176 * alignment requirements are not met in the NAND subdriver.
 177 */
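/*
 * mtd_kmalloc_up_to() (see mtdcore.c) attempts to allocate a buffer of the
 * requested size but may settle for a smaller, writesize-aligned buffer
 * when memory is tight, and reports the size actually obtained back
 * through its size argument.  The read and write loops below therefore
 * work through a request in chunks of at most that many bytes.
 */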
 178
 179static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
 180                        loff_t *ppos)
 181{
 182        struct mtd_file_info *mfi = file->private_data;
 183        struct mtd_info *mtd = mfi->mtd;
 184        size_t retlen;
  185        size_t total_retlen = 0;
  186        int ret = 0;
 187        int len;
 188        size_t size = count;
 189        char *kbuf;
 190
 191        pr_debug("MTD_read\n");
 192
 193        if (*ppos + count > mtd->size)
 194                count = mtd->size - *ppos;
 195
 196        if (!count)
 197                return 0;
 198
 199        kbuf = mtd_kmalloc_up_to(mtd, &size);
 200        if (!kbuf)
 201                return -ENOMEM;
 202
 203        while (count) {
 204                len = min_t(size_t, count, size);
 205
 206                switch (mfi->mode) {
 207                case MTD_FILE_MODE_OTP_FACTORY:
 208                        ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
 209                                                     &retlen, kbuf);
 210                        break;
 211                case MTD_FILE_MODE_OTP_USER:
 212                        ret = mtd_read_user_prot_reg(mtd, *ppos, len,
 213                                                     &retlen, kbuf);
 214                        break;
 215                case MTD_FILE_MODE_RAW:
 216                {
 217                        struct mtd_oob_ops ops;
 218
 219                        ops.mode = MTD_OPS_RAW;
 220                        ops.datbuf = kbuf;
 221                        ops.oobbuf = NULL;
 222                        ops.len = len;
 223
 224                        ret = mtd_read_oob(mtd, *ppos, &ops);
 225                        retlen = ops.retlen;
 226                        break;
 227                }
 228                default:
 229                        ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
 230                }
  231                /* NAND returns -EBADMSG on ECC errors, but it returns
 232                 * the data. For our userspace tools it is important
 233                 * to dump areas with ECC errors!
 234                 * For kernel internal usage it also might return -EUCLEAN
 235                 * to signal the caller that a bitflip has occurred and has
 236                 * been corrected by the ECC algorithm.
 237                 * Userspace software which accesses NAND this way
  238                 * must be aware of the fact that it deals with NAND.
 239                 */
 240                if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
 241                        *ppos += retlen;
 242                        if (copy_to_user(buf, kbuf, retlen)) {
 243                                kfree(kbuf);
 244                                return -EFAULT;
 245                        }
 246                        else
 247                                total_retlen += retlen;
 248
 249                        count -= retlen;
 250                        buf += retlen;
 251                        if (retlen == 0)
 252                                count = 0;
 253                }
 254                else {
 255                        kfree(kbuf);
 256                        return ret;
 257                }
 258
 259        }
 260
 261        kfree(kbuf);
 262        return total_retlen;
 263} /* mtdchar_read */
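/*
 * Illustrative userspace sketch (not part of this file): a plain read()
 * on the character node ends up in mtdchar_read() above.
 *
 *        int fd = open("/dev/mtd0", O_RDONLY);
 *        unsigned char buf[4096];
 *        ssize_t n = read(fd, buf, sizeof(buf));
 *
 * Reads that would run past mtd->size are truncated, and a read that
 * starts exactly at mtd->size returns 0.
 */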
 264
 265static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
 266                        loff_t *ppos)
 267{
 268        struct mtd_file_info *mfi = file->private_data;
 269        struct mtd_info *mtd = mfi->mtd;
 270        size_t size = count;
 271        char *kbuf;
 272        size_t retlen;
  273        size_t total_retlen = 0;
  274        int ret = 0;
 275        int len;
 276
 277        pr_debug("MTD_write\n");
 278
 279        if (*ppos == mtd->size)
 280                return -ENOSPC;
 281
 282        if (*ppos + count > mtd->size)
 283                count = mtd->size - *ppos;
 284
 285        if (!count)
 286                return 0;
 287
 288        kbuf = mtd_kmalloc_up_to(mtd, &size);
 289        if (!kbuf)
 290                return -ENOMEM;
 291
 292        while (count) {
 293                len = min_t(size_t, count, size);
 294
 295                if (copy_from_user(kbuf, buf, len)) {
 296                        kfree(kbuf);
 297                        return -EFAULT;
 298                }
 299
 300                switch (mfi->mode) {
 301                case MTD_FILE_MODE_OTP_FACTORY:
 302                        ret = -EROFS;
 303                        break;
 304                case MTD_FILE_MODE_OTP_USER:
 305                        ret = mtd_write_user_prot_reg(mtd, *ppos, len,
 306                                                      &retlen, kbuf);
 307                        break;
 308
 309                case MTD_FILE_MODE_RAW:
 310                {
 311                        struct mtd_oob_ops ops;
 312
 313                        ops.mode = MTD_OPS_RAW;
 314                        ops.datbuf = kbuf;
 315                        ops.oobbuf = NULL;
 316                        ops.ooboffs = 0;
 317                        ops.len = len;
 318
 319                        ret = mtd_write_oob(mtd, *ppos, &ops);
 320                        retlen = ops.retlen;
 321                        break;
 322                }
 323
 324                default:
 325                        ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
 326                }
 327
 328                /*
 329                 * Return -ENOSPC only if no data could be written at all.
 330                 * Otherwise just return the number of bytes that actually
 331                 * have been written.
 332                 */
 333                if ((ret == -ENOSPC) && (total_retlen))
 334                        break;
 335
 336                if (!ret) {
 337                        *ppos += retlen;
 338                        total_retlen += retlen;
 339                        count -= retlen;
 340                        buf += retlen;
 341                }
 342                else {
 343                        kfree(kbuf);
 344                        return ret;
 345                }
 346        }
 347
 348        kfree(kbuf);
 349        return total_retlen;
 350} /* mtdchar_write */
 351
 352/*======================================================================
 353
 354    IOCTL calls for getting device parameters.
 355
 356======================================================================*/
 357static void mtdchar_erase_callback (struct erase_info *instr)
 358{
 359        wake_up((wait_queue_head_t *)instr->priv);
 360}
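/*
 * The MEMERASE/MEMERASE64 handler below keeps a wait queue head on its
 * stack, stores its address in erase_info->priv and sleeps; the driver
 * calls this callback once the erase has finished (or failed), waking
 * the ioctl back up.
 */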
 361
 362static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
 363{
 364        struct mtd_info *mtd = mfi->mtd;
 365        size_t retlen;
 366
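        /*
         * The zero-length reads at offset -1 below never transfer any
         * data; they merely probe whether the device implements the
         * factory/user OTP hooks, which return -EOPNOTSUPP when absent.
         */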
 367        switch (mode) {
 368        case MTD_OTP_FACTORY:
 369                if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
 370                                -EOPNOTSUPP)
 371                        return -EOPNOTSUPP;
 372
 373                mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
 374                break;
 375        case MTD_OTP_USER:
 376                if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
 377                                -EOPNOTSUPP)
 378                        return -EOPNOTSUPP;
 379
 380                mfi->mode = MTD_FILE_MODE_OTP_USER;
 381                break;
 382        case MTD_OTP_OFF:
 383                mfi->mode = MTD_FILE_MODE_NORMAL;
 384                break;
 385        default:
 386                return -EINVAL;
 387        }
 388
 389        return 0;
 390}
 391
 392static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
 393        uint64_t start, uint32_t length, void __user *ptr,
 394        uint32_t __user *retp)
 395{
 396        struct mtd_file_info *mfi = file->private_data;
 397        struct mtd_oob_ops ops;
 398        uint32_t retlen;
 399        int ret = 0;
 400
 401        if (!(file->f_mode & FMODE_WRITE))
 402                return -EPERM;
 403
 404        if (length > 4096)
 405                return -EINVAL;
 406
 407        if (!mtd->_write_oob)
 408                ret = -EOPNOTSUPP;
 409        else
 410                ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
 411
 412        if (ret)
 413                return ret;
 414
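        /*
         * 'start' is a raw flash offset: its low bits give the byte offset
         * within the page's OOB area (ops.ooboffs), and the page-aligned
         * remainder (masked off further down) selects the page whose OOB
         * area is written.
         */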
 415        ops.ooblen = length;
 416        ops.ooboffs = start & (mtd->writesize - 1);
 417        ops.datbuf = NULL;
 418        ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
 419                MTD_OPS_PLACE_OOB;
 420
 421        if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 422                return -EINVAL;
 423
 424        ops.oobbuf = memdup_user(ptr, length);
 425        if (IS_ERR(ops.oobbuf))
 426                return PTR_ERR(ops.oobbuf);
 427
 428        start &= ~((uint64_t)mtd->writesize - 1);
 429        ret = mtd_write_oob(mtd, start, &ops);
 430
 431        if (ops.oobretlen > 0xFFFFFFFFU)
 432                ret = -EOVERFLOW;
 433        retlen = ops.oobretlen;
 434        if (copy_to_user(retp, &retlen, sizeof(length)))
 435                ret = -EFAULT;
 436
 437        kfree(ops.oobbuf);
 438        return ret;
 439}
 440
 441static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
 442        uint64_t start, uint32_t length, void __user *ptr,
 443        uint32_t __user *retp)
 444{
 445        struct mtd_file_info *mfi = file->private_data;
 446        struct mtd_oob_ops ops;
 447        int ret = 0;
 448
 449        if (length > 4096)
 450                return -EINVAL;
 451
 452        if (!access_ok(VERIFY_WRITE, ptr, length))
 453                return -EFAULT;
 454
 455        ops.ooblen = length;
 456        ops.ooboffs = start & (mtd->writesize - 1);
 457        ops.datbuf = NULL;
 458        ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
 459                MTD_OPS_PLACE_OOB;
 460
 461        if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 462                return -EINVAL;
 463
 464        ops.oobbuf = kmalloc(length, GFP_KERNEL);
 465        if (!ops.oobbuf)
 466                return -ENOMEM;
 467
 468        start &= ~((uint64_t)mtd->writesize - 1);
 469        ret = mtd_read_oob(mtd, start, &ops);
 470
 471        if (put_user(ops.oobretlen, retp))
 472                ret = -EFAULT;
 473        else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
 474                                            ops.oobretlen))
 475                ret = -EFAULT;
 476
 477        kfree(ops.oobbuf);
 478
 479        /*
 480         * NAND returns -EBADMSG on ECC errors, but it returns the OOB
 481         * data. For our userspace tools it is important to dump areas
 482         * with ECC errors!
 483         * For kernel internal usage it also might return -EUCLEAN
  484         * to signal the caller that a bitflip has occurred and has
 485         * been corrected by the ECC algorithm.
 486         *
 487         * Note: currently the standard NAND function, nand_read_oob_std,
 488         * does not calculate ECC for the OOB area, so do not rely on
 489         * this behavior unless you have replaced it with your own.
 490         */
 491        if (mtd_is_bitflip_or_eccerr(ret))
 492                return 0;
 493
 494        return ret;
 495}
 496
 497/*
 498 * Copies (and truncates, if necessary) data from the larger struct,
 499 * nand_ecclayout, to the smaller, deprecated layout struct,
 500 * nand_ecclayout_user. This is necessary only to support the deprecated
 501 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
 502 * nand_ecclayout flexibly (i.e. the struct may change size in new
 503 * releases without requiring major rewrites).
 504 */
 505static int shrink_ecclayout(const struct nand_ecclayout *from,
 506                struct nand_ecclayout_user *to)
 507{
 508        int i;
 509
 510        if (!from || !to)
 511                return -EINVAL;
 512
 513        memset(to, 0, sizeof(*to));
 514
 515        to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
 516        for (i = 0; i < to->eccbytes; i++)
 517                to->eccpos[i] = from->eccpos[i];
 518
 519        for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
 520                if (from->oobfree[i].length == 0 &&
 521                                from->oobfree[i].offset == 0)
 522                        break;
 523                to->oobavail += from->oobfree[i].length;
 524                to->oobfree[i] = from->oobfree[i];
 525        }
 526
 527        return 0;
 528}
 529
 530static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
 531                           struct blkpg_ioctl_arg __user *arg)
 532{
 533        struct blkpg_ioctl_arg a;
 534        struct blkpg_partition p;
 535
 536        if (!capable(CAP_SYS_ADMIN))
 537                return -EPERM;
 538
 539        if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
 540                return -EFAULT;
 541
 542        if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
 543                return -EFAULT;
 544
 545        switch (a.op) {
 546        case BLKPG_ADD_PARTITION:
 547
  548                /* Only the master mtd device may be used to add partitions */
 549                if (mtd_is_partition(mtd))
 550                        return -EINVAL;
 551
 552                /* Sanitize user input */
 553                p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
 554
 555                return mtd_add_partition(mtd, p.devname, p.start, p.length);
 556
 557        case BLKPG_DEL_PARTITION:
 558
 559                if (p.pno < 0)
 560                        return -EINVAL;
 561
 562                return mtd_del_partition(mtd, p.pno);
 563
 564        default:
 565                return -EINVAL;
 566        }
 567}
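/*
 * Illustrative userspace sketch (not part of this file): adding a 1 MiB
 * partition to a master MTD device through the BLKPG ioctl handled above.
 *
 *        struct blkpg_partition p = {
 *                .pno = 1,
 *                .start = 0,
 *                .length = 1024 * 1024,
 *                .devname = "example",
 *        };
 *        struct blkpg_ioctl_arg a = {
 *                .op = BLKPG_ADD_PARTITION,
 *                .datalen = sizeof(p),
 *                .data = &p,
 *        };
 *        ioctl(fd, BLKPG, &a);
 */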
 568
 569static int mtdchar_write_ioctl(struct mtd_info *mtd,
 570                struct mtd_write_req __user *argp)
 571{
 572        struct mtd_write_req req;
 573        struct mtd_oob_ops ops;
 574        const void __user *usr_data, *usr_oob;
 575        int ret;
 576
 577        if (copy_from_user(&req, argp, sizeof(req)))
 578                return -EFAULT;
 579
 580        usr_data = (const void __user *)(uintptr_t)req.usr_data;
 581        usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
 582        if (!access_ok(VERIFY_READ, usr_data, req.len) ||
 583            !access_ok(VERIFY_READ, usr_oob, req.ooblen))
 584                return -EFAULT;
 585
 586        if (!mtd->_write_oob)
 587                return -EOPNOTSUPP;
 588
 589        ops.mode = req.mode;
 590        ops.len = (size_t)req.len;
 591        ops.ooblen = (size_t)req.ooblen;
 592        ops.ooboffs = 0;
 593
 594        if (usr_data) {
 595                ops.datbuf = memdup_user(usr_data, ops.len);
 596                if (IS_ERR(ops.datbuf))
 597                        return PTR_ERR(ops.datbuf);
 598        } else {
 599                ops.datbuf = NULL;
 600        }
 601
 602        if (usr_oob) {
 603                ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
 604                if (IS_ERR(ops.oobbuf)) {
 605                        kfree(ops.datbuf);
 606                        return PTR_ERR(ops.oobbuf);
 607                }
 608        } else {
 609                ops.oobbuf = NULL;
 610        }
 611
 612        ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
 613
 614        kfree(ops.datbuf);
 615        kfree(ops.oobbuf);
 616
 617        return ret;
 618}
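/*
 * MEMWRITE allows page data and OOB data to be written in one request.
 * struct mtd_write_req (linux/mtd/mtd-abi.h) carries the flash offset,
 * the data and OOB lengths, user pointers to the two buffers, and the
 * write mode (e.g. MTD_OPS_PLACE_OOB or MTD_OPS_RAW); either buffer
 * pointer may be NULL to skip that part of the write.
 */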
 619
 620static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
 621{
 622        struct mtd_file_info *mfi = file->private_data;
 623        struct mtd_info *mtd = mfi->mtd;
 624        void __user *argp = (void __user *)arg;
 625        int ret = 0;
 626        u_long size;
 627        struct mtd_info_user info;
 628
 629        pr_debug("MTD_ioctl\n");
 630
 631        size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
 632        if (cmd & IOC_IN) {
 633                if (!access_ok(VERIFY_READ, argp, size))
 634                        return -EFAULT;
 635        }
 636        if (cmd & IOC_OUT) {
 637                if (!access_ok(VERIFY_WRITE, argp, size))
 638                        return -EFAULT;
 639        }
 640
 641        switch (cmd) {
 642        case MEMGETREGIONCOUNT:
 643                if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
 644                        return -EFAULT;
 645                break;
 646
 647        case MEMGETREGIONINFO:
 648        {
 649                uint32_t ur_idx;
 650                struct mtd_erase_region_info *kr;
 651                struct region_info_user __user *ur = argp;
 652
 653                if (get_user(ur_idx, &(ur->regionindex)))
 654                        return -EFAULT;
 655
 656                if (ur_idx >= mtd->numeraseregions)
 657                        return -EINVAL;
 658
 659                kr = &(mtd->eraseregions[ur_idx]);
 660
 661                if (put_user(kr->offset, &(ur->offset))
 662                    || put_user(kr->erasesize, &(ur->erasesize))
 663                    || put_user(kr->numblocks, &(ur->numblocks)))
 664                        return -EFAULT;
 665
 666                break;
 667        }
 668
 669        case MEMGETINFO:
 670                memset(&info, 0, sizeof(info));
 671                info.type       = mtd->type;
 672                info.flags      = mtd->flags;
 673                info.size       = mtd->size;
 674                info.erasesize  = mtd->erasesize;
 675                info.writesize  = mtd->writesize;
 676                info.oobsize    = mtd->oobsize;
  677                /* The field below is obsolete */
 678                info.padding    = 0;
 679                if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
 680                        return -EFAULT;
 681                break;
 682
 683        case MEMERASE:
 684        case MEMERASE64:
 685        {
 686                struct erase_info *erase;
 687
  688                if (!(file->f_mode & FMODE_WRITE))
 689                        return -EPERM;
 690
  691                erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
 692                if (!erase)
 693                        ret = -ENOMEM;
 694                else {
 695                        wait_queue_head_t waitq;
 696                        DECLARE_WAITQUEUE(wait, current);
 697
 698                        init_waitqueue_head(&waitq);
 699
 700                        if (cmd == MEMERASE64) {
 701                                struct erase_info_user64 einfo64;
 702
 703                                if (copy_from_user(&einfo64, argp,
 704                                            sizeof(struct erase_info_user64))) {
 705                                        kfree(erase);
 706                                        return -EFAULT;
 707                                }
 708                                erase->addr = einfo64.start;
 709                                erase->len = einfo64.length;
 710                        } else {
 711                                struct erase_info_user einfo32;
 712
 713                                if (copy_from_user(&einfo32, argp,
 714                                            sizeof(struct erase_info_user))) {
 715                                        kfree(erase);
 716                                        return -EFAULT;
 717                                }
 718                                erase->addr = einfo32.start;
 719                                erase->len = einfo32.length;
 720                        }
 721                        erase->mtd = mtd;
 722                        erase->callback = mtdchar_erase_callback;
 723                        erase->priv = (unsigned long)&waitq;
 724
 725                        /*
 726                          FIXME: Allow INTERRUPTIBLE. Which means
 727                          not having the wait_queue head on the stack.
 728
 729                          If the wq_head is on the stack, and we
 730                          leave because we got interrupted, then the
 731                          wq_head is no longer there when the
 732                          callback routine tries to wake us up.
 733                        */
 734                        ret = mtd_erase(mtd, erase);
 735                        if (!ret) {
 736                                set_current_state(TASK_UNINTERRUPTIBLE);
 737                                add_wait_queue(&waitq, &wait);
 738                                if (erase->state != MTD_ERASE_DONE &&
 739                                    erase->state != MTD_ERASE_FAILED)
 740                                        schedule();
 741                                remove_wait_queue(&waitq, &wait);
 742                                set_current_state(TASK_RUNNING);
 743
 744                                ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
 745                        }
 746                        kfree(erase);
 747                }
 748                break;
 749        }
 750
 751        case MEMWRITEOOB:
 752        {
 753                struct mtd_oob_buf buf;
 754                struct mtd_oob_buf __user *buf_user = argp;
 755
 756                /* NOTE: writes return length to buf_user->length */
 757                if (copy_from_user(&buf, argp, sizeof(buf)))
 758                        ret = -EFAULT;
 759                else
 760                        ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
 761                                buf.ptr, &buf_user->length);
 762                break;
 763        }
 764
 765        case MEMREADOOB:
 766        {
 767                struct mtd_oob_buf buf;
 768                struct mtd_oob_buf __user *buf_user = argp;
 769
 770                /* NOTE: writes return length to buf_user->start */
 771                if (copy_from_user(&buf, argp, sizeof(buf)))
 772                        ret = -EFAULT;
 773                else
 774                        ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
 775                                buf.ptr, &buf_user->start);
 776                break;
 777        }
 778
 779        case MEMWRITEOOB64:
 780        {
 781                struct mtd_oob_buf64 buf;
 782                struct mtd_oob_buf64 __user *buf_user = argp;
 783
 784                if (copy_from_user(&buf, argp, sizeof(buf)))
 785                        ret = -EFAULT;
 786                else
 787                        ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
 788                                (void __user *)(uintptr_t)buf.usr_ptr,
 789                                &buf_user->length);
 790                break;
 791        }
 792
 793        case MEMREADOOB64:
 794        {
 795                struct mtd_oob_buf64 buf;
 796                struct mtd_oob_buf64 __user *buf_user = argp;
 797
 798                if (copy_from_user(&buf, argp, sizeof(buf)))
 799                        ret = -EFAULT;
 800                else
 801                        ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
 802                                (void __user *)(uintptr_t)buf.usr_ptr,
 803                                &buf_user->length);
 804                break;
 805        }
 806
 807        case MEMWRITE:
 808        {
 809                ret = mtdchar_write_ioctl(mtd,
 810                      (struct mtd_write_req __user *)arg);
 811                break;
 812        }
 813
 814        case MEMLOCK:
 815        {
 816                struct erase_info_user einfo;
 817
 818                if (copy_from_user(&einfo, argp, sizeof(einfo)))
 819                        return -EFAULT;
 820
 821                ret = mtd_lock(mtd, einfo.start, einfo.length);
 822                break;
 823        }
 824
 825        case MEMUNLOCK:
 826        {
 827                struct erase_info_user einfo;
 828
 829                if (copy_from_user(&einfo, argp, sizeof(einfo)))
 830                        return -EFAULT;
 831
 832                ret = mtd_unlock(mtd, einfo.start, einfo.length);
 833                break;
 834        }
 835
 836        case MEMISLOCKED:
 837        {
 838                struct erase_info_user einfo;
 839
 840                if (copy_from_user(&einfo, argp, sizeof(einfo)))
 841                        return -EFAULT;
 842
 843                ret = mtd_is_locked(mtd, einfo.start, einfo.length);
 844                break;
 845        }
 846
 847        /* Legacy interface */
 848        case MEMGETOOBSEL:
 849        {
 850                struct nand_oobinfo oi;
 851
 852                if (!mtd->ecclayout)
 853                        return -EOPNOTSUPP;
 854                if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
 855                        return -EINVAL;
 856
 857                oi.useecc = MTD_NANDECC_AUTOPLACE;
 858                memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
 859                memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
 860                       sizeof(oi.oobfree));
 861                oi.eccbytes = mtd->ecclayout->eccbytes;
 862
 863                if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
 864                        return -EFAULT;
 865                break;
 866        }
 867
 868        case MEMGETBADBLOCK:
 869        {
 870                loff_t offs;
 871
 872                if (copy_from_user(&offs, argp, sizeof(loff_t)))
 873                        return -EFAULT;
 874                return mtd_block_isbad(mtd, offs);
 875                break;
 876        }
 877
 878        case MEMSETBADBLOCK:
 879        {
 880                loff_t offs;
 881
 882                if (copy_from_user(&offs, argp, sizeof(loff_t)))
 883                        return -EFAULT;
 884                return mtd_block_markbad(mtd, offs);
 885                break;
 886        }
 887
 888        case OTPSELECT:
 889        {
 890                int mode;
 891                if (copy_from_user(&mode, argp, sizeof(int)))
 892                        return -EFAULT;
 893
 894                mfi->mode = MTD_FILE_MODE_NORMAL;
 895
 896                ret = otp_select_filemode(mfi, mode);
 897
 898                file->f_pos = 0;
 899                break;
 900        }
 901
 902        case OTPGETREGIONCOUNT:
 903        case OTPGETREGIONINFO:
 904        {
 905                struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
 906                size_t retlen;
 907                if (!buf)
 908                        return -ENOMEM;
 909                switch (mfi->mode) {
 910                case MTD_FILE_MODE_OTP_FACTORY:
 911                        ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
 912                        break;
 913                case MTD_FILE_MODE_OTP_USER:
 914                        ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
 915                        break;
 916                default:
 917                        ret = -EINVAL;
 918                        break;
 919                }
 920                if (!ret) {
 921                        if (cmd == OTPGETREGIONCOUNT) {
 922                                int nbr = retlen / sizeof(struct otp_info);
 923                                ret = copy_to_user(argp, &nbr, sizeof(int));
 924                        } else
 925                                ret = copy_to_user(argp, buf, retlen);
 926                        if (ret)
 927                                ret = -EFAULT;
 928                }
 929                kfree(buf);
 930                break;
 931        }
 932
 933        case OTPLOCK:
 934        {
 935                struct otp_info oinfo;
 936
 937                if (mfi->mode != MTD_FILE_MODE_OTP_USER)
 938                        return -EINVAL;
 939                if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
 940                        return -EFAULT;
 941                ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
 942                break;
 943        }
 944
 945        /* This ioctl is being deprecated - it truncates the ECC layout */
 946        case ECCGETLAYOUT:
 947        {
 948                struct nand_ecclayout_user *usrlay;
 949
 950                if (!mtd->ecclayout)
 951                        return -EOPNOTSUPP;
 952
 953                usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
 954                if (!usrlay)
 955                        return -ENOMEM;
 956
 957                shrink_ecclayout(mtd->ecclayout, usrlay);
 958
 959                if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
 960                        ret = -EFAULT;
 961                kfree(usrlay);
 962                break;
 963        }
 964
 965        case ECCGETSTATS:
 966        {
 967                if (copy_to_user(argp, &mtd->ecc_stats,
 968                                 sizeof(struct mtd_ecc_stats)))
 969                        return -EFAULT;
 970                break;
 971        }
 972
 973        case MTDFILEMODE:
 974        {
 975                mfi->mode = 0;
 976
 977                switch(arg) {
 978                case MTD_FILE_MODE_OTP_FACTORY:
 979                case MTD_FILE_MODE_OTP_USER:
 980                        ret = otp_select_filemode(mfi, arg);
 981                        break;
 982
 983                case MTD_FILE_MODE_RAW:
 984                        if (!mtd_has_oob(mtd))
 985                                return -EOPNOTSUPP;
 986                        mfi->mode = arg;
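                        /* fall through to MTD_FILE_MODE_NORMAL */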
 987
 988                case MTD_FILE_MODE_NORMAL:
 989                        break;
 990                default:
 991                        ret = -EINVAL;
 992                }
 993                file->f_pos = 0;
 994                break;
 995        }
 996
 997        case BLKPG:
 998        {
 999                ret = mtdchar_blkpg_ioctl(mtd,
1000                      (struct blkpg_ioctl_arg __user *)arg);
1001                break;
1002        }
1003
1004        case BLKRRPART:
1005        {
1006                /* No reread partition feature. Just return ok */
1007                ret = 0;
1008                break;
1009        }
1010
1011        default:
1012                ret = -ENOTTY;
1013        }
1014
1015        return ret;
 1016} /* mtdchar_ioctl */
1017
1018static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
1019{
1020        int ret;
1021
1022        mutex_lock(&mtd_mutex);
1023        ret = mtdchar_ioctl(file, cmd, arg);
1024        mutex_unlock(&mtd_mutex);
1025
1026        return ret;
1027}
1028
1029#ifdef CONFIG_COMPAT
1030
1031struct mtd_oob_buf32 {
1032        u_int32_t start;
1033        u_int32_t length;
1034        compat_caddr_t ptr;     /* unsigned char* */
1035};
1036
1037#define MEMWRITEOOB32           _IOWR('M', 3, struct mtd_oob_buf32)
1038#define MEMREADOOB32            _IOWR('M', 4, struct mtd_oob_buf32)
1039
1040static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
1041        unsigned long arg)
1042{
1043        struct mtd_file_info *mfi = file->private_data;
1044        struct mtd_info *mtd = mfi->mtd;
1045        void __user *argp = compat_ptr(arg);
1046        int ret = 0;
1047
1048        mutex_lock(&mtd_mutex);
1049
1050        switch (cmd) {
1051        case MEMWRITEOOB32:
1052        {
1053                struct mtd_oob_buf32 buf;
1054                struct mtd_oob_buf32 __user *buf_user = argp;
1055
1056                if (copy_from_user(&buf, argp, sizeof(buf)))
1057                        ret = -EFAULT;
1058                else
1059                        ret = mtdchar_writeoob(file, mtd, buf.start,
1060                                buf.length, compat_ptr(buf.ptr),
1061                                &buf_user->length);
1062                break;
1063        }
1064
1065        case MEMREADOOB32:
1066        {
1067                struct mtd_oob_buf32 buf;
1068                struct mtd_oob_buf32 __user *buf_user = argp;
1069
1070                /* NOTE: writes return length to buf->start */
1071                if (copy_from_user(&buf, argp, sizeof(buf)))
1072                        ret = -EFAULT;
1073                else
1074                        ret = mtdchar_readoob(file, mtd, buf.start,
1075                                buf.length, compat_ptr(buf.ptr),
1076                                &buf_user->start);
1077                break;
1078        }
1079        default:
1080                ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
1081        }
1082
1083        mutex_unlock(&mtd_mutex);
1084
1085        return ret;
1086}
1087
1088#endif /* CONFIG_COMPAT */
1089
1090/*
1091 * try to determine where a shared mapping can be made
 1092 * - only supported for NOMMU at the moment (an MMU doesn't copy private
1093 *   mappings)
1094 */
1095#ifndef CONFIG_MMU
1096static unsigned long mtdchar_get_unmapped_area(struct file *file,
1097                                           unsigned long addr,
1098                                           unsigned long len,
1099                                           unsigned long pgoff,
1100                                           unsigned long flags)
1101{
1102        struct mtd_file_info *mfi = file->private_data;
1103        struct mtd_info *mtd = mfi->mtd;
1104        unsigned long offset;
1105        int ret;
1106
1107        if (addr != 0)
1108                return (unsigned long) -EINVAL;
1109
1110        if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
1111                return (unsigned long) -EINVAL;
1112
1113        offset = pgoff << PAGE_SHIFT;
1114        if (offset > mtd->size - len)
1115                return (unsigned long) -EINVAL;
1116
1117        ret = mtd_get_unmapped_area(mtd, len, offset, flags);
1118        return ret == -EOPNOTSUPP ? -ENODEV : ret;
1119}
1120#endif
1121
1122/*
1123 * set up a mapping for shared memory segments
1124 */
1125static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
1126{
1127#ifdef CONFIG_MMU
1128        struct mtd_file_info *mfi = file->private_data;
1129        struct mtd_info *mtd = mfi->mtd;
1130        struct map_info *map = mtd->priv;
1131
1132        /* This is broken because it assumes the MTD device is map-based
1133           and that mtd->priv is a valid struct map_info.  It should be
1134           replaced with something that uses the mtd_get_unmapped_area()
1135           operation properly. */
1136        if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
1137#ifdef pgprot_noncached
1138                if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
1139                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1140#endif
1141                return vm_iomap_memory(vma, map->phys, map->size);
1142        }
1143        return -ENODEV;
1144#else
1145        return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
1146#endif
1147}
1148
1149static const struct file_operations mtd_fops = {
1150        .owner          = THIS_MODULE,
1151        .llseek         = mtdchar_lseek,
1152        .read           = mtdchar_read,
1153        .write          = mtdchar_write,
1154        .unlocked_ioctl = mtdchar_unlocked_ioctl,
1155#ifdef CONFIG_COMPAT
1156        .compat_ioctl   = mtdchar_compat_ioctl,
1157#endif
1158        .open           = mtdchar_open,
1159        .release        = mtdchar_close,
1160        .mmap           = mtdchar_mmap,
1161#ifndef CONFIG_MMU
1162        .get_unmapped_area = mtdchar_get_unmapped_area,
1163#endif
1164};
1165
1166static const struct super_operations mtd_ops = {
1167        .drop_inode = generic_delete_inode,
1168        .statfs = simple_statfs,
1169};
1170
1171static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
1172                                int flags, const char *dev_name, void *data)
1173{
1174        return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
1175}
1176
1177static struct file_system_type mtd_inodefs_type = {
1178       .name = "mtd_inodefs",
1179       .mount = mtd_inodefs_mount,
1180       .kill_sb = kill_anon_super,
1181};
1182MODULE_ALIAS_FS("mtd_inodefs");
1183
1184int __init init_mtdchar(void)
1185{
1186        int ret;
1187
1188        ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
1189                                   "mtd", &mtd_fops);
1190        if (ret < 0) {
1191                pr_err("Can't allocate major number %d for MTD\n",
1192                       MTD_CHAR_MAJOR);
1193                return ret;
1194        }
1195
1196        ret = register_filesystem(&mtd_inodefs_type);
1197        if (ret) {
1198                pr_err("Can't register mtd_inodefs filesystem, error %d\n",
1199                       ret);
1200                goto err_unregister_chdev;
1201        }
1202
1203        return ret;
1204
1205err_unregister_chdev:
1206        __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1207        return ret;
1208}
1209
1210void __exit cleanup_mtdchar(void)
1211{
1212        unregister_filesystem(&mtd_inodefs_type);
1213        __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1214}
1215
1216MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
1217