linux-old/drivers/mtd/chips/cfi_cmdset_0001.c
   1/*
   2 * Common Flash Interface support:
   3 *   Intel Extended Vendor Command Set (ID 0x0001)
   4 *
   5 * (C) 2000 Red Hat. GPL'd
   6 *
   7 * $Id: cfi_cmdset_0001.c,v 1.114 2003/03/18 12:28:40 dwmw2 Exp $
   8 *
   9 * 
  10 * 10/10/2000   Nicolas Pitre <nico@cam.org>
  11 *      - completely revamped method functions so they are aware and
  12 *        independent of the flash geometry (buswidth, interleave, etc.)
  13 *      - scalability vs code size is completely set at compile-time
  14 *        (see include/linux/mtd/cfi.h for selection)
  15 *      - optimized write buffer method
  16 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
  17 *      - reworked lock/unlock/erase support for var size flash
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/sched.h>
  24#include <asm/io.h>
  25#include <asm/byteorder.h>
  26
  27#include <linux/errno.h>
  28#include <linux/slab.h>
  29#include <linux/delay.h>
  30#include <linux/interrupt.h>
  31#include <linux/mtd/map.h>
  32#include <linux/mtd/cfi.h>
  33#include <linux/mtd/compatmac.h>
  34
   35// debugging: define FORCE_WORD_WRITE to turn off buffer write mode
  36
  37static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  38static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  39static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  40static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
  41static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
  42static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
  43static void cfi_intelext_sync (struct mtd_info *);
  44static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
  45static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
  46static int cfi_intelext_suspend (struct mtd_info *);
  47static void cfi_intelext_resume (struct mtd_info *);
  48
  49static void cfi_intelext_destroy(struct mtd_info *);
  50
  51struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
  52
  53static struct mtd_info *cfi_intelext_setup (struct map_info *);
  54
  55static int do_point (struct mtd_info *mtd, loff_t from, size_t len,
  56                     size_t *retlen, u_char **mtdbuf);
  57static void do_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
  58                        size_t len);
  59
  60static struct mtd_chip_driver cfi_intelext_chipdrv = {
  61        probe: NULL, /* Not usable directly */
  62        destroy: cfi_intelext_destroy,
  63        name: "cfi_cmdset_0001",
  64        module: THIS_MODULE
  65};
  66
  67/* #define DEBUG_LOCK_BITS */
  68/* #define DEBUG_CFI_FEATURES */
  69
  70#ifdef DEBUG_CFI_FEATURES
  71static void cfi_tell_features(struct cfi_pri_intelext *extp)
  72{
  73        int i;
  74        printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
  75        printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
  76        printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
  77        printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
  78        printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
  79        printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
  80        printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
  81        printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
  82        printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
  83        printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
  84        for (i=9; i<32; i++) {
  85                if (extp->FeatureSupport & (1<<i)) 
  86                        printk("     - Unknown Bit %X:      supported\n", i);
  87        }
  88        
  89        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
  90        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
  91        for (i=1; i<8; i++) {
  92                if (extp->SuspendCmdSupport & (1<<i))
  93                        printk("     - Unknown Bit %X:               supported\n", i);
  94        }
  95        
  96        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
  97        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
  98        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
  99        for (i=2; i<16; i++) {
 100                if (extp->BlkStatusRegMask & (1<<i))
 101                        printk("     - Unknown Bit %X Active: yes\n",i);
 102        }
 103        
  104        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 
  105               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
  106        if (extp->VppOptimal)
  107                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 
  108                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
 109}
 110#endif
 111
 112/* This routine is made available to other mtd code via
 113 * inter_module_register.  It must only be accessed through
 114 * inter_module_get which will bump the use count of this module.  The
 115 * addresses passed back in cfi are valid as long as the use count of
 116 * this module is non-zero, i.e. between inter_module_get and
 117 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 118 */
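     /*
      * Illustrative sketch only, not part of the original source: given the
      * inter_module registration described above, other MTD code would
      * normally look this entry point up by name rather than call it
      * directly, roughly along these lines:
      *
      *     struct mtd_info *(*cmdset)(struct map_info *, int);
      *
      *     cmdset = (void *)inter_module_get("cfi_cmdset_0001");
      *     if (cmdset)
      *             mtd = cmdset(map, primary);
      *     ...
      *     inter_module_put("cfi_cmdset_0001");
      *
      * The lookup name used here is an assumption; only the mechanism is
      * documented in the comment above.
      */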
 119struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
 120{
 121        struct cfi_private *cfi = map->fldrv_priv;
 122        int i;
 123        __u32 base = cfi->chips[0].start;
 124
 125        if (cfi->cfi_mode == CFI_MODE_CFI) {
 126                /* 
 127                 * It's a real CFI chip, not one for which the probe
 128                 * routine faked a CFI structure. So we read the feature
 129                 * table from it.
 130                 */
 131                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
 132                struct cfi_pri_intelext *extp;
 133                int ofs_factor = cfi->interleave * cfi->device_type;
 134
 135                //printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);
 136                if (!adr)
 137                        return NULL;
 138
 139                /* Switch it into Query Mode */
 140                cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
 141
 142                extp = kmalloc(sizeof(*extp), GFP_KERNEL);
 143                if (!extp) {
 144                        printk(KERN_ERR "Failed to allocate memory\n");
 145                        return NULL;
 146                }
 147                
 148                /* Read in the Extended Query Table */
 149                for (i=0; i<sizeof(*extp); i++) {
 150                        ((unsigned char *)extp)[i] = 
 151                                cfi_read_query(map, (base+((adr+i)*ofs_factor)));
 152                }
 153                
 154                if (extp->MajorVersion != '1' || 
 155                    (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
 156                        printk(KERN_WARNING "  Unknown IntelExt Extended Query "
 157                               "version %c.%c.\n",  extp->MajorVersion,
 158                               extp->MinorVersion);
 159                        kfree(extp);
 160                        return NULL;
 161                }
 162                
 163                /* Do some byteswapping if necessary */
 164                extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
 165                extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
 166                extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
 167                        
 168#ifdef DEBUG_CFI_FEATURES
 169                /* Tell the user about it in lots of lovely detail */
 170                cfi_tell_features(extp);
 171#endif  
 172
 173                if(extp->SuspendCmdSupport & 1) {
 174//#define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
 175#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
  176/* Some Intel StrataFlash parts prior to FPO revision C have bugs in this area */
 177                        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
 178                               "erase on write disabled.\n");
 179                        extp->SuspendCmdSupport &= ~1;
 180#else
 181                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
 182#endif
 183                }
 184                /* Install our own private info structure */
 185                cfi->cmdset_priv = extp;        
 186        }
 187
 188        for (i=0; i< cfi->numchips; i++) {
 189                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
 190                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
 191                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
 192                cfi->chips[i].ref_point_counter = 0;
 193        }               
 194
 195        map->fldrv = &cfi_intelext_chipdrv;
 196        
 197        /* Make sure it's in read mode */
 198        cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
 199        return cfi_intelext_setup(map);
 200}
 201
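     /*
      * Build the mtd_info for the probed chip set: the size and erase
      * region geometry come from the CFI query data, and the read, write,
      * erase, lock/unlock and suspend/resume methods are hooked up here.
      */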
 202static struct mtd_info *cfi_intelext_setup(struct map_info *map)
 203{
 204        struct cfi_private *cfi = map->fldrv_priv;
 205        struct mtd_info *mtd;
 206        unsigned long offset = 0;
 207        int i,j;
 208        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
 209
 210        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
 211        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
 212
 213        if (!mtd) {
 214                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
 215                goto setup_err;
 216        }
 217
 218        memset(mtd, 0, sizeof(*mtd));
 219        mtd->priv = map;
 220        mtd->type = MTD_NORFLASH;
 221        mtd->size = devsize * cfi->numchips;
 222
 223        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
 224        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
 225                        * mtd->numeraseregions, GFP_KERNEL);
 226        if (!mtd->eraseregions) { 
 227                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
 228                goto setup_err;
 229        }
 230        
 231        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
 232                unsigned long ernum, ersize;
 233                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
 234                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
 235
 236                if (mtd->erasesize < ersize) {
 237                        mtd->erasesize = ersize;
 238                }
 239                for (j=0; j<cfi->numchips; j++) {
 240                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
 241                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
 242                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
 243                }
 244                offset += (ersize * ernum);
 245        }
 246
 247        if (offset != devsize) {
 248                /* Argh */
 249                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
 250                goto setup_err;
 251        }
 252
 253        for (i=0; i<mtd->numeraseregions;i++){
 254                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
 255                       i,mtd->eraseregions[i].offset,
 256                       mtd->eraseregions[i].erasesize,
 257                       mtd->eraseregions[i].numblocks);
 258        }
 259
  260        /* Also select the correct geometry setup */
 261        mtd->erase = cfi_intelext_erase_varsize;
 262        mtd->read = cfi_intelext_read;
 263
 264        if(map->point && map->unpoint){
 265                mtd->point = do_point;
 266                mtd->unpoint = do_unpoint;
 267        }
 268
 269#ifndef FORCE_WORD_WRITE
 270        if ( cfi->cfiq->BufWriteTimeoutTyp ) {
 271                printk("Using buffer write method\n" );
 272                mtd->write = cfi_intelext_write_buffers;
 273        } else {
 274#else
 275        {
 276#endif
 277                printk("Using word write method\n" );
 278                mtd->write = cfi_intelext_write_words;
 279        }
 280        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
 281        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
 282        mtd->sync = cfi_intelext_sync;
 283        mtd->lock = cfi_intelext_lock;
 284        mtd->unlock = cfi_intelext_unlock;
 285        mtd->suspend = cfi_intelext_suspend;
 286        mtd->resume = cfi_intelext_resume;
 287        mtd->flags = MTD_CAP_NORFLASH;
 288        map->fldrv = &cfi_intelext_chipdrv;
 289        MOD_INC_USE_COUNT;
 290        mtd->name = map->name;
 291        return mtd;
 292
 293 setup_err:
 294        if(mtd) {
 295                if(mtd->eraseregions)
 296                        kfree(mtd->eraseregions);
 297                kfree(mtd);
 298        }
 299        kfree(cfi->cmdset_priv);
 300        kfree(cfi->cfiq);
 301        return NULL;
 302}
 303
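     /*
      * Bring a single chip to the ready state (sleeping on its wait queue
      * if it is busy), then mark it FL_POINT and bump ref_point_counter so
      * that direct map access remains safe until the matching unpoint.
      */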
 304static int do_point_onechip (struct map_info *map,  struct flchip *chip, loff_t adr, size_t len)
 305{
 306        cfi_word status, status_OK;
 307        unsigned long timeo;
 308        DECLARE_WAITQUEUE(wait, current);
 309        unsigned long cmd_addr;
 310        struct cfi_private *cfi = map->fldrv_priv;
 311
 312        adr += chip->start;
 313
 314        /* Ensure cmd read/writes are aligned. */ 
 315        cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1); 
 316
 317        /* Let's determine this according to the interleave only once */
 318        status_OK = CMD(0x80);
 319
 320        timeo = jiffies + HZ;
 321 retry:
 322        spin_lock(chip->mutex);
 323
 324        /* Check that the chip's ready to talk to us.
 325         * If it's in FL_ERASING state, suspend it and make it talk now.
 326         */
 327        switch (chip->state) {
 328
 329        case FL_READY:
 330        case FL_POINT:
 331                break;
 332
 333        case FL_CFI_QUERY:
 334        case FL_JEDEC_QUERY:
 335                cfi_write(map, CMD(0x70), cmd_addr);
 336                chip->state = FL_STATUS;
 337
 338        case FL_STATUS:
 339                status = cfi_read(map, cmd_addr);
 340                if ((status & status_OK) == status_OK) {
 341                        cfi_write(map, CMD(0xff), cmd_addr);
 342                        chip->state = FL_READY;
 343                        break;
 344                }
 345                
 346                /* Urgh. Chip not yet ready to talk to us. */
 347                if (time_after(jiffies, timeo)) {
 348                        spin_unlock(chip->mutex);
 349                        printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %llx\n", (__u64)status);
 350                        return -EIO;
 351                }
 352
 353                /* Latency issues. Drop the lock, wait a while and retry */
 354                spin_unlock(chip->mutex);
 355                cfi_udelay(1);
 356                goto retry;
 357
 358        default:
 359                /* Stick ourselves on a wait queue to be woken when
 360                   someone changes the status */
 361                set_current_state(TASK_UNINTERRUPTIBLE);
 362                add_wait_queue(&chip->wq, &wait);
 363                spin_unlock(chip->mutex);
 364                schedule();
 365                remove_wait_queue(&chip->wq, &wait);
 366                timeo = jiffies + HZ;
 367                goto retry;
 368        }
 369
 370        chip->state = FL_POINT;
 371        chip->ref_point_counter++;
 372        spin_unlock(chip->mutex);
 373        return 0;
 374}
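     /*
      * MTD point method: hand back a direct pointer into the flash via
      * map->point and switch every chip covered by the request into the
      * FL_POINT state.
      */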
 375static int do_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
 376{
 377        struct map_info *map = mtd->priv;
 378        struct cfi_private *cfi = map->fldrv_priv;
 379        unsigned long ofs;
 380        int chipnum;
 381        int ret = 0;
 382
 383        if (from + len > mtd->size)
 384                return -EINVAL;
 385        
 386        *mtdbuf = map->point(map, from, len);
 387        if(*mtdbuf == NULL)
 388                return -EINVAL; /* can not point this region */
 389        *retlen = 0;
 390
 391        /* Now lock the chip(s) to POINT state */
 392
 393        /* ofs: offset within the first chip that the first read should start */
 394        chipnum = (from >> cfi->chipshift);
 395        ofs = from - (chipnum <<  cfi->chipshift);
 396
 397        while (len) {
 398                unsigned long thislen;
 399
 400                if (chipnum >= cfi->numchips)
 401                        break;
 402
 403                if ((len + ofs -1) >> cfi->chipshift)
 404                        thislen = (1<<cfi->chipshift) - ofs;
 405                else
 406                        thislen = len;
 407
 408                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
 409                if (ret)
 410                        break;
 411
 412                *retlen += thislen;
 413                len -= thislen;
 414                
 415                ofs = 0;
 416                chipnum++;
 417        }
 418        return 0;
 419}
 420
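     /*
      * MTD unpoint method: release the mapping obtained by do_point and
      * drop each chip's ref_point_counter, returning the chip to FL_READY
      * once the last reference is gone.
      */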
 421static void do_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
 422{
 423        struct map_info *map = mtd->priv;
 424        struct cfi_private *cfi = map->fldrv_priv;
 425        unsigned long ofs;
 426        int chipnum;
 427
 428        map->unpoint(map, addr, from, len);
 429        /* Now unlock the chip(s) POINT state */
 430
 431        /* ofs: offset within the first chip that the first read should start */
 432        chipnum = (from >> cfi->chipshift);
 433        ofs = from - (chipnum <<  cfi->chipshift);
 434
 435        while (len) {
 436                unsigned long thislen;
 437                struct flchip *chip;
 438
  439                if (chipnum >= cfi->numchips)
  440                        break;
  441                chip = &cfi->chips[chipnum];
 442
 443                if ((len + ofs -1) >> cfi->chipshift)
 444                        thislen = (1<<cfi->chipshift) - ofs;
 445                else
 446                        thislen = len;
 447
 448                spin_lock(chip->mutex);
 449                if(chip->state == FL_POINT){
 450                        chip->ref_point_counter--;
 451                        if(chip->ref_point_counter == 0)
 452                                chip->state = FL_READY;
 453                } else
  454                        printk(KERN_WARNING "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */
 455                wake_up(&chip->wq);
 456                spin_unlock(chip->mutex);
 457
 458                len -= thislen;
 459                ofs = 0;
 460                chipnum++;
 461        }
 462}
 463
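     /*
      * Read a range from a single chip. If the chip is currently erasing
      * and the extended query table advertises erase suspend, the erase is
      * suspended for the duration of the read and resumed afterwards.
      */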
 464static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
 465{
 466        cfi_word status, status_OK;
 467        unsigned long timeo;
 468        DECLARE_WAITQUEUE(wait, current);
 469        int suspended = 0;
 470        unsigned long cmd_addr;
 471        struct cfi_private *cfi = map->fldrv_priv;
 472
 473        adr += chip->start;
 474
 475        /* Ensure cmd read/writes are aligned. */ 
 476        cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1); 
 477
 478        /* Let's determine this according to the interleave only once */
 479        status_OK = CMD(0x80);
 480
 481        timeo = jiffies + HZ;
 482 retry:
 483        spin_lock(chip->mutex);
 484
 485        /* Check that the chip's ready to talk to us.
 486         * If it's in FL_ERASING state, suspend it and make it talk now.
 487         */
 488        switch (chip->state) {
 489        case FL_ERASING:
 490                if (!cfi->cmdset_priv ||
 491                    !(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
 492                        goto sleep; /* We don't support erase suspend */
 493                
 494                cfi_write (map, CMD(0xb0), cmd_addr);
 495                /* If the flash has finished erasing, then 'erase suspend'
 496                 * appears to make some (28F320) flash devices switch to
 497                 * 'read' mode.  Make sure that we switch to 'read status'
 498                 * mode so we get the right data. --rmk
 499                 */
 500                cfi_write(map, CMD(0x70), cmd_addr);
 501                chip->oldstate = FL_ERASING;
 502                chip->state = FL_ERASE_SUSPENDING;
 503                //              printk("Erase suspending at 0x%lx\n", cmd_addr);
 504                for (;;) {
 505                        status = cfi_read(map, cmd_addr);
 506                        if ((status & status_OK) == status_OK)
 507                                break;
 508                        
 509                        if (time_after(jiffies, timeo)) {
 510                                /* Urgh */
 511                                cfi_write(map, CMD(0xd0), cmd_addr);
 512                                /* make sure we're in 'read status' mode */
 513                                cfi_write(map, CMD(0x70), cmd_addr);
 514                                chip->state = FL_ERASING;
 515                                spin_unlock(chip->mutex);
 516                                printk(KERN_ERR "Chip not ready after erase "
 517                                       "suspended: status = 0x%llx\n", (__u64)status);
 518                                return -EIO;
 519                        }
 520                        
 521                        spin_unlock(chip->mutex);
 522                        cfi_udelay(1);
 523                        spin_lock(chip->mutex);
 524                }
 525                
 526                suspended = 1;
 527                cfi_write(map, CMD(0xff), cmd_addr);
 528                chip->state = FL_READY;
 529                break;
 530        
 531#if 0
 532        case FL_WRITING:
 533                /* Not quite yet */
 534#endif
 535
 536        case FL_READY:
 537        case FL_POINT:
 538                break;
 539
 540        case FL_CFI_QUERY:
 541        case FL_JEDEC_QUERY:
 542                cfi_write(map, CMD(0x70), cmd_addr);
 543                chip->state = FL_STATUS;
 544
 545        case FL_STATUS:
 546                status = cfi_read(map, cmd_addr);
 547                if ((status & status_OK) == status_OK) {
 548                        cfi_write(map, CMD(0xff), cmd_addr);
 549                        chip->state = FL_READY;
 550                        break;
 551                }
 552                
 553                /* Urgh. Chip not yet ready to talk to us. */
 554                if (time_after(jiffies, timeo)) {
 555                        spin_unlock(chip->mutex);
 556                        printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %llx\n", (__u64)status);
 557                        return -EIO;
 558                }
 559
 560                /* Latency issues. Drop the lock, wait a while and retry */
 561                spin_unlock(chip->mutex);
 562                cfi_udelay(1);
 563                goto retry;
 564
 565        default:
 566        sleep:
 567                /* Stick ourselves on a wait queue to be woken when
 568                   someone changes the status */
 569                set_current_state(TASK_UNINTERRUPTIBLE);
 570                add_wait_queue(&chip->wq, &wait);
 571                spin_unlock(chip->mutex);
 572                schedule();
 573                remove_wait_queue(&chip->wq, &wait);
 574                timeo = jiffies + HZ;
 575                goto retry;
 576        }
 577
 578        map->copy_from(map, buf, adr, len);
 579
 580        if (suspended) {
 581                chip->state = chip->oldstate;
 582                /* What if one interleaved chip has finished and the 
 583                   other hasn't? The old code would leave the finished
 584                   one in READY mode. That's bad, and caused -EROFS 
 585                   errors to be returned from do_erase_oneblock because
 586                   that's the only bit it checked for at the time.
 587                   As the state machine appears to explicitly allow 
 588                   sending the 0x70 (Read Status) command to an erasing
 589                   chip and expecting it to be ignored, that's what we 
 590                   do. */
 591                cfi_write(map, CMD(0xd0), cmd_addr);
 592                cfi_write(map, CMD(0x70), cmd_addr);            
 593        }
 594
 595        wake_up(&chip->wq);
 596        spin_unlock(chip->mutex);
 597        return 0;
 598}
 599
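     /*
      * MTD read method: split the request at chip boundaries and hand each
      * piece to do_read_onechip.
      */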
 600static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
 601{
 602        struct map_info *map = mtd->priv;
 603        struct cfi_private *cfi = map->fldrv_priv;
 604        unsigned long ofs;
 605        int chipnum;
 606        int ret = 0;
 607
 608        /* ofs: offset within the first chip that the first read should start */
 609        chipnum = (from >> cfi->chipshift);
 610        ofs = from - (chipnum <<  cfi->chipshift);
 611
 612        *retlen = 0;
 613
 614        while (len) {
 615                unsigned long thislen;
 616
 617                if (chipnum >= cfi->numchips)
 618                        break;
 619
 620                if ((len + ofs -1) >> cfi->chipshift)
 621                        thislen = (1<<cfi->chipshift) - ofs;
 622                else
 623                        thislen = len;
 624
 625                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
 626                if (ret)
 627                        break;
 628
 629                *retlen += thislen;
 630                len -= thislen;
 631                buf += thislen;
 632                
 633                ofs = 0;
 634                chipnum++;
 635        }
 636        return ret;
 637}
 638
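     /*
      * Common helper for the factory and user protection register reads.
      * base_offst/reg_sz select which register block to read within each
      * chip; the bytes are fetched one at a time after switching the chip
      * into Read Identifier (0x90) mode.
      */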
 639static int cfi_intelext_read_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, int base_offst, int reg_sz)
 640{
 641        struct map_info *map = mtd->priv;
 642        struct cfi_private *cfi = map->fldrv_priv;
 643        struct cfi_pri_intelext *extp=cfi->cmdset_priv;
 644        int ofs_factor = cfi->interleave * cfi->device_type;
 645        int   count=len;
 646        struct flchip *chip;
 647        int chip_num,offst;
 648        unsigned long timeo;
 649        DECLARE_WAITQUEUE(wait, current);
 650
  651        chip = NULL;
 652        /* Calculate which chip & protection register offset we need */
 653        chip_num=((unsigned int)from/reg_sz);
 654        offst=from-(reg_sz*chip_num)+base_offst;
 655
 656        while(count){
 657                
 658                if(chip_num>=cfi->numchips)
 659                        goto out;
 660
 661                /* Make sure that the chip is in the right state */
 662
 663                timeo = jiffies + HZ;
 664                chip=&cfi->chips[chip_num];
 665        retry:          
 666                spin_lock(chip->mutex);
 667        
 668                switch (chip->state) {
 669                case FL_READY:
 670                case FL_STATUS:
 671                case FL_CFI_QUERY:
 672                case FL_JEDEC_QUERY:
 673                        break;
 674                
 675                default:
 676                                /* Stick ourselves on a wait queue to be woken when
 677                                   someone changes the status */
 678                        set_current_state(TASK_UNINTERRUPTIBLE);
 679                        add_wait_queue(&chip->wq, &wait);
 680                        spin_unlock(chip->mutex);
 681                        schedule();
 682                        remove_wait_queue(&chip->wq, &wait);
 683                        timeo = jiffies + HZ;
 684                        goto retry;
 685                }
 686                        
 687                /* Now read the data required from this flash */
 688       
 689                cfi_send_gen_cmd(0x90, 0x55,chip->start, map, cfi, cfi->device_type, NULL);
 690                while(count && ((offst-base_offst)<reg_sz)){
 691                        *buf=map->read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
 692                        buf++;
 693                        offst++;
 694                        count--;
 695                }
 696               
 697                chip->state=FL_CFI_QUERY;
 698                spin_unlock(chip->mutex);
 699                /* Move on to the next chip */
 700                chip_num++;
 701                offst=base_offst;
 702        
 703        }
 704        
 705 out:   
  706        if (chip)        /* NULL if the request started beyond the last chip */
                     wake_up(&chip->wq);
 707        return len-count;
 708}
 709        
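     /* Read the user-programmable protection register. */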
 710static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
 711{
 712        struct map_info *map = mtd->priv;
 713        struct cfi_private *cfi = map->fldrv_priv;
 714        struct cfi_pri_intelext *extp=cfi->cmdset_priv;
 715        int base_offst,reg_sz;
 716        
 717        /* Check that we actually have some protection registers */
 718        if(!(extp->FeatureSupport&64)){
 719                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
 720                return 0;
 721        }
 722
 723        base_offst=(1<<extp->FactProtRegSize);
 724        reg_sz=(1<<extp->UserProtRegSize);
 725
 726        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
 727}
 728
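     /* Read the factory-programmed protection register. */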
 729static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
 730{
 731        struct map_info *map = mtd->priv;
 732        struct cfi_private *cfi = map->fldrv_priv;
 733        struct cfi_pri_intelext *extp=cfi->cmdset_priv;
 734        int base_offst,reg_sz;
 735        
 736        /* Check that we actually have some protection registers */
 737        if(!(extp->FeatureSupport&64)){
 738                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
 739                return 0;
 740        }
 741
 742        base_offst=0;
 743        reg_sz=(1<<extp->FactProtRegSize);
 744
 745        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
 746}
 747
 748
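     /*
      * Program one bus-width word at the given address: suspend an
      * in-progress erase if the chip supports it, issue the word program
      * command (0x40), then poll the status register until the write
      * completes, times out, or reports a locked block.
      */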
 749static int do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, cfi_word datum)
 750{
 751        struct cfi_private *cfi = map->fldrv_priv;
 752        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
 753        cfi_word status, status_OK;
 754        unsigned long timeo;
 755        DECLARE_WAITQUEUE(wait, current);
 756        int z, suspended=0, ret=0;
 757
 758        adr += chip->start;
 759
 760        /* Let's determine this according to the interleave only once */
 761        status_OK = CMD(0x80);
 762
 763        timeo = jiffies + HZ;
 764 retry:
 765        spin_lock(chip->mutex);
 766
 767        /* Check that the chip's ready to talk to us.
 768         * Later, we can actually think about interrupting it
 769         * if it's in FL_ERASING state.
 770         * Not just yet, though.
 771         */
 772        switch (chip->state) {
 773        case FL_READY:
 774                break;
 775                
 776        case FL_CFI_QUERY:
 777        case FL_JEDEC_QUERY:
 778                cfi_write(map, CMD(0x70), adr);
 779                chip->state = FL_STATUS;
 780
 781        case FL_STATUS:
 782                status = cfi_read(map, adr);
 783                if ((status & status_OK) == status_OK)
 784                        break;
 785                
 786                /* Urgh. Chip not yet ready to talk to us. */
 787                if (time_after(jiffies, timeo)) {
 788                        spin_unlock(chip->mutex);
  789                        printk(KERN_ERR "waiting for chip to be ready timed out before word write\n");
 790                        return -EIO;
 791                }
 792
 793                /* Latency issues. Drop the lock, wait a while and retry */
 794                spin_unlock(chip->mutex);
 795                cfi_udelay(1);
 796                goto retry;
 797
 798        case FL_ERASING:
 799                if (!extp || 
 800                    !((extp->FeatureSupport & 2) && (extp->SuspendCmdSupport & 1)))
 801                        goto sleep; /* We don't support erase suspend */
 802                
 803                cfi_write (map, CMD(0xb0), adr);
 804
 805                /* If the flash has finished erasing, then 'erase suspend'
 806                 * appears to make some (28F320) flash devices switch to
 807                 * 'read' mode.  Make sure that we switch to 'read status'
 808                 * mode so we get the right data. --rmk
 809                 */
 810                cfi_write(map, CMD(0x70), adr);
 811                chip->oldstate = FL_ERASING;
 812                chip->state = FL_ERASE_SUSPENDING;
 813                for (;;) {
 814                        status = cfi_read(map, adr);
 815                        if ((status & status_OK) == status_OK)
 816                                break;
 817                        
 818                        if (time_after(jiffies, timeo)) {
 819                                /* Urgh */
 820                                cfi_write(map, CMD(0xd0), adr);
 821                                /* make sure we're in 'read status' mode */
 822                                cfi_write(map, CMD(0x70), adr);
 823                                chip->state = FL_ERASING;
 824                                spin_unlock(chip->mutex);
 825                                printk(KERN_ERR "Chip not ready after erase "
  826                                       "suspended: status = 0x%llx\n", (__u64)status);
 827                                return -EIO;
 828                        }
 829                        
 830                        spin_unlock(chip->mutex);
 831                        cfi_udelay(1);
 832                        spin_lock(chip->mutex);
 833                }
 834                suspended = 1;
 835                chip->state = FL_STATUS;
 836                break;
 837
 838        default:
 839        sleep:
 840                /* Stick ourselves on a wait queue to be woken when
 841                   someone changes the status */
 842                set_current_state(TASK_UNINTERRUPTIBLE);
 843                add_wait_queue(&chip->wq, &wait);
 844                spin_unlock(chip->mutex);
 845                schedule();
 846                remove_wait_queue(&chip->wq, &wait);
 847                timeo = jiffies + HZ;
 848                goto retry;
 849        }
 850
 851        ENABLE_VPP(map);
 852        cfi_write(map, CMD(0x40), adr);
 853        cfi_write(map, datum, adr);
 854        chip->state = FL_WRITING;
 855
 856        spin_unlock(chip->mutex);
 857        cfi_udelay(chip->word_write_time);
 858        spin_lock(chip->mutex);
 859
 860        timeo = jiffies + (HZ/2);
 861        z = 0;
 862        for (;;) {
 863                if (chip->state != FL_WRITING) {
 864                        /* Someone's suspended the write. Sleep */
 865                        set_current_state(TASK_UNINTERRUPTIBLE);
 866                        add_wait_queue(&chip->wq, &wait);
 867                        spin_unlock(chip->mutex);
 868                        schedule();
 869                        remove_wait_queue(&chip->wq, &wait);
 870                        timeo = jiffies + (HZ / 2); /* FIXME */
 871                        spin_lock(chip->mutex);
 872                        continue;
 873                }
 874
 875                status = cfi_read(map, adr);
 876                if ((status & status_OK) == status_OK)
 877                        break;
 878                
 879                /* OK Still waiting */
 880                if (time_after(jiffies, timeo)) {
 881                        chip->state = FL_STATUS;
 882                        DISABLE_VPP(map);
 883                        printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
 884                        ret = -EIO;
 885                        goto out;
 886                }
 887
 888                /* Latency issues. Drop the lock, wait a while and retry */
 889                spin_unlock(chip->mutex);
 890                z++;
 891                cfi_udelay(1);
 892                spin_lock(chip->mutex);
 893        }
 894        if (!z) {
 895                chip->word_write_time--;
 896                if (!chip->word_write_time)
 897                        chip->word_write_time++;
 898        }
 899        if (z > 1) 
 900                chip->word_write_time++;
 901
 902        /* Done and happy. */
 903        chip->state = FL_STATUS;
 904        /* check for lock bit */
 905        if (status & CMD(0x02)) {
 906                /* clear status */
 907                cfi_write(map, CMD(0x50), adr);
 908                /* put back into read status register mode */
 909                cfi_write(map, CMD(0x70), adr);
 910                ret = -EROFS;
 911                goto out;
 912        }
 913 out:
 914        if (suspended) {
 915                chip->state = chip->oldstate;
 916                /* What if one interleaved chip has finished and the 
 917                   other hasn't? The old code would leave the finished
 918                   one in READY mode. That's bad, and caused -EROFS 
 919                   errors to be returned from do_erase_oneblock because
 920                   that's the only bit it checked for at the time.
 921                   As the state machine appears to explicitly allow 
 922                   sending the 0x70 (Read Status) command to an erasing
 923                   chip and expecting it to be ignored, that's what we 
 924                   do. */
 925                cfi_write(map, CMD(0xd0), adr);
 926                cfi_write(map, CMD(0x70), adr);         
 927        } else
 928                DISABLE_VPP(map); /* must not clear the VPP if there is a suspended erase to be resumed */
 929
 930        wake_up(&chip->wq);
 931        spin_unlock(chip->mutex);
 932        return ret;
 933}
 934
 935
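     /*
      * MTD write method used when buffer writes are unavailable: unaligned
      * head and tail bytes are padded out to bus width with 0xff (which
      * leaves the existing flash contents untouched) and everything is
      * programmed one word at a time.
      */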
 936static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
 937{
 938        struct map_info *map = mtd->priv;
 939        struct cfi_private *cfi = map->fldrv_priv;
 940        int ret = 0;
 941        int chipnum;
 942        unsigned long ofs;
 943
 944        *retlen = 0;
 945        if (!len)
 946                return 0;
 947
 948        chipnum = to >> cfi->chipshift;
 949        ofs = to  - (chipnum << cfi->chipshift);
 950
 951        /* If it's not bus-aligned, do the first byte write */
 952        if (ofs & (CFIDEV_BUSWIDTH-1)) {
 953                unsigned long bus_ofs = ofs & ~(CFIDEV_BUSWIDTH-1);
 954                int gap = ofs - bus_ofs;
 955                int i = 0, n = 0;
 956                u_char tmp_buf[8];
 957                cfi_word datum;
 958
 959                while (gap--)
 960                        tmp_buf[i++] = 0xff;
 961                while (len && i < CFIDEV_BUSWIDTH)
 962                        tmp_buf[i++] = buf[n++], len--;
 963                while (i < CFIDEV_BUSWIDTH)
 964                        tmp_buf[i++] = 0xff;
 965
 966                if (cfi_buswidth_is_2()) {
 967                        datum = *(__u16*)tmp_buf;
 968                } else if (cfi_buswidth_is_4()) {
 969                        datum = *(__u32*)tmp_buf;
 970                } else if (cfi_buswidth_is_8()) {
 971                        datum = *(__u64*)tmp_buf;
 972                } else {
 973                        return -EINVAL;  /* should never happen, but be safe */
 974                }
 975
 976                ret = do_write_oneword(map, &cfi->chips[chipnum],
 977                                               bus_ofs, datum);
 978                if (ret) 
 979                        return ret;
 980                
 981                ofs += n;
 982                buf += n;
 983                (*retlen) += n;
 984
 985                if (ofs >> cfi->chipshift) {
 986                        chipnum ++; 
 987                        ofs = 0;
 988                        if (chipnum == cfi->numchips)
 989                                return 0;
 990                }
 991        }
 992        
 993        while(len >= CFIDEV_BUSWIDTH) {
 994                cfi_word datum;
 995
 996                if (cfi_buswidth_is_1()) {
 997                        datum = *(__u8*)buf;
 998                } else if (cfi_buswidth_is_2()) {
 999                        datum = *(__u16*)buf;
1000                } else if (cfi_buswidth_is_4()) {
1001                        datum = *(__u32*)buf;
1002                } else if (cfi_buswidth_is_8()) {
1003                        datum = *(__u64*)buf;
1004                } else {
1005                        return -EINVAL;
1006                }
1007
1008                ret = do_write_oneword(map, &cfi->chips[chipnum],
1009                                ofs, datum);
1010                if (ret)
1011                        return ret;
1012
1013                ofs += CFIDEV_BUSWIDTH;
1014                buf += CFIDEV_BUSWIDTH;
1015                (*retlen) += CFIDEV_BUSWIDTH;
1016                len -= CFIDEV_BUSWIDTH;
1017
1018                if (ofs >> cfi->chipshift) {
1019                        chipnum ++; 
1020                        ofs = 0;
1021                        if (chipnum == cfi->numchips)
1022                                return 0;
1023                }
1024        }
1025
1026        if (len & (CFIDEV_BUSWIDTH-1)) {
1027                int i = 0, n = 0;
1028                u_char tmp_buf[8];
1029                cfi_word datum;
1030
1031                while (len--)
1032                        tmp_buf[i++] = buf[n++];
1033                while (i < CFIDEV_BUSWIDTH)
1034                        tmp_buf[i++] = 0xff;
1035
1036                if (cfi_buswidth_is_2()) {
1037                        datum = *(__u16*)tmp_buf;
1038                } else if (cfi_buswidth_is_4()) {
1039                        datum = *(__u32*)tmp_buf;
1040                } else if (cfi_buswidth_is_8()) {
1041                        datum = *(__u64*)tmp_buf;
1042                } else {
1043                        return -EINVAL;  /* should never happen, but be safe */
1044                }
1045
1046                ret = do_write_oneword(map, &cfi->chips[chipnum],
1047                                               ofs, datum);
1048                if (ret) 
1049                        return ret;
1050                
1051                (*retlen) += n;
1052        }
1053
1054        return 0;
1055}
1056
1057
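     /*
      * Program up to one write buffer's worth of data, never crossing a
      * buffer boundary: issue Write to Buffer (0xe8), load the word count
      * and the data, confirm with 0xd0, then poll the status register for
      * completion. An in-progress erase is suspended first if supported.
      */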
1058static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
1059                                  unsigned long adr, const u_char *buf, int len)
1060{
1061        struct cfi_private *cfi = map->fldrv_priv;
1062        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1063        cfi_word status, status_OK;
1064        unsigned long cmd_adr, timeo;
1065        DECLARE_WAITQUEUE(wait, current);
1066        int wbufsize, z, suspended=0, ret=0;
1067
1068        wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
1069        adr += chip->start;
1070        cmd_adr = adr & ~(wbufsize-1);
1071        
1072        /* Let's determine this according to the interleave only once */
1073        status_OK = CMD(0x80);
1074
1075        timeo = jiffies + HZ;
1076 retry:
1077        spin_lock(chip->mutex);
1078
1079        /* Check that the chip's ready to talk to us.
1080         * Later, we can actually think about interrupting it
1081         * if it's in FL_ERASING state.
1082         * Not just yet, though.
1083         */
1084        switch (chip->state) {
1085        case FL_READY:
1086        case FL_CFI_QUERY:
1087        case FL_JEDEC_QUERY:
1088                cfi_write(map, CMD(0x70), cmd_adr);
1089                chip->state = FL_STATUS;
1090
1091        case FL_STATUS:
1092                status = cfi_read(map, cmd_adr);
1093                if ((status & status_OK) == status_OK)
1094                        break;
1095                /* Urgh. Chip not yet ready to talk to us. */
1096                if (time_after(jiffies, timeo)) {
1097                        spin_unlock(chip->mutex);
1098                        printk(KERN_ERR "waiting for chip to be ready timed out in buffer write\n");
1099                        return -EIO;
1100                }
1101
1102                /* Latency issues. Drop the lock, wait a while and retry */
1103                spin_unlock(chip->mutex);
1104                cfi_udelay(1);
1105                goto retry;
1106
1107        case FL_ERASING:
1108                if (!extp || 
1109                    !((extp->FeatureSupport & 2) && (extp->SuspendCmdSupport & 1)))
1110                        goto sleep; /* We don't support erase suspend */
1111                
1112                cfi_write (map, CMD(0xb0), adr);
1113
1114                /* If the flash has finished erasing, then 'erase suspend'
1115                 * appears to make some (28F320) flash devices switch to
1116                 * 'read' mode.  Make sure that we switch to 'read status'
1117                 * mode so we get the right data. --rmk
1118                 */
1119                cfi_write(map, CMD(0x70), adr);
1120                chip->oldstate = FL_ERASING;
1121                chip->state = FL_ERASE_SUSPENDING;
1122                for (;;) {
1123                        status = cfi_read(map, adr);
1124                        if ((status & status_OK) == status_OK)
1125                                break;
1126                        
1127                        if (time_after(jiffies, timeo)) {
1128                                /* Urgh */
1129                                cfi_write(map, CMD(0xd0), adr);
1130                                /* make sure we're in 'read status' mode */
1131                                cfi_write(map, CMD(0x70), adr);
1132                                chip->state = FL_ERASING;
1133                                spin_unlock(chip->mutex);
1134                                printk(KERN_ERR "Chip not ready after erase "
 1135                                       "suspended: status = 0x%llx\n", (__u64)status);
1136                                return -EIO;
1137                        }
1138                        
1139                        spin_unlock(chip->mutex);
1140                        cfi_udelay(1);
1141                        spin_lock(chip->mutex);
1142                }
1143                suspended = 1;
1144                chip->state = FL_STATUS;
1145                break;
1146
1147        default:
1148        sleep:
1149                /* Stick ourselves on a wait queue to be woken when
1150                   someone changes the status */
1151                set_current_state(TASK_UNINTERRUPTIBLE);
1152                add_wait_queue(&chip->wq, &wait);
1153                spin_unlock(chip->mutex);
1154                schedule();
1155                remove_wait_queue(&chip->wq, &wait);
1156                timeo = jiffies + HZ;
1157                goto retry;
1158        }
1159        /* We know we're now in FL_STATUS mode, and 'status' is current */
1160        /* 4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1161           [...], the device will not accept any more Write to Buffer commands". 
1162           So we must check here and reset those bits if they're set. Otherwise
1163           we're just pissing in the wind */
1164        if (status & CMD(0x30)) {
 1165                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %llx). Clearing.\n", (__u64)status);
1166                cfi_write(map, CMD(0x50), cmd_adr);
1167                cfi_write(map, CMD(0x70), cmd_adr);
1168        }
1169        ENABLE_VPP(map);
1170        chip->state = FL_WRITING_TO_BUFFER;
1171
1172        z = 0;
1173        for (;;) {
1174                cfi_write(map, CMD(0xe8), cmd_adr);
1175
1176                status = cfi_read(map, cmd_adr);
1177                if ((status & status_OK) == status_OK)
1178                        break;
1179
1180                spin_unlock(chip->mutex);
1181                cfi_udelay(1);
1182                spin_lock(chip->mutex);
1183
1184                if (++z > 20) {
1185                        /* Argh. Not ready for write to buffer */
1186                        cfi_write(map, CMD(0x70), cmd_adr);
1187                        chip->state = FL_STATUS;
1188                        DISABLE_VPP(map);
1189                        printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %llx, status = %llx\n", (__u64)status, (__u64)cfi_read(map, cmd_adr));
1190                        /* Odd. Clear status bits */
1191                        cfi_write(map, CMD(0x50), cmd_adr);
1192                        cfi_write(map, CMD(0x70), cmd_adr);
1193                        ret = -EIO;
1194                        goto out;
1195                }
1196        }
1197
1198        /* Write length of data to come */
1199        cfi_write(map, CMD(len/CFIDEV_BUSWIDTH-1), cmd_adr );
1200
1201        /* Write data */
1202        for (z = 0; z < len; z += CFIDEV_BUSWIDTH) {
1203                if (cfi_buswidth_is_1()) {
1204                        map->write8 (map, *(__u8*)buf, adr+z);
1205                        buf += sizeof(__u8);
1206                } else if (cfi_buswidth_is_2()) {
1207                        map->write16 (map, *(__u16*)buf, adr+z);
1208                        buf += sizeof(__u16);
1209                } else if (cfi_buswidth_is_4()) {
1210                        map->write32 (map, *(__u32*)buf, adr+z);
1211                        buf += sizeof(__u32);
1212                } else if (cfi_buswidth_is_8()) {
1213                        map->write64 (map, *(__u64*)buf, adr+z);
1214                        buf += sizeof(__u64);
1215                } else {
1216                        DISABLE_VPP(map);
1217                        ret = -EINVAL;
1218                        goto out;
1219                }
1220        }
1221        /* GO GO GO */
1222        cfi_write(map, CMD(0xd0), cmd_adr);
1223        chip->state = FL_WRITING;
1224
1225        spin_unlock(chip->mutex);
1226        cfi_udelay(chip->buffer_write_time);
1227        spin_lock(chip->mutex);
1228
1229        timeo = jiffies + (HZ/2);
1230        z = 0;
1231        for (;;) {
1232                if (chip->state != FL_WRITING) {
1233                        /* Someone's suspended the write. Sleep */
1234                        set_current_state(TASK_UNINTERRUPTIBLE);
1235                        add_wait_queue(&chip->wq, &wait);
1236                        spin_unlock(chip->mutex);
1237                        schedule();
1238                        remove_wait_queue(&chip->wq, &wait);
1239                        timeo = jiffies + (HZ / 2); /* FIXME */
1240                        spin_lock(chip->mutex);
1241                        continue;
1242                }
1243
1244                status = cfi_read(map, cmd_adr);
1245                if ((status & status_OK) == status_OK)
1246                        break;
1247
1248                /* OK Still waiting */
1249                if (time_after(jiffies, timeo)) {
1250                        chip->state = FL_STATUS;
1251                        DISABLE_VPP(map);
1252                        printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1253                        ret = -EIO;
1254                        goto out;
1255                }
1256                
1257                /* Latency issues. Drop the lock, wait a while and retry */
1258                spin_unlock(chip->mutex);
1259                cfi_udelay(1);
1260                z++;
1261                spin_lock(chip->mutex);
1262        }
1263        if (!z) {
1264                chip->buffer_write_time--;
1265                if (!chip->buffer_write_time)
1266                        chip->buffer_write_time++;
1267        }
1268        if (z > 1) 
1269                chip->buffer_write_time++;
1270
1271        /* Done and happy. */
1272        chip->state = FL_STATUS;
1273        /* check for lock bit */
1274        if (status & CMD(0x02)) {
1275                /* clear status */
1276                cfi_write(map, CMD(0x50), cmd_adr);
1277                /* put back into read status register mode */
1278                cfi_write(map, CMD(0x70), adr);
1279                ret = -EROFS;
1280                goto out;
1281        }
1282 out:
1283        if (suspended) {
1284                chip->state = chip->oldstate;
1285                /* What if one interleaved chip has finished and the 
1286                   other hasn't? The old code would leave the finished
1287                   one in READY mode. That's bad, and caused -EROFS 
1288                   errors to be returned from do_erase_oneblock because
1289                   that's the only bit it checked for at the time.
1290                   As the state machine appears to explicitly allow 
1291                   sending the 0x70 (Read Status) command to an erasing
1292                   chip and expecting it to be ignored, that's what we 
1293                   do. */
1294                cfi_write(map, CMD(0xd0), adr);
1295                cfi_write(map, CMD(0x70), adr);         
1296        } else
1297                DISABLE_VPP(map); /* must not clear the VPP if there is a suspended erase to be resumed */
1298
1299        wake_up(&chip->wq);
1300        spin_unlock(chip->mutex);
1301        return ret;
1302}
1303
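     /*
      * MTD write method using the chip's write buffer: unaligned head and
      * tail bytes go through cfi_intelext_write_words, the rest is written
      * in buffer-sized chunks via do_write_buffer.
      */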
1304static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
1305                                       size_t len, size_t *retlen, const u_char *buf)
1306{
1307        struct map_info *map = mtd->priv;
1308        struct cfi_private *cfi = map->fldrv_priv;
1309        int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
1310        int ret = 0;
1311        int chipnum;
1312        unsigned long ofs;
1313
1314        *retlen = 0;
1315        if (!len)
1316                return 0;
1317
1318        chipnum = to >> cfi->chipshift;
1319        ofs = to  - (chipnum << cfi->chipshift);
1320
1321        /* If it's not bus-aligned, do the first word write */
1322        if (ofs & (CFIDEV_BUSWIDTH-1)) {
1323                size_t local_len = (-ofs)&(CFIDEV_BUSWIDTH-1);
1324                if (local_len > len)
1325                        local_len = len;
1326                ret = cfi_intelext_write_words(mtd, to, local_len,
1327                                               retlen, buf);
1328                if (ret)
1329                        return ret;
1330                ofs += local_len;
1331                buf += local_len;
1332                len -= local_len;
1333
1334                if (ofs >> cfi->chipshift) {
1335                        chipnum ++;
1336                        ofs = 0;
1337                        if (chipnum == cfi->numchips)
1338                                return 0;
1339                }
1340        }
1341
1342        /* Write buffer is worth it only if more than one word to write... */
1343        while(len > CFIDEV_BUSWIDTH) {
1344                /* We must not cross write block boundaries */
1345                int size = wbufsize - (ofs & (wbufsize-1));
1346
1347                if (size > len)
1348                        size = len & ~(CFIDEV_BUSWIDTH-1);
1349                ret = do_write_buffer(map, &cfi->chips[chipnum], 
1350                                      ofs, buf, size);
1351                if (ret)
1352                        return ret;
1353
1354                ofs += size;
1355                buf += size;
1356                (*retlen) += size;
1357                len -= size;
1358
1359                if (ofs >> cfi->chipshift) {
1360                        chipnum ++; 
1361                        ofs = 0;
1362                        if (chipnum == cfi->numchips)
1363                                return 0;
1364                }
1365        }
1366
1367        /* ... and write the remaining bytes */
1368        if (len > 0) {
1369                size_t local_retlen;
1370                ret = cfi_intelext_write_words(mtd, ofs + (chipnum << cfi->chipshift),
1371                                               len, &local_retlen, buf);
1372                if (ret)
1373                        return ret;
1374                (*retlen) += local_retlen;
1375        }
1376
1377        return 0;
1378}
1379
1380typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
1381                              unsigned long adr, void *thunk);
1382
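/*
 * Walk a [ofs, ofs+len) range one erase block at a time and call "frob"
 * on each block, passing "thunk" straight through.  The same walker is
 * reused below for erase, lock and unlock, e.g.
 *
 *      cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
 *      cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock, ofs, len,
 *                                DO_XXLOCK_ONEBLOCK_LOCK);
 */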
1383static int cfi_intelext_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
1384                                     loff_t ofs, size_t len, void *thunk)
1385{
1386        struct map_info *map = mtd->priv;
1387        struct cfi_private *cfi = map->fldrv_priv;
1388        unsigned long adr;
1389        int chipnum, ret = 0;
1390        int i, first;
1391        struct mtd_erase_region_info *regions = mtd->eraseregions;
1392
1393        if (ofs > mtd->size)
1394                return -EINVAL;
1395
1396        if ((len + ofs) > mtd->size)
1397                return -EINVAL;
1398
1399        /* Check that both start and end of the requested erase are
1400         * aligned with the erasesize at the appropriate addresses.
1401         */
1402
1403        i = 0;
1404
1405        /* Skip all erase regions which are ended before the start of 
1406           the requested erase. Actually, to save on the calculations,
1407           we skip to the first erase region which starts after the
1408           start of the requested erase, and then go back one.
1409        */
1410        
1411        while (i < mtd->numeraseregions && ofs >= regions[i].offset)
1412               i++;
1413        i--;
1414
1415        /* OK, now i is pointing at the erase region in which this 
1416           erase request starts. Check the start of the requested
1417           erase range is aligned with the erase size which is in
1418           effect here.
1419        */
1420
1421        if (ofs & (regions[i].erasesize-1))
1422                return -EINVAL;
1423
1424        /* Remember the erase region we start on */
1425        first = i;
1426
1427        /* Next, check that the end of the requested erase is aligned
1428         * with the erase region at that address.
1429         */
1430
1431        while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
1432                i++;
1433
1434        /* As before, drop back one to point at the region in which
1435           the address actually falls
1436        */
1437        i--;
1438        
1439        if ((ofs + len) & (regions[i].erasesize-1))
1440                return -EINVAL;
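        /* For illustration, with a hypothetical layout of eight 8KiB boot
           blocks followed by 64KiB main blocks, a request with ofs == 0xc000
           and len == 0x14000 passes both checks: 0xc000 is a multiple of
           0x2000, and the end address 0x20000 is a multiple of 0x10000. */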
1441
1442        chipnum = ofs >> cfi->chipshift;
1443        adr = ofs - (chipnum << cfi->chipshift);
1444
1445        i=first;
1446
1447        while(len) {
1448                ret = (*frob)(map, &cfi->chips[chipnum], adr, thunk);
1449                
1450                if (ret)
1451                        return ret;
1452
1453                adr += regions[i].erasesize;
1454                len -= regions[i].erasesize;
1455
                /* Advance to the next erase region once the within-chip
                   address has reached the end of the current one (both
                   taken modulo the chip size) */
                if (adr % (1 << cfi->chipshift) ==
                    ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks))
                     % (1 << cfi->chipshift)))
                        i++;
1458
1459                if (adr >> cfi->chipshift) {
1460                        adr = 0;
1461                        chipnum++;
1462                        
                        if (chipnum >= cfi->numchips)
                                break;
1465                }
1466        }
1467
1468        return 0;
1469}
1470
1471
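/*
 * Erase a single block: wait until the chip is ready, issue the block
 * erase (0x20/0xD0) sequence, sleep for a while and then poll the status
 * register, coping with erase suspend/resume and retrying a limited
 * number of times if the chip reports an erase failure.
 */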
1472static int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1473{
1474        struct cfi_private *cfi = map->fldrv_priv;
1475        cfi_word status, status_OK;
1476        unsigned long timeo;
1477        int retries = 3;
1478        DECLARE_WAITQUEUE(wait, current);
1479        int ret = 0;
1480
1481        adr += chip->start;
1482
1483        /* Let's determine this according to the interleave only once */
1484        status_OK = CMD(0x80);
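        /* SR.7 (write state machine ready), replicated by CMD() for
           every interleaved chip on the bus */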
1485
1486        timeo = jiffies + HZ;
1487retry:
1488        spin_lock(chip->mutex);
1489
1490        /* Check that the chip's ready to talk to us. */
1491        switch (chip->state) {
1492        case FL_CFI_QUERY:
1493        case FL_JEDEC_QUERY:
1494        case FL_READY:
1495                cfi_write(map, CMD(0x70), adr);
1496                chip->state = FL_STATUS;
1497
1498        case FL_STATUS:
1499                status = cfi_read(map, adr);
1500                if ((status & status_OK) == status_OK)
1501                        break;
1502                
1503                /* Urgh. Chip not yet ready to talk to us. */
1504                if (time_after(jiffies, timeo)) {
1505                        spin_unlock(chip->mutex);
1506                        printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
1507                        return -EIO;
1508                }
1509
1510                /* Latency issues. Drop the lock, wait a while and retry */
1511                spin_unlock(chip->mutex);
1512                cfi_udelay(1);
1513                goto retry;
1514
1515        default:
1516                /* Stick ourselves on a wait queue to be woken when
1517                   someone changes the status */
1518                set_current_state(TASK_UNINTERRUPTIBLE);
1519                add_wait_queue(&chip->wq, &wait);
1520                spin_unlock(chip->mutex);
1521                schedule();
1522                remove_wait_queue(&chip->wq, &wait);
1523                timeo = jiffies + HZ;
1524                goto retry;
1525        }
1526
1527        ENABLE_VPP(map);
1528        /* Clear the status register first */
1529        cfi_write(map, CMD(0x50), adr);
1530
1531        /* Now erase */
1532        cfi_write(map, CMD(0x20), adr);
1533        cfi_write(map, CMD(0xD0), adr);
1534        chip->state = FL_ERASING;
1535        chip->oldstate = 0;
1536
1537        spin_unlock(chip->mutex);
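        /* Sleep for roughly half of the typical block erase time before
           we start polling the status register at all. */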
1538        set_current_state(TASK_UNINTERRUPTIBLE);
1539        schedule_timeout((chip->erase_time*HZ)/(2*1000));
1540        spin_lock(chip->mutex);
1541
1542        /* FIXME. Use a timer to check this, and return immediately. */
1543        /* Once the state machine's known to be working I'll do that */
1544
1545        timeo = jiffies + (HZ*20);
1546        for (;;) {
1547                if (chip->state != FL_ERASING) {
1548                        /* Someone's suspended the erase. Sleep */
1549                        set_current_state(TASK_UNINTERRUPTIBLE);
1550                        add_wait_queue(&chip->wq, &wait);
1551                        spin_unlock(chip->mutex);
1552                        schedule();
1553                        remove_wait_queue(&chip->wq, &wait);
1554                        spin_lock(chip->mutex);
1555                        continue;
1556                }
1557                if (chip->oldstate) {
1558                        /* This erase was suspended and resumed.
1559                           Adjust the timeout */
1560                        timeo = jiffies + (HZ*20); /* FIXME */
1561                        chip->oldstate = 0;
1562                }
1563
1564                status = cfi_read(map, adr);
1565                if ((status & status_OK) == status_OK)
1566                        break;
1567                
1568                /* OK Still waiting */
1569                if (time_after(jiffies, timeo)) {
1570                        cfi_write(map, CMD(0x70), adr);
1571                        chip->state = FL_STATUS;
1572                        printk(KERN_ERR "waiting for erase at %08lx to complete timed out. Xstatus = %llx, status = %llx.\n",
1573                               adr, (__u64)status, (__u64)cfi_read(map, adr));
1574                        /* Clear status bits */
1575                        cfi_write(map, CMD(0x50), adr);
1576                        cfi_write(map, CMD(0x70), adr);
1577                        DISABLE_VPP(map);
1578                        spin_unlock(chip->mutex);
1579                        return -EIO;
1580                }
1581                
1582                /* Latency issues. Drop the lock, wait a while and retry */
1583                spin_unlock(chip->mutex);
1584                set_current_state(TASK_UNINTERRUPTIBLE);
1585                schedule_timeout(1);
1586                spin_lock(chip->mutex);
1587        }
1588        
1589        DISABLE_VPP(map);
1590        ret = 0;
1591
1592        /* We've broken this before. It doesn't hurt to be safe */
1593        cfi_write(map, CMD(0x70), adr);
1594        chip->state = FL_STATUS;
1595        status = cfi_read(map, adr);
1596
        /* check for error bits: SR.1 (locked block), SR.3 (VPP low),
           SR.4 (program failure), SR.5 (erase failure) */
1598        if (status & CMD(0x3a)) {
1599                unsigned char chipstatus = status;
1600                if (status != CMD(status & 0xff)) {
1601                        int i;
                        for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
                                /* merge in the status byte of each interleaved chip */
                                chipstatus |= status >> (i * cfi->device_type * 8);
                        }
1605                        printk(KERN_WARNING "Status is not identical for all chips: 0x%llx. Merging to give 0x%02x\n", (__u64)status, chipstatus);
1606                }
1607                /* Reset the error bits */
1608                cfi_write(map, CMD(0x50), adr);
1609                cfi_write(map, CMD(0x70), adr);
1610                
1611                if ((chipstatus & 0x30) == 0x30) {
1612                        printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%llx\n", (__u64)status);
1613                        ret = -EIO;
1614                } else if (chipstatus & 0x02) {
1615                        /* Protection bit set */
1616                        ret = -EROFS;
1617                } else if (chipstatus & 0x8) {
1618                        /* Voltage */
1619                        printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%llx\n", (__u64)status);
1620                        ret = -EIO;
1621                } else if (chipstatus & 0x20) {
1622                        if (retries--) {
1623                                printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx. Retrying...\n", adr, (__u64)status);
1624                                timeo = jiffies + HZ;
1625                                chip->state = FL_STATUS;
1626                                spin_unlock(chip->mutex);
1627                                goto retry;
1628                        }
1629                        printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%llx\n", adr, (__u64)status);
1630                        ret = -EIO;
1631                }
1632        }
1633
1634        wake_up(&chip->wq);
1635        spin_unlock(chip->mutex);
1636        return ret;
1637}
1638
1639int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1640{
1641        unsigned long ofs, len;
1642        int ret;
1643
1644        ofs = instr->addr;
1645        len = instr->len;
1646
1647        ret = cfi_intelext_varsize_frob(mtd, do_erase_oneblock, ofs, len, 0);
1648        if (ret)
1649                return ret;
1650
1651        instr->state = MTD_ERASE_DONE;
1652        if (instr->callback)
1653                instr->callback(instr);
1654        
1655        return 0;
1656}
1657
1658static void cfi_intelext_sync (struct mtd_info *mtd)
1659{
1660        struct map_info *map = mtd->priv;
1661        struct cfi_private *cfi = map->fldrv_priv;
1662        int i;
1663        struct flchip *chip;
1664        int ret = 0;
1665        DECLARE_WAITQUEUE(wait, current);
1666
1667        for (i=0; !ret && i<cfi->numchips; i++) {
1668                chip = &cfi->chips[i];
1669
1670        retry:
1671                spin_lock(chip->mutex);
1672
1673                switch(chip->state) {
1674                case FL_READY:
1675                case FL_STATUS:
1676                case FL_CFI_QUERY:
1677                case FL_JEDEC_QUERY:
1678                        chip->oldstate = chip->state;
1679                        chip->state = FL_SYNCING;
1680                        /* No need to wake_up() on this state change - 
1681                         * as the whole point is that nobody can do anything
1682                         * with the chip now anyway.
1683                         */
1684                case FL_SYNCING:
1685                        spin_unlock(chip->mutex);
1686                        break;
1687
1688                default:
1689                        /* Not an idle state */
1690                        add_wait_queue(&chip->wq, &wait);
1691                        
1692                        spin_unlock(chip->mutex);
1693                        schedule();
1694                        remove_wait_queue(&chip->wq, &wait);
1695                        
1696                        goto retry;
1697                }
1698        }
1699
1700        /* Unlock the chips again */
1701
1702        for (i--; i >=0; i--) {
1703                chip = &cfi->chips[i];
1704
1705                spin_lock(chip->mutex);
1706                
1707                if (chip->state == FL_SYNCING) {
1708                        chip->state = chip->oldstate;
1709                        wake_up(&chip->wq);
1710                }
1711                spin_unlock(chip->mutex);
1712        }
1713}
1714
1715#ifdef DEBUG_LOCK_BITS
1716static int do_printlockstatus_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1717{
1718        struct cfi_private *cfi = map->fldrv_priv;
1719        int ofs_factor = cfi->interleave * cfi->device_type;
1720
1721        cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1722        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1723               adr, cfi_read_query(map, adr+(2*ofs_factor)));
1724        cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1725        
1726        return 0;
1727}
1728#endif
1729
1730#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1731#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1732
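/*
 * Lock or unlock a single block, selected by the thunk argument
 * (DO_XXLOCK_ONEBLOCK_LOCK or DO_XXLOCK_ONEBLOCK_UNLOCK).
 */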
1733static int do_xxlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, void *thunk)
1734{
1735        struct cfi_private *cfi = map->fldrv_priv;
1736        cfi_word status, status_OK;
1737        unsigned long timeo = jiffies + HZ;
1738        DECLARE_WAITQUEUE(wait, current);
1739
1740        adr += chip->start;
1741
1742        /* Let's determine this according to the interleave only once */
1743        status_OK = CMD(0x80);
1744
1745        timeo = jiffies + HZ;
1746retry:
1747        spin_lock(chip->mutex);
1748
1749        /* Check that the chip's ready to talk to us. */
1750        switch (chip->state) {
1751        case FL_CFI_QUERY:
1752        case FL_JEDEC_QUERY:
1753        case FL_READY:
1754                cfi_write(map, CMD(0x70), adr);
1755                chip->state = FL_STATUS;
1756
1757        case FL_STATUS:
1758                status = cfi_read(map, adr);
1759                if ((status & status_OK) == status_OK)
1760                        break;
1761                
1762                /* Urgh. Chip not yet ready to talk to us. */
1763                if (time_after(jiffies, timeo)) {
1764                        spin_unlock(chip->mutex);
1765                        printk(KERN_ERR "%s: waiting for chip to be ready timed out\n", __FUNCTION__);
1766                        return -EIO;
1767                }
1768
1769                /* Latency issues. Drop the lock, wait a while and retry */
1770                spin_unlock(chip->mutex);
1771                cfi_udelay(1);
1772                goto retry;
1773
1774        default:
1775                /* Stick ourselves on a wait queue to be woken when
1776                   someone changes the status */
1777                set_current_state(TASK_UNINTERRUPTIBLE);
1778                add_wait_queue(&chip->wq, &wait);
1779                spin_unlock(chip->mutex);
1780                schedule();
1781                remove_wait_queue(&chip->wq, &wait);
1782                timeo = jiffies + HZ;
1783                goto retry;
1784        }
1785
1786        ENABLE_VPP(map);
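        /* Block lock setup (0x60), followed by either set lock bit (0x01)
           or clear lock bits (0xD0) */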
1787        cfi_write(map, CMD(0x60), adr);
1788
1789        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1790                cfi_write(map, CMD(0x01), adr);
1791                chip->state = FL_LOCKING;
1792        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1793                cfi_write(map, CMD(0xD0), adr);
1794                chip->state = FL_UNLOCKING;
1795        } else
1796                BUG();
1797
1798        spin_unlock(chip->mutex);
1799        schedule_timeout(HZ);
1800        spin_lock(chip->mutex);
1801
1802        /* FIXME. Use a timer to check this, and return immediately. */
1803        /* Once the state machine's known to be working I'll do that */
1804
1805        timeo = jiffies + (HZ*20);
1806        for (;;) {
1807
1808                status = cfi_read(map, adr);
1809                if ((status & status_OK) == status_OK)
1810                        break;
1811                
1812                /* OK Still waiting */
1813                if (time_after(jiffies, timeo)) {
1814                        cfi_write(map, CMD(0x70), adr);
1815                        chip->state = FL_STATUS;
1816                        printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %llx, status = %llx.\n", (__u64)status, (__u64)cfi_read(map, adr));
1817                        DISABLE_VPP(map);
1818                        spin_unlock(chip->mutex);
1819                        return -EIO;
1820                }
1821                
1822                /* Latency issues. Drop the lock, wait a while and retry */
1823                spin_unlock(chip->mutex);
1824                cfi_udelay(1);
1825                spin_lock(chip->mutex);
1826        }
1827        
1828        /* Done and happy. */
1829        chip->state = FL_STATUS;
1830        DISABLE_VPP(map);
1831        wake_up(&chip->wq);
1832        spin_unlock(chip->mutex);
1833        return 0;
1834}
1835
1836static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1837{
1838        int ret;
1839
1840#ifdef DEBUG_LOCK_BITS
1841        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1842               __FUNCTION__, ofs, len);
1843        cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1844                                  ofs, len, 0);
1845#endif
1846
1847        ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock, 
1848                                        ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1849        
1850#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1853        cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1854                                  ofs, len, 0);
1855#endif
1856
1857        return ret;
1858}
1859
1860static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1861{
1862        int ret;
1863
1864#ifdef DEBUG_LOCK_BITS
1865        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1866               __FUNCTION__, ofs, len);
1867        cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock,
1868                                  ofs, len, 0);
1869#endif
1870
1871        ret = cfi_intelext_varsize_frob(mtd, do_xxlock_oneblock,
1872                                        ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1873        
1874#ifdef DEBUG_LOCK_BITS
1875        printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __FUNCTION__, ret);
1876        cfi_intelext_varsize_frob(mtd, do_printlockstatus_oneblock, 
1877                                  ofs, len, 0);
1878#endif
1879        
1880        return ret;
1881}
1882
1883static int cfi_intelext_suspend(struct mtd_info *mtd)
1884{
1885        struct map_info *map = mtd->priv;
1886        struct cfi_private *cfi = map->fldrv_priv;
1887        int i;
1888        struct flchip *chip;
1889        int ret = 0;
1890
1891        for (i=0; !ret && i<cfi->numchips; i++) {
1892                chip = &cfi->chips[i];
1893
1894                spin_lock(chip->mutex);
1895
1896                switch(chip->state) {
1897                case FL_READY:
1898                case FL_STATUS:
1899                case FL_CFI_QUERY:
1900                case FL_JEDEC_QUERY:
1901                        chip->oldstate = chip->state;
1902                        chip->state = FL_PM_SUSPENDED;
1903                        /* No need to wake_up() on this state change - 
1904                         * as the whole point is that nobody can do anything
1905                         * with the chip now anyway.
1906                         */
1907                case FL_PM_SUSPENDED:
1908                        break;
1909
1910                default:
1911                        ret = -EAGAIN;
1912                        break;
1913                }
1914                spin_unlock(chip->mutex);
1915        }
1916
1917        /* Unlock the chips again */
1918
1919        if (ret) {
1920                for (i--; i >=0; i--) {
1921                        chip = &cfi->chips[i];
1922                        
1923                        spin_lock(chip->mutex);
1924                        
1925                        if (chip->state == FL_PM_SUSPENDED) {
1926                                /* No need to force it into a known state here,
1927                                   because we're returning failure, and it didn't
1928                                   get power cycled */
1929                                chip->state = chip->oldstate;
1930                                wake_up(&chip->wq);
1931                        }
1932                        spin_unlock(chip->mutex);
1933                }
1934        } 
1935        
1936        return ret;
1937}
1938
1939static void cfi_intelext_resume(struct mtd_info *mtd)
1940{
1941        struct map_info *map = mtd->priv;
1942        struct cfi_private *cfi = map->fldrv_priv;
1943        int i;
1944        struct flchip *chip;
1945
1946        for (i=0; i<cfi->numchips; i++) {
1947        
1948                chip = &cfi->chips[i];
1949
1950                spin_lock(chip->mutex);
1951                
1952                /* Go to known state. Chip may have been power cycled */
1953                if (chip->state == FL_PM_SUSPENDED) {
                        cfi_write(map, CMD(0xFF), chip->start);
1955                        chip->state = FL_READY;
1956                        wake_up(&chip->wq);
1957                }
1958
1959                spin_unlock(chip->mutex);
1960        }
1961}
1962
1963static void cfi_intelext_destroy(struct mtd_info *mtd)
1964{
1965        struct map_info *map = mtd->priv;
1966        struct cfi_private *cfi = map->fldrv_priv;
1967        kfree(cfi->cmdset_priv);
1968        kfree(cfi->cfiq);
1969        kfree(cfi);
1970        kfree(mtd->eraseregions);
1971}
1972
1973static char im_name_1[]="cfi_cmdset_0001";
1974static char im_name_3[]="cfi_cmdset_0003";
1975
1976int __init cfi_intelext_init(void)
1977{
1978        inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
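        /* The Intel Standard Command Set (0x0003) is handled by the same
           code as the Extended Command Set (0x0001) */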
1979        inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
1980        return 0;
1981}
1982
1983static void __exit cfi_intelext_exit(void)
1984{
1985        inter_module_unregister(im_name_1);
1986        inter_module_unregister(im_name_3);
1987}
1988
1989module_init(cfi_intelext_init);
1990module_exit(cfi_intelext_exit);
1991
1992MODULE_LICENSE("GPL");
1993MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
1994MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
1995