linux-old/drivers/mtd/chips/cfi_cmdset_0020.c
<<
>>
Prefs
   1/*
   2 * Common Flash Interface support:
   3 *   ST Advanced Architecture Command Set (ID 0x0020)
   4 *
   5 * (C) 2000 Red Hat. GPL'd
   6 *
   7 * 
   8 * 10/10/2000   Nicolas Pitre <nico@cam.org>
   9 *      - completely revamped method functions so they are aware and
  10 *        independent of the flash geometry (buswidth, interleave, etc.)
  11 *      - scalability vs code size is completely set at compile-time
  12 *        (see include/linux/mtd/cfi.h for selection)
  13 *      - optimized write buffer method
  14 * 06/21/2002   Joern Engel <joern@wh.fh-wedel.de> and others
  15 *      - modified Intel Command Set 0x0001 to support ST Advanced Architecture
  16 *        (command set 0x0020)
  17 *      - added a writev function
  18 */
  19
  20#include <linux/module.h>
  21#include <linux/types.h>
  22#include <linux/kernel.h>
  23#include <linux/sched.h>
  24#include <asm/io.h>
  25#include <asm/byteorder.h>
  26
  27#include <linux/errno.h>
  28#include <linux/slab.h>
  29#include <linux/delay.h>
  30#include <linux/interrupt.h>
  31#include <linux/mtd/map.h>
  32#include <linux/mtd/cfi.h>
  33#include <linux/mtd/compatmac.h>
  34
  35
  36static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
  37static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
  38static int cfi_staa_writev(struct mtd_info *mtd, const struct iovec *vecs,
  39                unsigned long count, loff_t to, size_t *retlen);
  40static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
  41static void cfi_staa_sync (struct mtd_info *);
  42static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
  43static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
  44static int cfi_staa_suspend (struct mtd_info *);
  45static void cfi_staa_resume (struct mtd_info *);
  46
  47static void cfi_staa_destroy(struct mtd_info *);
  48
  49struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
  50
  51static struct mtd_info *cfi_staa_setup (struct map_info *);
  52
  53static struct mtd_chip_driver cfi_staa_chipdrv = {
  54        probe: NULL, /* Not usable directly */
  55        destroy: cfi_staa_destroy,
  56        name: "cfi_cmdset_0020",
  57        module: THIS_MODULE
  58};
  59
  60/* #define DEBUG_LOCK_BITS */
  61//#define DEBUG_CFI_FEATURES
  62
  63#ifdef DEBUG_CFI_FEATURES
  64static void cfi_tell_features(struct cfi_pri_intelext *extp)
  65{
  66        int i;
  67        printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
  68        printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
  69        printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
  70        printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
  71        printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
  72        printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
  73        printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
  74        printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
  75        printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
  76        printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
  77        for (i=9; i<32; i++) {
  78                if (extp->FeatureSupport & (1<<i)) 
  79                        printk("     - Unknown Bit %X:      supported\n", i);
  80        }
  81        
  82        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
  83        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
  84        for (i=1; i<8; i++) {
  85                if (extp->SuspendCmdSupport & (1<<i))
  86                        printk("     - Unknown Bit %X:               supported\n", i);
  87        }
  88        
  89        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
  90        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
  91        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
  92        for (i=2; i<16; i++) {
  93                if (extp->BlkStatusRegMask & (1<<i))
  94                        printk("     - Unknown Bit %X Active: yes\n",i);
  95        }
  96        
  97        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n", 
  98               extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
  99        if (extp->VppOptimal)
 100                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n", 
 101                       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);
 102}
 103#endif
 104
 105/* This routine is made available to other mtd code via
 106 * inter_module_register.  It must only be accessed through
 107 * inter_module_get which will bump the use count of this module.  The
 108 * addresses passed back in cfi are valid as long as the use count of
 109 * this module is non-zero, i.e. between inter_module_get and
 110 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 111 */
struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	__u32 base = cfi->chips[0].start;

	if (cfi->cfi_mode) {
		/* 
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		/* 'primary' selects which extended-query table pointer
		 * from the CFI query structure to follow. */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;
		/* Address stride between successive query bytes on the bus
		 * (accounts for chip interleave and device width). */
		int ofs_factor = cfi->interleave * cfi->device_type;

		printk(" ST Microelectronics Extended Query Table at 0x%4.4X\n", adr);
		if (!adr)
			return NULL;

		/* Switch it into Query Mode */
		cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

		extp = kmalloc(sizeof(*extp), GFP_KERNEL);
		if (!extp) {
			printk(KERN_ERR "Failed to allocate memory\n");
			return NULL;
		}
		
		/* Read in the Extended Query Table, one byte at a time. */
		for (i=0; i<sizeof(*extp); i++) {
			((unsigned char *)extp)[i] = 
				cfi_read_query(map, (base+((adr+i)*ofs_factor)));
		}
		
		/* Only extended-query versions 1.0 - 1.2 are understood. */
		if (extp->MajorVersion != '1' || 
		    (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
		    printk(KERN_WARNING "  Unknown staa Extended Query "
			   "version %c.%c.\n",  extp->MajorVersion,
			   extp->MinorVersion);
		    kfree(extp);
		    return NULL;
		}
		
		/* Do some byteswapping if necessary */
		extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
		extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
		
#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif	

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;
	}	

	/* Initial conservative timing values; do_write_buffer() tunes
	 * buffer_write_time adaptively at run time.
	 * NOTE(review): presumably microseconds, given the cfi_udelay()
	 * usage elsewhere in this file -- confirm. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;
	}		

	map->fldrv = &cfi_staa_chipdrv;
	MOD_INC_USE_COUNT;
	
	/* Make sure it's in read mode */
	cfi_send_gen_cmd(0xff, 0x55, base, map, cfi, cfi->device_type, NULL);
	return cfi_staa_setup(map);
}
 182
 183static struct mtd_info *cfi_staa_setup(struct map_info *map)
 184{
 185        struct cfi_private *cfi = map->fldrv_priv;
 186        struct mtd_info *mtd;
 187        unsigned long offset = 0;
 188        int i,j;
 189        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
 190
 191        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
 192        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
 193
 194        if (!mtd) {
 195                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
 196                kfree(cfi->cmdset_priv);
 197                return NULL;
 198        }
 199
 200        memset(mtd, 0, sizeof(*mtd));
 201        mtd->priv = map;
 202        mtd->type = MTD_NORFLASH;
 203        mtd->size = devsize * cfi->numchips;
 204
 205        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
 206        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
 207                        * mtd->numeraseregions, GFP_KERNEL);
 208        if (!mtd->eraseregions) { 
 209                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
 210                kfree(cfi->cmdset_priv);
 211                return NULL;
 212        }
 213        
 214        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
 215                unsigned long ernum, ersize;
 216                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
 217                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
 218
 219                if (mtd->erasesize < ersize) {
 220                        mtd->erasesize = ersize;
 221                }
 222                for (j=0; j<cfi->numchips; j++) {
 223                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
 224                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
 225                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
 226                }
 227                offset += (ersize * ernum);
 228                }
 229
 230                if (offset != devsize) {
 231                        /* Argh */
 232                        printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
 233                        kfree(mtd->eraseregions);
 234                        kfree(cfi->cmdset_priv);
 235                        return NULL;
 236                }
 237
 238                for (i=0; i<mtd->numeraseregions;i++){
 239                        printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
 240                               i,mtd->eraseregions[i].offset,
 241                               mtd->eraseregions[i].erasesize,
 242                               mtd->eraseregions[i].numblocks);
 243                }
 244
 245        /* Also select the correct geometry setup too */ 
 246        mtd->erase = cfi_staa_erase_varsize;
 247        mtd->read = cfi_staa_read;
 248        mtd->write = cfi_staa_write_buffers;
 249        mtd->writev = cfi_staa_writev;
 250        mtd->sync = cfi_staa_sync;
 251        mtd->lock = cfi_staa_lock;
 252        mtd->unlock = cfi_staa_unlock;
 253        mtd->suspend = cfi_staa_suspend;
 254        mtd->resume = cfi_staa_resume;
 255        mtd->flags = MTD_CAP_NORFLASH;
 256        mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */
 257        mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
 258        map->fldrv = &cfi_staa_chipdrv;
 259        MOD_INC_USE_COUNT;
 260        mtd->name = map->name;
 261        return mtd;
 262}
 263
 264
 265static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
 266{
 267        __u32 status, status_OK;
 268        unsigned long timeo;
 269        DECLARE_WAITQUEUE(wait, current);
 270        int suspended = 0;
 271        unsigned long cmd_addr;
 272        struct cfi_private *cfi = map->fldrv_priv;
 273
 274        adr += chip->start;
 275
 276        /* Ensure cmd read/writes are aligned. */ 
 277        cmd_addr = adr & ~(CFIDEV_BUSWIDTH-1); 
 278
 279        /* Let's determine this according to the interleave only once */
 280        status_OK = CMD(0x80);
 281
 282        timeo = jiffies + HZ;
 283 retry:
 284        spin_lock_bh(chip->mutex);
 285
 286        /* Check that the chip's ready to talk to us.
 287         * If it's in FL_ERASING state, suspend it and make it talk now.
 288         */
 289        switch (chip->state) {
 290        case FL_ERASING:
 291                if (!((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2)
 292                        goto sleep; /* We don't support erase suspend */
 293                
 294                cfi_write (map, CMD(0xb0), cmd_addr);
 295                /* If the flash has finished erasing, then 'erase suspend'
 296                 * appears to make some (28F320) flash devices switch to
 297                 * 'read' mode.  Make sure that we switch to 'read status'
 298                 * mode so we get the right data. --rmk
 299                 */
 300                cfi_write(map, CMD(0x70), cmd_addr);
 301                chip->oldstate = FL_ERASING;
 302                chip->state = FL_ERASE_SUSPENDING;
 303                //              printk("Erase suspending at 0x%lx\n", cmd_addr);
 304                for (;;) {
 305                        status = cfi_read(map, cmd_addr);
 306                        if ((status & status_OK) == status_OK)
 307                                break;
 308                        
 309                        if (time_after(jiffies, timeo)) {
 310                                /* Urgh */
 311                                cfi_write(map, CMD(0xd0), cmd_addr);
 312                                /* make sure we're in 'read status' mode */
 313                                cfi_write(map, CMD(0x70), cmd_addr);
 314                                chip->state = FL_ERASING;
 315                                spin_unlock_bh(chip->mutex);
 316                                printk(KERN_ERR "Chip not ready after erase "
 317                                       "suspended: status = 0x%x\n", status);
 318                                return -EIO;
 319                        }
 320                        
 321                        spin_unlock_bh(chip->mutex);
 322                        cfi_udelay(1);
 323                        spin_lock_bh(chip->mutex);
 324                }
 325                
 326                suspended = 1;
 327                cfi_write(map, CMD(0xff), cmd_addr);
 328                chip->state = FL_READY;
 329                break;
 330        
 331#if 0
 332        case FL_WRITING:
 333                /* Not quite yet */
 334#endif
 335
 336        case FL_READY:
 337                break;
 338
 339        case FL_CFI_QUERY:
 340        case FL_JEDEC_QUERY:
 341                cfi_write(map, CMD(0x70), cmd_addr);
 342                chip->state = FL_STATUS;
 343
 344        case FL_STATUS:
 345                status = cfi_read(map, cmd_addr);
 346                if ((status & status_OK) == status_OK) {
 347                        cfi_write(map, CMD(0xff), cmd_addr);
 348                        chip->state = FL_READY;
 349                        break;
 350                }
 351                
 352                /* Urgh. Chip not yet ready to talk to us. */
 353                if (time_after(jiffies, timeo)) {
 354                        spin_unlock_bh(chip->mutex);
 355                        printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %x\n", status);
 356                        return -EIO;
 357                }
 358
 359                /* Latency issues. Drop the lock, wait a while and retry */
 360                spin_unlock_bh(chip->mutex);
 361                cfi_udelay(1);
 362                goto retry;
 363
 364        default:
 365        sleep:
 366                /* Stick ourselves on a wait queue to be woken when
 367                   someone changes the status */
 368                set_current_state(TASK_UNINTERRUPTIBLE);
 369                add_wait_queue(&chip->wq, &wait);
 370                spin_unlock_bh(chip->mutex);
 371                schedule();
 372                remove_wait_queue(&chip->wq, &wait);
 373                timeo = jiffies + HZ;
 374                goto retry;
 375        }
 376
 377        map->copy_from(map, buf, adr, len);
 378
 379        if (suspended) {
 380                chip->state = chip->oldstate;
 381                /* What if one interleaved chip has finished and the 
 382                   other hasn't? The old code would leave the finished
 383                   one in READY mode. That's bad, and caused -EROFS 
 384                   errors to be returned from do_erase_oneblock because
 385                   that's the only bit it checked for at the time.
 386                   As the state machine appears to explicitly allow 
 387                   sending the 0x70 (Read Status) command to an erasing
 388                   chip and expecting it to be ignored, that's what we 
 389                   do. */
 390                cfi_write(map, CMD(0xd0), cmd_addr);
 391                cfi_write(map, CMD(0x70), cmd_addr);            
 392        }
 393
 394        wake_up(&chip->wq);
 395        spin_unlock_bh(chip->mutex);
 396        return 0;
 397}
 398
 399static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
 400{
 401        struct map_info *map = mtd->priv;
 402        struct cfi_private *cfi = map->fldrv_priv;
 403        unsigned long ofs;
 404        int chipnum;
 405        int ret = 0;
 406
 407        /* ofs: offset within the first chip that the first read should start */
 408        chipnum = (from >> cfi->chipshift);
 409        ofs = from - (chipnum <<  cfi->chipshift);
 410
 411        *retlen = 0;
 412
 413        while (len) {
 414                unsigned long thislen;
 415
 416                if (chipnum >= cfi->numchips)
 417                        break;
 418
 419                if ((len + ofs -1) >> cfi->chipshift)
 420                        thislen = (1<<cfi->chipshift) - ofs;
 421                else
 422                        thislen = len;
 423
 424                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
 425                if (ret)
 426                        break;
 427
 428                *retlen += thislen;
 429                len -= thislen;
 430                buf += thislen;
 431                
 432                ofs = 0;
 433                chipnum++;
 434        }
 435        return ret;
 436}
 437
 438static inline int do_write_buffer(struct map_info *map, struct flchip *chip, 
 439                                  unsigned long adr, const u_char *buf, int len)
 440{
 441        struct cfi_private *cfi = map->fldrv_priv;
 442        __u32 status, status_OK;
 443        unsigned long cmd_adr, timeo;
 444        DECLARE_WAITQUEUE(wait, current);
 445        int wbufsize, z;
 446        
 447        /* M58LW064A requires bus alignment for buffer wriets -- saw */
 448        if (adr & (CFIDEV_BUSWIDTH-1))
 449            return -EINVAL;
 450
 451        wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
 452        adr += chip->start;
 453        cmd_adr = adr & ~(wbufsize-1);
 454        
 455        /* Let's determine this according to the interleave only once */
 456        status_OK = CMD(0x80);
 457        
 458        timeo = jiffies + HZ;
 459 retry:
 460
 461#ifdef DEBUG_CFI_FEATURES
 462       printk("%s: chip->state[%d]\n", __FUNCTION__, chip->state);
 463#endif
 464        spin_lock_bh(chip->mutex);
 465 
 466        /* Check that the chip's ready to talk to us.
 467         * Later, we can actually think about interrupting it
 468         * if it's in FL_ERASING state.
 469         * Not just yet, though.
 470         */
 471        switch (chip->state) {
 472        case FL_READY:
 473                break;
 474                
 475        case FL_CFI_QUERY:
 476        case FL_JEDEC_QUERY:
 477                cfi_write(map, CMD(0x70), cmd_adr);
 478                chip->state = FL_STATUS;
 479#ifdef DEBUG_CFI_FEATURES
 480        printk("%s: 1 status[%x]\n", __FUNCTION__, cfi_read(map, cmd_adr));
 481#endif
 482
 483        case FL_STATUS:
 484                status = cfi_read(map, cmd_adr);
 485                if ((status & status_OK) == status_OK)
 486                        break;
 487                /* Urgh. Chip not yet ready to talk to us. */
 488                if (time_after(jiffies, timeo)) {
 489                        spin_unlock_bh(chip->mutex);
 490                        printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %x, status = %x\n",
 491                               status, cfi_read(map, cmd_adr));
 492                        return -EIO;
 493                }
 494
 495                /* Latency issues. Drop the lock, wait a while and retry */
 496                spin_unlock_bh(chip->mutex);
 497                cfi_udelay(1);
 498                goto retry;
 499
 500        default:
 501                /* Stick ourselves on a wait queue to be woken when
 502                   someone changes the status */
 503                set_current_state(TASK_UNINTERRUPTIBLE);
 504                add_wait_queue(&chip->wq, &wait);
 505                spin_unlock_bh(chip->mutex);
 506                schedule();
 507                remove_wait_queue(&chip->wq, &wait);
 508                timeo = jiffies + HZ;
 509                goto retry;
 510        }
 511
 512        ENABLE_VPP(map);
 513        cfi_write(map, CMD(0xe8), cmd_adr);
 514        chip->state = FL_WRITING_TO_BUFFER;
 515
 516        z = 0;
 517        for (;;) {
 518                status = cfi_read(map, cmd_adr);
 519                if ((status & status_OK) == status_OK)
 520                        break;
 521
 522                spin_unlock_bh(chip->mutex);
 523                cfi_udelay(1);
 524                spin_lock_bh(chip->mutex);
 525
 526                if (++z > 100) {
 527                        /* Argh. Not ready for write to buffer */
 528                        DISABLE_VPP(map);
 529                        cfi_write(map, CMD(0x70), cmd_adr);
 530                        chip->state = FL_STATUS;
 531                        spin_unlock_bh(chip->mutex);
 532                        printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %x\n", status);
 533                        return -EIO;
 534                }
 535        }
 536
 537        /* Write length of data to come */
 538        cfi_write(map, CMD(len/CFIDEV_BUSWIDTH-1), cmd_adr );
 539        
 540        /* Write data */
 541        for (z = 0; z < len; z += CFIDEV_BUSWIDTH) {
 542                if (cfi_buswidth_is_1()) {
 543                        map->write8 (map, *(__u8*)buf, adr+z);
 544                        buf += sizeof(__u8);
 545                } else if (cfi_buswidth_is_2()) {
 546                        map->write16 (map, *(__u16*)buf, adr+z);
 547                        buf += sizeof(__u16);
 548                } else if (cfi_buswidth_is_4()) {
 549                        map->write32 (map, *(__u32*)buf, adr+z);
 550                        buf += sizeof(__u32);
 551                } else {
 552                        DISABLE_VPP(map);
 553                        return -EINVAL;
 554                }
 555        }
 556        /* GO GO GO */
 557        cfi_write(map, CMD(0xd0), cmd_adr);
 558        chip->state = FL_WRITING;
 559
 560        spin_unlock_bh(chip->mutex);
 561        cfi_udelay(chip->buffer_write_time);
 562        spin_lock_bh(chip->mutex);
 563
 564        timeo = jiffies + (HZ/2);
 565        z = 0;
 566        for (;;) {
 567                if (chip->state != FL_WRITING) {
 568                        /* Someone's suspended the write. Sleep */
 569                        set_current_state(TASK_UNINTERRUPTIBLE);
 570                        add_wait_queue(&chip->wq, &wait);
 571                        spin_unlock_bh(chip->mutex);
 572                        schedule();
 573                        remove_wait_queue(&chip->wq, &wait);
 574                        timeo = jiffies + (HZ / 2); /* FIXME */
 575                        spin_lock_bh(chip->mutex);
 576                        continue;
 577                }
 578
 579                status = cfi_read(map, cmd_adr);
 580                if ((status & status_OK) == status_OK)
 581                        break;
 582
 583                /* OK Still waiting */
 584                if (time_after(jiffies, timeo)) {
 585                        /* clear status */
 586                        cfi_write(map, CMD(0x50), cmd_adr);
 587                        /* put back into read status register mode */
 588                        cfi_write(map, CMD(0x70), adr);
 589                        chip->state = FL_STATUS;
 590                        DISABLE_VPP(map);
 591                        spin_unlock_bh(chip->mutex);
 592                        printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
 593                        return -EIO;
 594                }
 595                
 596                /* Latency issues. Drop the lock, wait a while and retry */
 597                spin_unlock_bh(chip->mutex);
 598                cfi_udelay(1);
 599                z++;
 600                spin_lock_bh(chip->mutex);
 601        }
 602        if (!z) {
 603                chip->buffer_write_time--;
 604                if (!chip->buffer_write_time)
 605                        chip->buffer_write_time++;
 606        }
 607        if (z > 1) 
 608                chip->buffer_write_time++;
 609        
 610        /* Done and happy. */
 611        DISABLE_VPP(map);
 612        chip->state = FL_STATUS;
 613
 614        /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
 615        if ((status & CMD(0x02)) || (status & CMD(0x08)) ||
 616            (status & CMD(0x10)) || (status & CMD(0x20))) {
 617#ifdef DEBUG_CFI_FEATURES
 618            printk("%s: 2 status[%x]\n", __FUNCTION__, status);
 619#endif
 620            /* clear status */
 621            cfi_write(map, CMD(0x50), cmd_adr);
 622            /* put back into read status register mode */
 623            cfi_write(map, CMD(0x70), adr);
 624            wake_up(&chip->wq);
 625            spin_unlock_bh(chip->mutex);
 626            return (status & CMD(0x02)) ? -EROFS : -EIO;
 627        }
 628        wake_up(&chip->wq);
 629        spin_unlock_bh(chip->mutex);
 630
 631        return 0;
 632}
 633
 634static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, 
 635                                       size_t len, size_t *retlen, const u_char *buf)
 636{
 637        struct map_info *map = mtd->priv;
 638        struct cfi_private *cfi = map->fldrv_priv;
 639        int wbufsize = CFIDEV_INTERLEAVE << cfi->cfiq->MaxBufWriteSize;
 640        int ret = 0;
 641        int chipnum;
 642        unsigned long ofs;
 643
 644        *retlen = 0;
 645        if (!len)
 646                return 0;
 647
 648        chipnum = to >> cfi->chipshift;
 649        ofs = to  - (chipnum << cfi->chipshift);
 650
 651#ifdef DEBUG_CFI_FEATURES
 652        printk("%s: CFIDEV_BUSWIDTH[%x]\n", __FUNCTION__, CFIDEV_BUSWIDTH);
 653        printk("%s: chipnum[%x] wbufsize[%x]\n", __FUNCTION__, chipnum, wbufsize);
 654        printk("%s: ofs[%x] len[%x]\n", __FUNCTION__, ofs, len);
 655#endif
 656        
 657        /* Write buffer is worth it only if more than one word to write... */
 658        while (len > 0) {
 659                /* We must not cross write block boundaries */
 660                int size = wbufsize - (ofs & (wbufsize-1));
 661
 662                if (size > len)
 663                    size = len;
 664
 665                ret = do_write_buffer(map, &cfi->chips[chipnum], 
 666                                      ofs, buf, size);
 667                if (ret)
 668                        return ret;
 669
 670                ofs += size;
 671                buf += size;
 672                (*retlen) += size;
 673                len -= size;
 674
 675                if (ofs >> cfi->chipshift) {
 676                        chipnum ++; 
 677                        ofs = 0;
 678                        if (chipnum == cfi->numchips)
 679                                return 0;
 680                }
 681        }
 682        
 683        return 0;
 684}
 685
 686/*
 687 * Writev for ECC-Flashes is a little more complicated. We need to maintain
 688 * a small buffer for this.
 689 * XXX: If the buffer size is not a multiple of 2, this will break
 690 */
 691#define ECCBUF_SIZE (mtd->eccsize)
 692#define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
 693#define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
 694static int
 695cfi_staa_writev(struct mtd_info *mtd, const struct iovec *vecs,
 696                unsigned long count, loff_t to, size_t *retlen)
 697{
 698        unsigned long i;
 699        size_t   totlen = 0, thislen;
 700        int      ret = 0;
 701        size_t   buflen = 0;
 702        static char *buffer;
 703
 704        if (!ECCBUF_SIZE) {
 705                /* We should fall back to a general writev implementation.
 706                 * Until that is written, just break.
 707                 */
 708                return -EIO;
 709        }
 710        buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
 711        if (!buffer)
 712                return -ENOMEM;
 713
 714        for (i=0; i<count; i++) {
 715                size_t elem_len = vecs[i].iov_len;
 716                void *elem_base = vecs[i].iov_base;
 717                if (!elem_len) /* FIXME: Might be unnecessary. Check that */
 718                        continue;
 719                if (buflen) { /* cut off head */
 720                        if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
 721                                memcpy(buffer+buflen, elem_base, elem_len);
 722                                buflen += elem_len;
 723                                continue;
 724                        }
 725                        memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
 726                        ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
 727                        totlen += thislen;
 728                        if (ret || thislen != ECCBUF_SIZE)
 729                                goto write_error;
 730                        elem_len -= thislen-buflen;
 731                        elem_base += thislen-buflen;
 732                        to += ECCBUF_SIZE;
 733                }
 734                if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
 735                        ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
 736                        totlen += thislen;
 737                        if (ret || thislen != ECCBUF_DIV(elem_len))
 738                                goto write_error;
 739                        to += thislen;
 740                }
 741                buflen = ECCBUF_MOD(elem_len); /* cut off tail */
 742                if (buflen) {
 743                        memset(buffer, 0xff, ECCBUF_SIZE);
 744                        memcpy(buffer, elem_base + thislen, buflen);
 745                }
 746        }
 747        if (buflen) { /* flush last page, even if not full */
 748                /* This is sometimes intended behaviour, really */
 749                ret = mtd->write(mtd, to, buflen, &thislen, buffer);
 750                totlen += thislen;
 751                if (ret || thislen != ECCBUF_SIZE)
 752                        goto write_error;
 753        }
 754write_error:
 755        if (retlen)
 756                *retlen = totlen;
 757        return ret;
 758}
 759
 760
 761static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
 762{
 763        struct cfi_private *cfi = map->fldrv_priv;
 764        __u32 status, status_OK;
 765        unsigned long timeo;
 766        int retries = 3;
 767        DECLARE_WAITQUEUE(wait, current);
 768        int ret = 0;
 769
 770        adr += chip->start;
 771
 772        /* Let's determine this according to the interleave only once */
 773        status_OK = CMD(0x80);
 774
 775        timeo = jiffies + HZ;
 776retry:
 777        spin_lock_bh(chip->mutex);
 778
 779        /* Check that the chip's ready to talk to us. */
 780        switch (chip->state) {
 781        case FL_CFI_QUERY:
 782        case FL_JEDEC_QUERY:
 783        case FL_READY:
 784                cfi_write(map, CMD(0x70), adr);
 785                chip->state = FL_STATUS;
 786
 787        case FL_STATUS:
 788                status = cfi_read(map, adr);
 789                if ((status & status_OK) == status_OK)
 790                        break;
 791                
 792                /* Urgh. Chip not yet ready to talk to us. */
 793                if (time_after(jiffies, timeo)) {
 794                        spin_unlock_bh(chip->mutex);
 795                        printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
 796                        return -EIO;
 797                }
 798
 799                /* Latency issues. Drop the lock, wait a while and retry */
 800                spin_unlock_bh(chip->mutex);
 801                cfi_udelay(1);
 802                goto retry;
 803
 804        default:
 805                /* Stick ourselves on a wait queue to be woken when
 806                   someone changes the status */
 807                set_current_state(TASK_UNINTERRUPTIBLE);
 808                add_wait_queue(&chip->wq, &wait);
 809                spin_unlock_bh(chip->mutex);
 810                schedule();
 811                remove_wait_queue(&chip->wq, &wait);
 812                timeo = jiffies + HZ;
 813                goto retry;
 814        }
 815
 816        ENABLE_VPP(map);
 817        /* Clear the status register first */
 818        cfi_write(map, CMD(0x50), adr);
 819
 820        /* Now erase */
 821        cfi_write(map, CMD(0x20), adr);
 822        cfi_write(map, CMD(0xD0), adr);
 823        chip->state = FL_ERASING;
 824        
 825        spin_unlock_bh(chip->mutex);
 826        schedule_timeout(HZ);
 827        spin_lock_bh(chip->mutex);
 828
 829        /* FIXME. Use a timer to check this, and return immediately. */
 830        /* Once the state machine's known to be working I'll do that */
 831
 832        timeo = jiffies + (HZ*20);
 833        for (;;) {
 834                if (chip->state != FL_ERASING) {
 835                        /* Someone's suspended the erase. Sleep */
 836                        set_current_state(TASK_UNINTERRUPTIBLE);
 837                        add_wait_queue(&chip->wq, &wait);
 838                        spin_unlock_bh(chip->mutex);
 839                        schedule();
 840                        remove_wait_queue(&chip->wq, &wait);
 841                        timeo = jiffies + (HZ*20); /* FIXME */
 842                        spin_lock_bh(chip->mutex);
 843                        continue;
 844                }
 845
 846                status = cfi_read(map, adr);
 847                if ((status & status_OK) == status_OK)
 848                        break;
 849                
 850                /* OK Still waiting */
 851                if (time_after(jiffies, timeo)) {
 852                        cfi_write(map, CMD(0x70), adr);
 853                        chip->state = FL_STATUS;
 854                        printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
 855                        DISABLE_VPP(map);
 856                        spin_unlock_bh(chip->mutex);
 857                        return -EIO;
 858                }
 859                
 860                /* Latency issues. Drop the lock, wait a while and retry */
 861                spin_unlock_bh(chip->mutex);
 862                cfi_udelay(1);
 863                spin_lock_bh(chip->mutex);
 864        }
 865        
 866        DISABLE_VPP(map);
 867        ret = 0;
 868
 869        /* We've broken this before. It doesn't hurt to be safe */
 870        cfi_write(map, CMD(0x70), adr);
 871        chip->state = FL_STATUS;
 872        status = cfi_read(map, adr);
 873
 874        /* check for lock bit */
 875        if (status & CMD(0x3a)) {
 876                unsigned char chipstatus = status;
 877                if (status != CMD(status & 0xff)) {
 878                        int i;
 879                        for (i = 1; i<CFIDEV_INTERLEAVE; i++) {
 880                                      chipstatus |= status >> (cfi->device_type * 8);
 881                        }
 882                        printk(KERN_WARNING "Status is not identical for all chips: 0x%x. Merging to give 0x%02x\n", status, chipstatus);
 883                }
 884                /* Reset the error bits */
 885                cfi_write(map, CMD(0x50), adr);
 886                cfi_write(map, CMD(0x70), adr);
 887                
 888                if ((chipstatus & 0x30) == 0x30) {
 889                        printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", status);
 890                        ret = -EIO;
 891                } else if (chipstatus & 0x02) {
 892                        /* Protection bit set */
 893                        ret = -EROFS;
 894                } else if (chipstatus & 0x8) {
 895                        /* Voltage */
 896                        printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", status);
 897                        ret = -EIO;
 898                } else if (chipstatus & 0x20) {
 899                        if (retries--) {
 900                                printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, status);
 901                                timeo = jiffies + HZ;
 902                                chip->state = FL_STATUS;
 903                                spin_unlock_bh(chip->mutex);
 904                                goto retry;
 905                        }
 906                        printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, status);
 907                        ret = -EIO;
 908                }
 909        }
 910
 911        wake_up(&chip->wq);
 912        spin_unlock_bh(chip->mutex);
 913        return ret;
 914}
 915
/*
 * Erase instr->len bytes starting at instr->addr on a device whose
 * erase-block size varies between regions (mtd->eraseregions).
 *
 * Both the start and the end of the range must be aligned to the
 * erase-block size in effect at the respective address, otherwise
 * -EINVAL is returned.  Each block is erased via do_erase_oneblock();
 * on success instr->state is set to MTD_ERASE_DONE and the optional
 * instr->callback is invoked.
 *
 * NOTE(review): if an erase fails part-way through, already-erased
 * blocks stay erased and the error is reported only via the return
 * code - instr->state is not set to MTD_ERASE_FAILED.
 */
int cfi_staa_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{       struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long adr, len;
        int chipnum, ret = 0;
        int i, first;
        struct mtd_erase_region_info *regions = mtd->eraseregions;

        /* Requested range must lie entirely within the device */
        if (instr->addr > mtd->size)
                return -EINVAL;

        if ((instr->len + instr->addr) > mtd->size)
                return -EINVAL;

        /* Check that both start and end of the requested erase are
         * aligned with the erasesize at the appropriate addresses.
         */

        i = 0;

        /* Skip all erase regions which are ended before the start of 
           the requested erase. Actually, to save on the calculations,
           we skip to the first erase region which starts after the
           start of the requested erase, and then go back one.
        */
        
        while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
               i++;
        i--;

        /* OK, now i is pointing at the erase region in which this 
           erase request starts. Check the start of the requested
           erase range is aligned with the erase size which is in
           effect here.
        */

        if (instr->addr & (regions[i].erasesize-1))
                return -EINVAL;

        /* Remember the erase region we start on */
        first = i;

        /* Next, check that the end of the requested erase is aligned
         * with the erase region at that address.
         */

        while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
                i++;

        /* As before, drop back one to point at the region in which
           the address actually falls
        */
        i--;
        
        if ((instr->addr + instr->len) & (regions[i].erasesize-1))
                return -EINVAL;

        /* Split the address into a chip number and a chip-relative offset */
        chipnum = instr->addr >> cfi->chipshift;
        adr = instr->addr - (chipnum << cfi->chipshift);
        len = instr->len;

        i=first;

        while(len) {
                ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
                
                if (ret)
                        return ret;

                adr += regions[i].erasesize;
                len -= regions[i].erasesize;

                /* Crossed into the next erase region?  Pick up its
                 * (possibly different) block size from now on. */
                if (adr % (1<< cfi->chipshift) == ((regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
                        i++;

                /* Moved past the end of this chip: continue at the
                 * start of the next one, if any */
                if (adr >> cfi->chipshift) {
                        adr = 0;
                        chipnum++;
                        
                        if (chipnum >= cfi->numchips)
                        break;
                }
        }
                
        instr->state = MTD_ERASE_DONE;
        if (instr->callback)
                instr->callback(instr);
        
        return 0;
}
1006
1007static void cfi_staa_sync (struct mtd_info *mtd)
1008{
1009        struct map_info *map = mtd->priv;
1010        struct cfi_private *cfi = map->fldrv_priv;
1011        int i;
1012        struct flchip *chip;
1013        int ret = 0;
1014        DECLARE_WAITQUEUE(wait, current);
1015
1016        for (i=0; !ret && i<cfi->numchips; i++) {
1017                chip = &cfi->chips[i];
1018
1019        retry:
1020                spin_lock_bh(chip->mutex);
1021
1022                switch(chip->state) {
1023                case FL_READY:
1024                case FL_STATUS:
1025                case FL_CFI_QUERY:
1026                case FL_JEDEC_QUERY:
1027                        chip->oldstate = chip->state;
1028                        chip->state = FL_SYNCING;
1029                        /* No need to wake_up() on this state change - 
1030                         * as the whole point is that nobody can do anything
1031                         * with the chip now anyway.
1032                         */
1033                case FL_SYNCING:
1034                        spin_unlock_bh(chip->mutex);
1035                        break;
1036
1037                default:
1038                        /* Not an idle state */
1039                        add_wait_queue(&chip->wq, &wait);
1040                        
1041                        spin_unlock_bh(chip->mutex);
1042                        schedule();
1043                        remove_wait_queue(&chip->wq, &wait);
1044                        
1045                        goto retry;
1046                }
1047        }
1048
1049        /* Unlock the chips again */
1050
1051        for (i--; i >=0; i--) {
1052                chip = &cfi->chips[i];
1053
1054                spin_lock_bh(chip->mutex);
1055                
1056                if (chip->state == FL_SYNCING) {
1057                        chip->state = chip->oldstate;
1058                        wake_up(&chip->wq);
1059                }
1060                spin_unlock_bh(chip->mutex);
1061        }
1062}
1063
1064static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1065{
1066        struct cfi_private *cfi = map->fldrv_priv;
1067        __u32 status, status_OK;
1068        unsigned long timeo = jiffies + HZ;
1069        DECLARE_WAITQUEUE(wait, current);
1070
1071        adr += chip->start;
1072
1073        /* Let's determine this according to the interleave only once */
1074        status_OK = CMD(0x80);
1075
1076        timeo = jiffies + HZ;
1077retry:
1078        spin_lock_bh(chip->mutex);
1079
1080        /* Check that the chip's ready to talk to us. */
1081        switch (chip->state) {
1082        case FL_CFI_QUERY:
1083        case FL_JEDEC_QUERY:
1084        case FL_READY:
1085                cfi_write(map, CMD(0x70), adr);
1086                chip->state = FL_STATUS;
1087
1088        case FL_STATUS:
1089                status = cfi_read(map, adr);
1090                if ((status & status_OK) == status_OK) 
1091                        break;
1092                
1093                /* Urgh. Chip not yet ready to talk to us. */
1094                if (time_after(jiffies, timeo)) {
1095                        spin_unlock_bh(chip->mutex);
1096                        printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1097                        return -EIO;
1098                }
1099
1100                /* Latency issues. Drop the lock, wait a while and retry */
1101                spin_unlock_bh(chip->mutex);
1102                cfi_udelay(1);
1103                goto retry;
1104
1105        default:
1106                /* Stick ourselves on a wait queue to be woken when
1107                   someone changes the status */
1108                set_current_state(TASK_UNINTERRUPTIBLE);
1109                add_wait_queue(&chip->wq, &wait);
1110                spin_unlock_bh(chip->mutex);
1111                schedule();
1112                remove_wait_queue(&chip->wq, &wait);
1113                timeo = jiffies + HZ;
1114                goto retry;
1115        }
1116
1117        ENABLE_VPP(map);
1118        cfi_write(map, CMD(0x60), adr);
1119        cfi_write(map, CMD(0x01), adr);
1120        chip->state = FL_LOCKING;
1121        
1122        spin_unlock_bh(chip->mutex);
1123        schedule_timeout(HZ);
1124        spin_lock_bh(chip->mutex);
1125
1126        /* FIXME. Use a timer to check this, and return immediately. */
1127        /* Once the state machine's known to be working I'll do that */
1128
1129        timeo = jiffies + (HZ*2);
1130        for (;;) {
1131
1132                status = cfi_read(map, adr);
1133                if ((status & status_OK) == status_OK)
1134                        break;
1135                
1136                /* OK Still waiting */
1137                if (time_after(jiffies, timeo)) {
1138                        cfi_write(map, CMD(0x70), adr);
1139                        chip->state = FL_STATUS;
1140                        printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
1141                        DISABLE_VPP(map);
1142                        spin_unlock_bh(chip->mutex);
1143                        return -EIO;
1144                }
1145                
1146                /* Latency issues. Drop the lock, wait a while and retry */
1147                spin_unlock_bh(chip->mutex);
1148                cfi_udelay(1);
1149                spin_lock_bh(chip->mutex);
1150        }
1151        
1152        /* Done and happy. */
1153        chip->state = FL_STATUS;
1154        DISABLE_VPP(map);
1155        wake_up(&chip->wq);
1156        spin_unlock_bh(chip->mutex);
1157        return 0;
1158}
1159static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1160{
1161        struct map_info *map = mtd->priv;
1162        struct cfi_private *cfi = map->fldrv_priv;
1163        unsigned long adr;
1164        int chipnum, ret = 0;
1165#ifdef DEBUG_LOCK_BITS
1166        int ofs_factor = cfi->interleave * cfi->device_type;
1167#endif
1168
1169        if (ofs & (mtd->erasesize - 1))
1170                return -EINVAL;
1171
1172        if (len & (mtd->erasesize -1))
1173                return -EINVAL;
1174
1175        if ((len + ofs) > mtd->size)
1176                return -EINVAL;
1177
1178        chipnum = ofs >> cfi->chipshift;
1179        adr = ofs - (chipnum << cfi->chipshift);
1180
1181        while(len) {
1182
1183#ifdef DEBUG_LOCK_BITS
1184                cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1185                printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1186                cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1187#endif
1188
1189                ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1190
1191#ifdef DEBUG_LOCK_BITS
1192                cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1193                printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1194                cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1195#endif  
1196                
1197                if (ret)
1198                        return ret;
1199
1200                adr += mtd->erasesize;
1201                len -= mtd->erasesize;
1202
1203                if (adr >> cfi->chipshift) {
1204                        adr = 0;
1205                        chipnum++;
1206                        
1207                        if (chipnum >= cfi->numchips)
1208                        break;
1209                }
1210        }
1211        return 0;
1212}
1213static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1214{
1215        struct cfi_private *cfi = map->fldrv_priv;
1216        __u32 status, status_OK;
1217        unsigned long timeo = jiffies + HZ;
1218        DECLARE_WAITQUEUE(wait, current);
1219
1220        adr += chip->start;
1221
1222        /* Let's determine this according to the interleave only once */
1223        status_OK = CMD(0x80);
1224
1225        timeo = jiffies + HZ;
1226retry:
1227        spin_lock_bh(chip->mutex);
1228
1229        /* Check that the chip's ready to talk to us. */
1230        switch (chip->state) {
1231        case FL_CFI_QUERY:
1232        case FL_JEDEC_QUERY:
1233        case FL_READY:
1234                cfi_write(map, CMD(0x70), adr);
1235                chip->state = FL_STATUS;
1236
1237        case FL_STATUS:
1238                status = cfi_read(map, adr);
1239                if ((status & status_OK) == status_OK)
1240                        break;
1241                
1242                /* Urgh. Chip not yet ready to talk to us. */
1243                if (time_after(jiffies, timeo)) {
1244                        spin_unlock_bh(chip->mutex);
1245                        printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1246                        return -EIO;
1247                }
1248
1249                /* Latency issues. Drop the lock, wait a while and retry */
1250                spin_unlock_bh(chip->mutex);
1251                cfi_udelay(1);
1252                goto retry;
1253
1254        default:
1255                /* Stick ourselves on a wait queue to be woken when
1256                   someone changes the status */
1257                set_current_state(TASK_UNINTERRUPTIBLE);
1258                add_wait_queue(&chip->wq, &wait);
1259                spin_unlock_bh(chip->mutex);
1260                schedule();
1261                remove_wait_queue(&chip->wq, &wait);
1262                timeo = jiffies + HZ;
1263                goto retry;
1264        }
1265
1266        ENABLE_VPP(map);
1267        cfi_write(map, CMD(0x60), adr);
1268        cfi_write(map, CMD(0xD0), adr);
1269        chip->state = FL_UNLOCKING;
1270        
1271        spin_unlock_bh(chip->mutex);
1272        schedule_timeout(HZ);
1273        spin_lock_bh(chip->mutex);
1274
1275        /* FIXME. Use a timer to check this, and return immediately. */
1276        /* Once the state machine's known to be working I'll do that */
1277
1278        timeo = jiffies + (HZ*2);
1279        for (;;) {
1280
1281                status = cfi_read(map, adr);
1282                if ((status & status_OK) == status_OK)
1283                        break;
1284                
1285                /* OK Still waiting */
1286                if (time_after(jiffies, timeo)) {
1287                        cfi_write(map, CMD(0x70), adr);
1288                        chip->state = FL_STATUS;
1289                        printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %x, status = %x.\n", status, cfi_read(map, adr));
1290                        DISABLE_VPP(map);
1291                        spin_unlock_bh(chip->mutex);
1292                        return -EIO;
1293                }
1294                
1295                /* Latency issues. Drop the unlock, wait a while and retry */
1296                spin_unlock_bh(chip->mutex);
1297                cfi_udelay(1);
1298                spin_lock_bh(chip->mutex);
1299        }
1300        
1301        /* Done and happy. */
1302        chip->state = FL_STATUS;
1303        DISABLE_VPP(map);
1304        wake_up(&chip->wq);
1305        spin_unlock_bh(chip->mutex);
1306        return 0;
1307}
1308static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1309{
1310        struct map_info *map = mtd->priv;
1311        struct cfi_private *cfi = map->fldrv_priv;
1312        unsigned long adr;
1313        int chipnum, ret = 0;
1314#ifdef DEBUG_LOCK_BITS
1315        int ofs_factor = cfi->interleave * cfi->device_type;
1316#endif
1317
1318        chipnum = ofs >> cfi->chipshift;
1319        adr = ofs - (chipnum << cfi->chipshift);
1320
1321#ifdef DEBUG_LOCK_BITS
1322        {
1323                unsigned long temp_adr = adr;
1324                unsigned long temp_len = len;
1325                 
1326                cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1327                while (temp_len) {
1328                        printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor)));
1329                        temp_adr += mtd->erasesize;
1330                        temp_len -= mtd->erasesize;
1331                }
1332                cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1333        }
1334#endif
1335
1336        ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1337
1338#ifdef DEBUG_LOCK_BITS
1339        cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1340        printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1341        cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1342#endif
1343        
1344        return ret;
1345}
1346
1347static int cfi_staa_suspend(struct mtd_info *mtd)
1348{
1349        struct map_info *map = mtd->priv;
1350        struct cfi_private *cfi = map->fldrv_priv;
1351        int i;
1352        struct flchip *chip;
1353        int ret = 0;
1354
1355        for (i=0; !ret && i<cfi->numchips; i++) {
1356                chip = &cfi->chips[i];
1357
1358                spin_lock_bh(chip->mutex);
1359
1360                switch(chip->state) {
1361                case FL_READY:
1362                case FL_STATUS:
1363                case FL_CFI_QUERY:
1364                case FL_JEDEC_QUERY:
1365                        chip->oldstate = chip->state;
1366                        chip->state = FL_PM_SUSPENDED;
1367                        /* No need to wake_up() on this state change - 
1368                         * as the whole point is that nobody can do anything
1369                         * with the chip now anyway.
1370                         */
1371                case FL_PM_SUSPENDED:
1372                        break;
1373
1374                default:
1375                        ret = -EAGAIN;
1376                        break;
1377                }
1378                spin_unlock_bh(chip->mutex);
1379        }
1380
1381        /* Unlock the chips again */
1382
1383        if (ret) {
1384                for (i--; i >=0; i--) {
1385                        chip = &cfi->chips[i];
1386                        
1387                        spin_lock_bh(chip->mutex);
1388                        
1389                        if (chip->state == FL_PM_SUSPENDED) {
1390                                /* No need to force it into a known state here,
1391                                   because we're returning failure, and it didn't
1392                                   get power cycled */
1393                                chip->state = chip->oldstate;
1394                                wake_up(&chip->wq);
1395                        }
1396                        spin_unlock_bh(chip->mutex);
1397                }
1398        } 
1399        
1400        return ret;
1401}
1402
1403static void cfi_staa_resume(struct mtd_info *mtd)
1404{
1405        struct map_info *map = mtd->priv;
1406        struct cfi_private *cfi = map->fldrv_priv;
1407        int i;
1408        struct flchip *chip;
1409
1410        for (i=0; i<cfi->numchips; i++) {
1411        
1412                chip = &cfi->chips[i];
1413
1414                spin_lock_bh(chip->mutex);
1415                
1416                /* Go to known state. Chip may have been power cycled */
1417                if (chip->state == FL_PM_SUSPENDED) {
1418                        cfi_write(map, CMD(0xFF), 0);
1419                        chip->state = FL_READY;
1420                        wake_up(&chip->wq);
1421                }
1422
1423                spin_unlock_bh(chip->mutex);
1424        }
1425}
1426
1427static void cfi_staa_destroy(struct mtd_info *mtd)
1428{
1429        struct map_info *map = mtd->priv;
1430        struct cfi_private *cfi = map->fldrv_priv;
1431        kfree(cfi->cmdset_priv);
1432        kfree(cfi);
1433}
1434
/* On kernels older than 2.2.18 (0x20212) the module entry points must
 * literally be named init_module/cleanup_module; alias them. */
#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE)
#define cfi_staa_init init_module
#define cfi_staa_exit cleanup_module
#endif

/* Name under which this command-set handler is registered; the CFI
 * probe code looks it up by this string. */
static char im_name[]="cfi_cmdset_0020";

/* Module init: export cfi_cmdset_0020() through the inter_module
 * registry so the generic CFI probe can find this command set. */
mod_init_t cfi_staa_init(void)
{
        inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
        return 0;
}

/* Module exit: withdraw the registration made in cfi_staa_init(). */
mod_exit_t cfi_staa_exit(void)
{
        inter_module_unregister(im_name);
}

module_init(cfi_staa_init);
module_exit(cfi_staa_exit);
1455