/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * This code is GPL
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_RETRIES 3

#define SST49LF004B	0x0060
#define SST49LF040B	0x0050
#define SST49LF008A	0x005a
#define AT49BV6416	0x00d6

/*
 * Status Register bit description. Used by flash devices that don't
 * support DQ polling (e.g. HyperFlash)
 */
#define CFI_SR_DRB	BIT(7)
#define CFI_SR_ESB	BIT(5)
#define CFI_SR_PSB	BIT(4)
#define CFI_SR_WBASB	BIT(3)
#define CFI_SR_SLSB	BIT(1)
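
/*
 * Illustrative sketch (added, not part of the original driver): how the
 * status register bits above decode. CFI_SR_DRB set means the device is
 * ready; ESB/PSB flag a failed erase/program; WBASB flags an aborted
 * buffered program; SLSB means the target sector was write-protected.
 */
static inline bool __maybe_unused cfi_sr_op_failed(unsigned long chipstatus)
{
	/* An operation failed if either the erase or program status bit is set. */
	return chipstatus & (CFI_SR_ESB | CFI_SR_PSB);
}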

static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#if !FORCE_WORD_WRITE
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
#endif
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
					 size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
					 size_t *, u_char *);
static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
					  size_t *, const u_char *);
static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);

static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};

/*
 * Use status register to poll for Erase/write completion when DQ is not
 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
 * CFI Primary Vendor-Specific Extended Query table 1.5
 */
static int cfi_use_status_reg(struct cfi_private *cfi)
{
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

	return extp && extp->MinorVersion >= '5' &&
		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}
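
/*
 * Note (added commentary): the SoftwareFeatures field only exists from
 * extended-query minor version '5' onwards, hence the version gate above.
 * The mask check insists that the device advertises status-register
 * polling *instead of* DQ polling before we trust the status register.
 */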

static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;

	if (!cfi_use_status_reg(cfi))
		return 0;

	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	status = map_read(map, adr);

	/* The error bits are invalid while the chip is still busy */
	if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
		return 0;

	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		if (chipstatus & CFI_SR_ESB)
			pr_err("%s erase operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_PSB)
			pr_err("%s program operation failed, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_WBASB)
			pr_err("%s buffer program command aborted, status %lx\n",
			       map->name, chipstatus);
		if (chipstatus & CFI_SR_SLSB)
			pr_err("%s sector write protected, status %lx\n",
			       map->name, chipstatus);

		/* Erase/Program status bits are set on the operation failure */
		if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
			return 1;
	}
	return 0;
}

/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone who coded this */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif

#if !FORCE_WORD_WRITE
static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
		return;

	if (cfi->cfiq->BufWriteTimeoutTyp) {
		pr_debug("Using buffer write method\n");
		mtd->_write = cfi_amdstd_write_buffers;
	}
}
#endif /* !FORCE_WORD_WRITE */

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd)
{
	/* Setup for chips with a secsi area */
	mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->_erase = cfi_amdstd_erase_chip;
	}

}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd)
{
	mtd->_lock = cfi_atmel_lock;
	mtd->_unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}

static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate
	 * on the same memory. This is not allowed according to CFI, so we
	 * just pick the sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;

	cfi->sector_erase_cmd = CMD(0x50);
}

static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_sst39vf_rev_b(mtd);

	/*
	 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
	 * it should report a size of 8KBytes (0x0020*256).
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
	pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
		mtd->name);
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
			mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
			mtd->name);
	}
}

static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * S29NS512P flash uses more than 8 bits to report number of sectors,
	 * which is not permitted by CFI.
	 */
	cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
	pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
		mtd->name);
}

/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf },
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf },
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf },
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf },
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b },
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b },
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b },
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b },
	{ 0, 0, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize },
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ 0, 0, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/*
	 * The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
	{ 0, 0, NULL }
};
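
/*
 * Note (added commentary): cfi_fixup() (declared in linux/mtd/cfi.h) walks
 * one of the tables above and invokes every entry whose manufacturer and
 * device id match the probed chip, so quirks are applied in table order
 * during cfi_cmdset_0002() below.
 */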


static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE "  Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			       extp->MinorVersion);
			extp->MajorVersion = '1';
		}
	}

	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
	}
}

static int is_m29ew(struct cfi_private *cfi)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
	     (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
		return 1;
	return 0;
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
 * Some revisions of the M29EW suffer from erase suspend hang ups. In
 * particular, it can occur when the sequence
 * Erase Confirm -> Suspend -> Program -> Resume
 * causes a lockup due to internal timing issues. The consequence is that the
 * erase cannot be resumed without inserting a dummy command after programming
 * and prior to resuming. The work-around is to issue a dummy write cycle
 * that will flush the internal state machine.
 */
static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
					  unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	/* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}

/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xFF value). In particular,
 * the ERASE state machine might be stuck.
 * A dummy read to the device (any address) will cause the device to exit
 * the current state and return to read array mode.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue, see Micron TN-13-07.
	 * Worst case delay must be 500us.
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase = cfi_amdstd_erase_varsize;
	mtd->_write = cfi_amdstd_write_words;
	mtd->_read = cfi_amdstd_read;
	mtd->_sync = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

	pr_debug("MTD %s(): write buffer size %d\n", __func__,
		 mtd->writebufsize);

	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1,
			 * 1.2, 1.3, 1.4 and 1.5.
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

#ifdef CONFIG_OF
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO "  Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			}
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;

					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the timeout max according to timeout field
		 * of struct cfi_ident that probed from chip's CFI area, if
		 * available. Specify a minimum of 2000us, in case the CFI
		 * data is wrong.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
		else
			cfi->chips[i].buffer_write_time_max = 0;

		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
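
/*
 * Usage sketch (added, not part of the original driver): a map driver does
 * not call cfi_cmdset_0002() directly.  It runs the generic CFI probe,
 * which dispatches here based on the primary vendor command set ID
 * (0x0002).  Roughly, with error handling omitted and the map fields
 * assumed to be filled in by board code:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		mtd_device_register(mtd, NULL, 0);
 */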
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);

static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry and erase region map */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
					  GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word d, t;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);
		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		d = map_read(map, addr);

		return map_word_andequal(map, d, ready, ready);
	}

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
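
/*
 * Note (added commentary): the double read above exploits the AMD "toggle
 * bit" convention: while a program or erase is in progress, DQ6 flips on
 * every read, so two successive reads differ.  Once they compare equal,
 * the array data is stable and the chip is ready.
 */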

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Error are indicated by toggling bits or bits held with the wrong value,
 * or with bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 *
 */
static int __xipram chip_good(struct map_info *map, struct flchip *chip,
			      unsigned long addr, map_word expected)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word oldd, curd;

	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);

		/*
		 * For chips that support status register, check device
		 * ready bit
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		curd = map_read(map, addr);

		return map_word_andequal(map, curd, ready, ready);
	}

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return map_word_equal(map, oldd, curd) &&
	       map_word_equal(map, curd, expected);
}
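
/*
 * Note (added commentary): chip_good() differs from chip_ready() in that it
 * also compares the stable array value against the expected datum, so a
 * program that completed but left wrong data behind still fails.  When
 * status-register polling is in use there is no datum to compare, so only
 * the ready bit is checked.
 */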

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, chip, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		return 0;

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* Do not allow suspend iff read/write to EB address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, chip, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state
			   FL_ERASE_SUSPENDING, so we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		fallthrough;
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}


static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch (chip->oldstate) {
	case FL_ERASING:
		cfi_fixup_m29ew_erase_suspend(map,
					      chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt what so ever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Also configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remaining
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Correct Erase Suspend Hangups for M29EW */
			cfi_fixup_m29ew_erase_suspend(map, adr);
			/* Resume the write or erase operation */
			map_write(map, cfi->sector_erase_cmd, adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  The queueing and scheduling
 * are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}


static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			loff_t adr, size_t len, u_char *buf, size_t grouplen);

static inline void otp_enter(struct map_info *map, struct flchip *chip,
			     loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}

static inline void otp_exit(struct map_info *map, struct flchip *chip,
			    loff_t adr, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
}
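
/*
 * Note (added commentary): otp_enter()/otp_exit() bracket accesses to the
 * one-time-programmable "SecSi" region.  0xAA/0x55 at the two unlock
 * addresses is the standard AMD unlock handshake; 0x88 then switches the
 * array to the SecSi sector, and 0x90 followed by 0x00 exits back to
 * normal read mode.  Cached ranges are invalidated because the same bus
 * addresses now map different data.
 */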

static inline int do_read_secsi_onechip(struct map_info *map,
					struct flchip *chip, loff_t adr,
					size_t len, u_char *buf,
					size_t grouplen)
{
	DECLARE_WAITQUEUE(wait, current);

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	otp_enter(map, chip, adr, len);
	map_copy_from(map, buf, adr, len);
	otp_exit(map, chip, adr, len);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
					    thislen, buf, 0);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode);

static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
			size_t len, u_char *buf, size_t grouplen)
{
	int ret;
	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);

		if (n != map_bankwidth(map)) {
			/* partial write of a word, load old contents */
			otp_enter(map, chip, bus_ofs, map_bankwidth(map));
			datum = map_read(map, bus_ofs);
			otp_exit(map, chip, bus_ofs, map_bankwidth(map));
		}

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}

	return 0;
}

static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
		       size_t len, u_char *buf, size_t grouplen)
{
	struct cfi_private *cfi = map->fldrv_priv;
	uint8_t lockreg;
	unsigned long timeo;
	int ret;

	/* make sure area matches group boundaries */
	if ((adr != 0) || (len != grouplen))
		return -EINVAL;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	chip->state = FL_LOCKING;

	/* Enter lock register command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* read lock register */
	lockreg = cfi_read_query(map, 0);

	/* clear bit 0 to protect extended memory block */
	lockreg &= ~0x01;

	/* write lock register */
	map_write(map, CMD(0xA0), chip->start);
	map_write(map, CMD(lockreg), chip->start);

	/* wait for chip to become ready */
	timeo = jiffies + msecs_to_jiffies(2);
	for (;;) {
		if (chip_ready(map, chip, adr))
			break;

		if (time_after(jiffies, timeo)) {
			pr_err("Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}
		UDELAY(map, chip, 0, 1);
	}

	/* exit protection commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char *buf,
			       otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ofs_factor = cfi->interleave * cfi->device_type;
	unsigned long base;
	int chipnum;
	struct flchip *chip;
	uint8_t otp, lockreg;
	int ret;

	size_t user_size, factory_size, otpsize;
	loff_t user_offset, factory_offset, otpoffset;
	int user_locked = 0, otplocked;

	*retlen = 0;

	for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
		chip = &cfi->chips[chipnum];
		factory_size = 0;
		user_size = 0;

		/* Micron M29EW family */
		if (is_m29ew(cfi)) {
			base = chip->start;

			/* check whether secsi area is factory locked
			   or user lockable */
			mutex_lock(&chip->mutex);
			ret = get_chip(map, chip, base, FL_CFI_QUERY);
			if (ret) {
				mutex_unlock(&chip->mutex);
				return ret;
			}
			cfi_qry_mode_on(base, map, cfi);
			otp = cfi_read_query(map, base + 0x3 * ofs_factor);
			cfi_qry_mode_off(base, map, cfi);
			put_chip(map, chip, base);
			mutex_unlock(&chip->mutex);

			if (otp & 0x80) {
				/* factory locked */
				factory_offset = 0;
				factory_size = 0x100;
			} else {
				/* customer lockable */
				user_offset = 0;
				user_size = 0x100;

				mutex_lock(&chip->mutex);
				ret = get_chip(map, chip, base, FL_LOCKING);
				if (ret) {
					mutex_unlock(&chip->mutex);
					return ret;
				}

				/* Enter lock register command */
				cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
						 chip->start, map, cfi,
						 cfi->device_type, NULL);
				/* read lock register */
				lockreg = cfi_read_query(map, 0);
				/* exit protection commands */
				map_write(map, CMD(0x90), chip->start);
				map_write(map, CMD(0x00), chip->start);
				put_chip(map, chip, chip->start);
				mutex_unlock(&chip->mutex);

				user_locked = ((lockreg & 0x01) == 0x00);
			}
		}

		otpsize = user_regs ? user_size : factory_size;
		if (!otpsize)
			continue;
		otpoffset = user_regs ? user_offset : factory_offset;
		otplocked = user_regs ? user_locked : 1;

		if (!action) {
			/* return otpinfo */
			struct otp_info *otpinfo;
			len -= sizeof(*otpinfo);
			if (len <= 0)
				return -ENOSPC;
			otpinfo = (struct otp_info *)buf;
			otpinfo->start = from;
			otpinfo->length = otpsize;
			otpinfo->locked = otplocked;
			buf += sizeof(*otpinfo);
			*retlen += sizeof(*otpinfo);
			from += otpsize;
		} else if ((from < otpsize) && (len > 0)) {
			size_t size;
			size = (len < otpsize - from) ? len : otpsize - from;
			ret = action(map, chip, otpoffset + from, size, buf,
				     otpsize);
			if (ret < 0)
				return ret;

			buf += size;
			len -= size;
			*retlen += size;
			from = 0;
		} else {
			from -= otpsize;
		}
	}
	return 0;
}

static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 0);
}

static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
					 size_t *retlen, struct otp_info *buf)
{
	return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				   NULL, 1);
}

static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 0);
}

static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len, size_t *retlen,
					 u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen,
				   buf, do_read_secsi_onechip, 1);
}

static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					  size_t len, size_t *retlen,
					  const u_char *buf)
{
	return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
				   do_otp_write, 1);
}

static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
					 size_t len)
{
	size_t retlen;
	return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
				   do_otp_lock, 1);
}

static int __xipram do_write_oneword_once(struct map_info *map,
					  struct flchip *chip,
					  unsigned long adr, map_word datum,
					  int mode, struct cfi_private *cfi)
{
	unsigned long timeo;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical which can be far too short
	 * depending of the conditions.  The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, datum)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			ret = -EIO;
			break;
		}

		if (chip_good(map, chip, adr, datum)) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	return ret;
}
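
/*
 * Note (added commentary): the 0xAA @ unlock1, 0x55 @ unlock2, 0xA0 @
 * unlock1 sequence above is the AMD-standard three-cycle "program" command;
 * the fourth bus write delivers the datum to the target address, after
 * which the chip programs autonomously and is polled for completion.
 */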

static int __xipram do_write_oneword_start(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, int mode)
{
	int ret;

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (mode == FL_OTP_WRITE)
		otp_enter(map, chip, adr, map_bankwidth(map));

	return ret;
}

static void __xipram do_write_oneword_done(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, int mode)
{
	if (mode == FL_OTP_WRITE)
		otp_exit(map, chip, adr, map_bankwidth(map));

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);

	mutex_unlock(&chip->mutex);
}

static int __xipram do_write_oneword_retry(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, map_word datum,
					   int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
	if (ret) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES) {
			ret = 0;
			goto retry;
		}
	}
	xip_enable(map, chip, adr);

	return ret;
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum,
				     int mode)
{
	int ret;

	adr += chip->start;

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
		 datum.x[0]);

	ret = do_write_oneword_start(map, chip, adr, mode);
	if (ret)
		return ret;

	ret = do_write_oneword_retry(map, chip, adr, datum, mode);

	do_write_oneword_done(map, chip, adr, mode);

	return ret;
}


static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

#if !FORCE_WORD_WRITE
static int __xipram do_write_buffer_wait(struct map_info *map,
					 struct flchip *chip, unsigned long adr,
					 map_word datum)
{
	unsigned long timeo;
	unsigned long u_write_timeout;
	int ret = 0;

	/*
	 * Timeout is calculated according to CFI data, if available.
	 * See more comments in cfi_cmdset_0002().
	 */
	u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
	timeo = jiffies + u_write_timeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, datum)) {
			pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
			       __func__, adr);
			ret = -EIO;
			break;
		}

		if (chip_good(map, chip, adr, datum)) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	return ret;
}

static void __xipram do_write_buffer_reset(struct map_info *map,
					   struct flchip *chip,
					   struct cfi_private *cfi)
{
	/*
	 * Recovery from write-buffer programming failures requires
	 * the write-to-buffer-reset sequence.  Since the last part
	 * of the sequence also works as a normal reset, we can run
	 * the same commands regardless of why we are here.
	 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* FIXME - should have reset delay before continuing */
}

/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	ret = do_write_buffer_wait(map, chip, adr, datum);
	if (ret)
		do_write_buffer_reset(map, chip, cfi);

	xip_enable(map, chip, adr);

	chip->state = FL_READY;
	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
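
/*
 * Note (added commentary): a buffered program is therefore: unlock
 * (0xAA/0x55), 0x25 "write to buffer" at the sector, a word count of
 * (words - 1), the data words themselves, and finally 0x29 "program buffer
 * to flash" to start the operation.  Completion is then polled at the
 * *last* address written, which is why 'adr' is advanced by 'z' above.
 */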


static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
#endif /* !FORCE_WORD_WRITE */

/*
 * Wait for the flash chip to become ready to write data
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 */
static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
				 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int retries = 10;
	int i;

	/*
	 * If the driver thinks the chip is idle, and no toggle bits
	 * are changing, then the chip is actually idle for sure.
	 */
	if (chip->state == FL_READY && chip_ready(map, chip, adr))
		return 0;

	/*
	 * Try several times to reset the chip and then wait for it
	 * to become idle. The upper limit of a few milliseconds of
	 * delay isn't a big problem: the kernel is dying anyway. It
	 * is more important to save the messages.
	 */
	while (retries > 0) {
		const unsigned long timeo = (HZ / 1000) + 1;

		/* send the reset command */
		map_write(map, CMD(0xF0), chip->start);

		/* wait for the chip to become ready */
		for (i = 0; i < jiffies_to_usecs(timeo); i++) {
			if (chip_ready(map, chip, adr))
				return 0;

			udelay(1);
		}

		retries--;
	}

	/* the chip never became ready */
	return -EBUSY;
}
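
/*
 * Note (added commentary): because the panic path runs with scheduling
 * unavailable, the helpers below spin with udelay() instead of sleeping on
 * chip->wq, and no chip mutex is taken; correctness relies on every other
 * CPU already having been stopped by the panic machinery.
 */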

/*
 * Write out one word of data to a single flash chip during a kernel panic
 *
 * This is only called during the panic_write() path. When panic_write()
 * is called, the kernel is in the process of a panic, and will soon be
 * dead. Therefore we don't take any locks, and attempt to get access
 * to the chip as soon as possible.
 *
 * The implementation of this routine is intentionally similar to
 * do_write_oneword(), in order to ease code maintenance.
 */
static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
				  unsigned long adr, map_word datum)
{
	const unsigned long uWriteTimeout = (HZ / 1000) + 1;
	struct cfi_private *cfi = map->fldrv_priv;
	int retry_cnt = 0;
	map_word oldd;
	int ret;
	int i;

	adr += chip->start;

	ret = cfi_amdstd_panic_wait(map, chip, adr);
	if (ret)
		return ret;

	pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
		 __func__, adr, datum.x[0]);

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		goto op_done;
	}

	ENABLE_VPP(map);

retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);

	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
		if (chip_ready(map, chip, adr))
			break;

		udelay(1);
	}

	if (!chip_good(map, chip, adr, datum) ||
	    cfi_check_err_status(map, chip, adr)) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES)
			goto retry;

		ret = -EIO;
	}

op_done:
	DISABLE_VPP(map);
	return ret;
}
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314
2315static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2316 size_t *retlen, const u_char *buf)
2317{
2318 struct map_info *map = mtd->priv;
2319 struct cfi_private *cfi = map->fldrv_priv;
2320 unsigned long ofs, chipstart;
2321 int ret;
2322 int chipnum;
2323
2324 chipnum = to >> cfi->chipshift;
2325 ofs = to - (chipnum << cfi->chipshift);
2326 chipstart = cfi->chips[chipnum].start;
2327
2328
2329 if (ofs & (map_bankwidth(map) - 1)) {
2330 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2331 int i = ofs - bus_ofs;
2332 int n = 0;
2333 map_word tmp_buf;
2334
2335 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2336 if (ret)
2337 return ret;
2338
2339
2340 tmp_buf = map_read(map, bus_ofs + chipstart);
2341
2342
2343 n = min_t(int, len, map_bankwidth(map) - i);
2344
2345 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2346
2347 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2348 bus_ofs, tmp_buf);
2349 if (ret)
2350 return ret;
2351
2352 ofs += n;
2353 buf += n;
2354 (*retlen) += n;
2355 len -= n;
2356
2357 if (ofs >> cfi->chipshift) {
2358 chipnum++;
2359 ofs = 0;
2360 if (chipnum == cfi->numchips)
2361 return 0;
2362 }
2363 }
2364
2365
2366 while (len >= map_bankwidth(map)) {
2367 map_word datum;
2368
2369 datum = map_word_load(map, buf);
2370
2371 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2372 ofs, datum);
2373 if (ret)
2374 return ret;
2375
2376 ofs += map_bankwidth(map);
2377 buf += map_bankwidth(map);
2378 (*retlen) += map_bankwidth(map);
2379 len -= map_bankwidth(map);
2380
2381 if (ofs >> cfi->chipshift) {
2382 chipnum++;
2383 ofs = 0;
2384 if (chipnum == cfi->numchips)
2385 return 0;
2386
2387 chipstart = cfi->chips[chipnum].start;
2388 }
2389 }
2390
2391
2392 if (len & (map_bankwidth(map) - 1)) {
2393 map_word tmp_buf;
2394
2395 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2396 if (ret)
2397 return ret;
2398
2399 tmp_buf = map_read(map, ofs + chipstart);
2400
2401 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2402
2403 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2404 ofs, tmp_buf);
2405 if (ret)
2406 return ret;
2407
2408 (*retlen) += len;
2409 }
2410
2411 return 0;
2412}
2413
2414
2415
2416
2417
2418
2419static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2420{
2421 struct cfi_private *cfi = map->fldrv_priv;
2422 unsigned long timeo = jiffies + HZ;
	unsigned long adr;
2424 DECLARE_WAITQUEUE(wait, current);
2425 int ret;
2426 int retry_cnt = 0;
2427
2428 adr = cfi->addr_unlock1;
2429
2430 mutex_lock(&chip->mutex);
2431 ret = get_chip(map, chip, adr, FL_ERASING);
2432 if (ret) {
2433 mutex_unlock(&chip->mutex);
2434 return ret;
2435 }
2436
2437 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2438 __func__, chip->start);
2439
2440 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2441 ENABLE_VPP(map);
2442 xip_disable(map, chip, adr);
2443
2444 retry:
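	/*
	 * Six-cycle chip-erase sequence: two unlock cycles, the 0x80
	 * erase setup command, two more unlock cycles, then 0x10
	 * ("erase entire chip").
	 */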
2445 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2446 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2447 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2448 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2449 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2450 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2451
2452 chip->state = FL_ERASING;
2453 chip->erase_suspended = 0;
2454 chip->in_progress_block_addr = adr;
2455 chip->in_progress_block_mask = ~(map->size - 1);
2456
2457 INVALIDATE_CACHE_UDELAY(map, chip,
2458 adr, map->size,
2459 chip->erase_time*500);
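	/*
	 * The delay above is roughly half of the typical erase time
	 * (assuming chip->erase_time holds the CFI typical block-erase
	 * time in ms, while the delay argument is in microseconds), so
	 * polling does not start while the erase is guaranteed busy.
	 */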
2460
2461 timeo = jiffies + (HZ*20);
2462
2463 for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
2467 add_wait_queue(&chip->wq, &wait);
2468 mutex_unlock(&chip->mutex);
2469 schedule();
2470 remove_wait_queue(&chip->wq, &wait);
2471 mutex_lock(&chip->mutex);
2472 continue;
2473 }
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
2478 chip->erase_suspended = 0;
2479 }
2480
2481 if (chip_good(map, chip, adr, map_word_ff(map))) {
2482 if (cfi_check_err_status(map, chip, adr))
2483 ret = -EIO;
2484 break;
2485 }
2486
2487 if (time_after(jiffies, timeo)) {
2488 printk(KERN_WARNING "MTD %s(): software timeout\n",
2489 __func__);
2490 ret = -EIO;
2491 break;
2492 }
2493
2494
2495 UDELAY(map, chip, adr, 1000000/HZ);
2496 }
2497
	if (ret) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

2503 if (++retry_cnt <= MAX_RETRIES) {
2504 ret = 0;
2505 goto retry;
2506 }
2507 }
2508
2509 chip->state = FL_READY;
2510 xip_enable(map, chip, adr);
2511 DISABLE_VPP(map);
2512 put_chip(map, chip, adr);
2513 mutex_unlock(&chip->mutex);
2514
2515 return ret;
2516}
2517
2518
2519static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2520{
2521 struct cfi_private *cfi = map->fldrv_priv;
2522 unsigned long timeo = jiffies + HZ;
2523 DECLARE_WAITQUEUE(wait, current);
2524 int ret;
2525 int retry_cnt = 0;
2526
2527 adr += chip->start;
2528
2529 mutex_lock(&chip->mutex);
2530 ret = get_chip(map, chip, adr, FL_ERASING);
2531 if (ret) {
2532 mutex_unlock(&chip->mutex);
2533 return ret;
2534 }
2535
2536 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2537 __func__, adr);
2538
2539 XIP_INVAL_CACHED_RANGE(map, adr, len);
2540 ENABLE_VPP(map);
2541 xip_disable(map, chip, adr);
2542
2543 retry:
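	/*
	 * Sector-erase sequence: two unlock cycles, the 0x80 erase setup
	 * command, two more unlock cycles, then the sector erase command
	 * (cfi->sector_erase_cmd, usually 0x30) at the sector address.
	 */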
2544 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2545 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2546 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2547 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2548 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2549 map_write(map, cfi->sector_erase_cmd, adr);
2550
2551 chip->state = FL_ERASING;
2552 chip->erase_suspended = 0;
2553 chip->in_progress_block_addr = adr;
2554 chip->in_progress_block_mask = ~(len - 1);
2555
2556 INVALIDATE_CACHE_UDELAY(map, chip,
2557 adr, len,
2558 chip->erase_time*500);
2559
2560 timeo = jiffies + (HZ*20);
2561
2562 for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
2566 add_wait_queue(&chip->wq, &wait);
2567 mutex_unlock(&chip->mutex);
2568 schedule();
2569 remove_wait_queue(&chip->wq, &wait);
2570 mutex_lock(&chip->mutex);
2571 continue;
2572 }
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
2577 chip->erase_suspended = 0;
2578 }
2579
2580 if (chip_good(map, chip, adr, map_word_ff(map))) {
2581 if (cfi_check_err_status(map, chip, adr))
2582 ret = -EIO;
2583 break;
2584 }
2585
2586 if (time_after(jiffies, timeo)) {
2587 printk(KERN_WARNING "MTD %s(): software timeout\n",
2588 __func__);
2589 ret = -EIO;
2590 break;
2591 }
2592
2593
2594 UDELAY(map, chip, adr, 1000000/HZ);
2595 }
2596
	if (ret) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

2602 if (++retry_cnt <= MAX_RETRIES) {
2603 ret = 0;
2604 goto retry;
2605 }
2606 }
2607
2608 chip->state = FL_READY;
2609 xip_enable(map, chip, adr);
2610 DISABLE_VPP(map);
2611 put_chip(map, chip, adr);
2612 mutex_unlock(&chip->mutex);
2613 return ret;
2614}
2615
2616
2617static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2618{
2619 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2620 instr->len, NULL);
2621}
2622
2623
2624static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2625{
2626 struct map_info *map = mtd->priv;
2627 struct cfi_private *cfi = map->fldrv_priv;
2628
	/* chip erase is only valid when the request covers the whole device */
	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;
2634
2635 return do_erase_chip(map, &cfi->chips[0]);
2636}
2637
2638static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2639 unsigned long adr, int len, void *thunk)
2640{
2641 struct cfi_private *cfi = map->fldrv_priv;
2642 int ret;
2643
2644 mutex_lock(&chip->mutex);
2645 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2646 if (ret)
2647 goto out_unlock;
2648 chip->state = FL_LOCKING;
2649
2650 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2651
2652 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2653 cfi->device_type, NULL);
2654 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2655 cfi->device_type, NULL);
2656 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2657 cfi->device_type, NULL);
2658 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2659 cfi->device_type, NULL);
2660 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2661 cfi->device_type, NULL);
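	/* 0x40 written to the sector address protects (locks) that sector */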
2662 map_write(map, CMD(0x40), chip->start + adr);
2663
2664 chip->state = FL_READY;
2665 put_chip(map, chip, adr + chip->start);
2666 ret = 0;
2667
2668out_unlock:
2669 mutex_unlock(&chip->mutex);
2670 return ret;
2671}
2672
2673static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2674 unsigned long adr, int len, void *thunk)
2675{
2676 struct cfi_private *cfi = map->fldrv_priv;
2677 int ret;
2678
2679 mutex_lock(&chip->mutex);
2680 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2681 if (ret)
2682 goto out_unlock;
2683 chip->state = FL_UNLOCKING;
2684
2685 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2686
2687 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2688 cfi->device_type, NULL);
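	/* 0x70 at the sector address lifts the protection again */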
2689 map_write(map, CMD(0x70), adr);
2690
2691 chip->state = FL_READY;
2692 put_chip(map, chip, adr + chip->start);
2693 ret = 0;
2694
2695out_unlock:
2696 mutex_unlock(&chip->mutex);
2697 return ret;
2698}
2699
2700static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2701{
2702 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
2703}
2704
2705static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2706{
2707 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2708}
2709
2710
2711
2712
2713
2714struct ppb_lock {
2715 struct flchip *chip;
2716 unsigned long adr;
2717 int locked;
2718};
2719
2720#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
2721#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
2722#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
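
/*
 * do_ppb_xxlock() is driven through cfi_varsize_frob(); the thunk
 * argument selects whether a sector is locked, unlocked, or merely
 * queried for its current PPB state.
 */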
2723
2724static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2725 struct flchip *chip,
2726 unsigned long adr, int len, void *thunk)
2727{
2728 struct cfi_private *cfi = map->fldrv_priv;
2729 unsigned long timeo;
2730 int ret;
2731
2732 adr += chip->start;
2733 mutex_lock(&chip->mutex);
2734 ret = get_chip(map, chip, adr, FL_LOCKING);
2735 if (ret) {
2736 mutex_unlock(&chip->mutex);
2737 return ret;
2738 }
2739
2740 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2741
2742 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2743 cfi->device_type, NULL);
2744 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2745 cfi->device_type, NULL);
2746
	/* 0xC0 after the two unlock cycles enters the sector-protection (PPB) command set */
	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
2749
2750 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
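		/* program the PPB: 0xA0 setup, then 0x00 data at the sector address */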
2751 chip->state = FL_LOCKING;
2752 map_write(map, CMD(0xA0), adr);
2753 map_write(map, CMD(0x00), adr);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		/*
		 * Unlocking of one specific sector is not supported, so we
		 * have to unlock all sectors of this device instead
		 */
		chip->state = FL_UNLOCKING;
2760 map_write(map, CMD(0x80), chip->start);
2761 map_write(map, CMD(0x30), chip->start);
2762 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2763 chip->state = FL_JEDEC_QUERY;
2764
		/* the PPB reads back 0 when the sector is protected, so invert */
		ret = !cfi_read_query(map, adr);
2766 } else
		BUG();

	/*
	 * Wait for some time as unlocking of all sectors takes quite long
	 */
	timeo = jiffies + msecs_to_jiffies(2000);
2773 for (;;) {
2774 if (chip_ready(map, chip, adr))
2775 break;
2776
2777 if (time_after(jiffies, timeo)) {
2778 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2779 ret = -EIO;
2780 break;
2781 }
2782
2783 UDELAY(map, chip, adr, 1);
2784 }
2785
2786
	/* exit the sector-protection command set */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);
2789
2790 chip->state = FL_READY;
2791 put_chip(map, chip, adr);
2792 mutex_unlock(&chip->mutex);
2793
2794 return ret;
2795}
2796
2797static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2798 uint64_t len)
2799{
2800 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2801 DO_XXLOCK_ONEBLOCK_LOCK);
2802}
2803
2804static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2805 uint64_t len)
2806{
2807 struct mtd_erase_region_info *regions = mtd->eraseregions;
2808 struct map_info *map = mtd->priv;
2809 struct cfi_private *cfi = map->fldrv_priv;
2810 struct ppb_lock *sect;
2811 unsigned long adr;
2812 loff_t offset;
2813 uint64_t length;
2814 int chipnum;
2815 int i;
2816 int sectors;
2817 int ret;
	int max_sectors;

	/*
	 * PPB unlocking always unlocks all sectors of the flash chip.
	 * We need to re-lock all previously locked sectors. So lets
	 * first check the locking status of all sectors and save
	 * it for future use.
	 */
	max_sectors = 0;
2827 for (i = 0; i < mtd->numeraseregions; i++)
2828 max_sectors += regions[i].numblocks;
2829
2830 sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2831 if (!sect)
		return -ENOMEM;

	/*
	 * This code to walk all sectors is a slightly modified version
	 * of the cfi_varsize_frob() code.
	 */
	i = 0;
2839 chipnum = 0;
2840 adr = 0;
2841 sectors = 0;
2842 offset = 0;
2843 length = mtd->size;
2844
2845 while (length) {
		int size = regions[i].erasesize;

		/*
		 * Only test sectors that shall not be unlocked. The other
		 * sectors shall be unlocked, so lets keep their locking
		 * status at "unlocked" (locked=0) for the final re-locking.
		 */
		if ((offset < ofs) || (offset >= (ofs + len))) {
2854 sect[sectors].chip = &cfi->chips[chipnum];
2855 sect[sectors].adr = adr;
2856 sect[sectors].locked = do_ppb_xxlock(
2857 map, &cfi->chips[chipnum], adr, 0,
2858 DO_XXLOCK_ONEBLOCK_GETLOCK);
2859 }
2860
2861 adr += size;
2862 offset += size;
2863 length -= size;
2864
		/* advance to the next erase region when this one is exhausted */
		if (offset == regions[i].offset + size * regions[i].numblocks)
			i++;
2867
2868 if (adr >> cfi->chipshift) {
2869 if (offset >= (ofs + len))
2870 break;
2871 adr = 0;
2872 chipnum++;
2873
2874 if (chipnum >= cfi->numchips)
2875 break;
2876 }
2877
2878 sectors++;
2879 if (sectors >= max_sectors) {
2880 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2881 max_sectors);
2882 kfree(sect);
2883 return -EINVAL;
2884 }
2885 }
2886
2887
2888 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2889 DO_XXLOCK_ONEBLOCK_UNLOCK);
2890 if (ret) {
2891 kfree(sect);
2892 return ret;
2893 }
2894
2895
2896
2897
2898
2899 for (i = 0; i < sectors; i++) {
2900 if (sect[i].locked)
2901 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2902 DO_XXLOCK_ONEBLOCK_LOCK);
2903 }
2904
2905 kfree(sect);
2906 return ret;
2907}
2908
2909static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2910 uint64_t len)
2911{
2912 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2913 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
2914}
2915
2916static void cfi_amdstd_sync (struct mtd_info *mtd)
2917{
2918 struct map_info *map = mtd->priv;
2919 struct cfi_private *cfi = map->fldrv_priv;
2920 int i;
2921 struct flchip *chip;
2923 DECLARE_WAITQUEUE(wait, current);
2924
	for (i = 0; i < cfi->numchips; i++) {
2926 chip = &cfi->chips[i];
2927
2928 retry:
2929 mutex_lock(&chip->mutex);
2930
		switch (chip->state) {
2932 case FL_READY:
2933 case FL_STATUS:
2934 case FL_CFI_QUERY:
2935 case FL_JEDEC_QUERY:
2936 chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/*
			 * No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			fallthrough;
2943 case FL_SYNCING:
2944 mutex_unlock(&chip->mutex);
2945 break;
2946
		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
2950 add_wait_queue(&chip->wq, &wait);
2951
2952 mutex_unlock(&chip->mutex);
2953
2954 schedule();
2955
2956 remove_wait_queue(&chip->wq, &wait);
2957
2958 goto retry;
2959 }
2960 }
2961
2962
2963
2964 for (i--; i >=0; i--) {
2965 chip = &cfi->chips[i];
2966
2967 mutex_lock(&chip->mutex);
2968
2969 if (chip->state == FL_SYNCING) {
2970 chip->state = chip->oldstate;
2971 wake_up(&chip->wq);
2972 }
2973 mutex_unlock(&chip->mutex);
2974 }
2975}
2976
2977
2978static int cfi_amdstd_suspend(struct mtd_info *mtd)
2979{
2980 struct map_info *map = mtd->priv;
2981 struct cfi_private *cfi = map->fldrv_priv;
2982 int i;
2983 struct flchip *chip;
2984 int ret = 0;
2985
	for (i = 0; !ret && i < cfi->numchips; i++) {
2987 chip = &cfi->chips[i];
2988
2989 mutex_lock(&chip->mutex);
2990
		switch (chip->state) {
2992 case FL_READY:
2993 case FL_STATUS:
2994 case FL_CFI_QUERY:
2995 case FL_JEDEC_QUERY:
2996 chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/*
			 * No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
			break;
3003 case FL_PM_SUSPENDED:
3004 break;
3005
3006 default:
3007 ret = -EAGAIN;
3008 break;
3009 }
3010 mutex_unlock(&chip->mutex);
3011 }
3012
3013
3014
3015 if (ret) {
3016 for (i--; i >=0; i--) {
3017 chip = &cfi->chips[i];
3018
3019 mutex_lock(&chip->mutex);
3020
3021 if (chip->state == FL_PM_SUSPENDED) {
3022 chip->state = chip->oldstate;
3023 wake_up(&chip->wq);
3024 }
3025 mutex_unlock(&chip->mutex);
3026 }
3027 }
3028
3029 return ret;
3030}
3031
3032
3033static void cfi_amdstd_resume(struct mtd_info *mtd)
3034{
3035 struct map_info *map = mtd->priv;
3036 struct cfi_private *cfi = map->fldrv_priv;
3037 int i;
3038 struct flchip *chip;
3039
	for (i = 0; i < cfi->numchips; i++) {
3041
3042 chip = &cfi->chips[i];
3043
3044 mutex_lock(&chip->mutex);
3045
3046 if (chip->state == FL_PM_SUSPENDED) {
3047 chip->state = FL_READY;
			/* return the chip to read (array) mode */
			map_write(map, CMD(0xF0), chip->start);
3049 wake_up(&chip->wq);
3050 }
3051 else
3052 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
3053
3054 mutex_unlock(&chip->mutex);
3055 }
3056}
3057
3058
3059
3060
3061
3062
3063
3064
3065static int cfi_amdstd_reset(struct mtd_info *mtd)
3066{
3067 struct map_info *map = mtd->priv;
3068 struct cfi_private *cfi = map->fldrv_priv;
3069 int i, ret;
3070 struct flchip *chip;
3071
3072 for (i = 0; i < cfi->numchips; i++) {
3073
3074 chip = &cfi->chips[i];
3075
3076 mutex_lock(&chip->mutex);
3077
3078 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
3079 if (!ret) {
3080 map_write(map, CMD(0xF0), chip->start);
3081 chip->state = FL_SHUTDOWN;
3082 put_chip(map, chip, chip->start);
3083 }
3084
3085 mutex_unlock(&chip->mutex);
3086 }
3087
3088 return 0;
3089}
3090
3091
3092static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3093 void *v)
3094{
3095 struct mtd_info *mtd;
3096
3097 mtd = container_of(nb, struct mtd_info, reboot_notifier);
3098 cfi_amdstd_reset(mtd);
3099 return NOTIFY_DONE;
3100}
3101
3102
3103static void cfi_amdstd_destroy(struct mtd_info *mtd)
3104{
3105 struct map_info *map = mtd->priv;
3106 struct cfi_private *cfi = map->fldrv_priv;
3107
3108 cfi_amdstd_reset(mtd);
3109 unregister_reboot_notifier(&mtd->reboot_notifier);
3110 kfree(cfi->cmdset_priv);
3111 kfree(cfi->cfiq);
3112 kfree(cfi);
3113 kfree(mtd->eraseregions);
3114}
3115
3116MODULE_LICENSE("GPL");
3117MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3118MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3119MODULE_ALIAS("cfi_cmdset_0006");
3120MODULE_ALIAS("cfi_cmdset_0701");
3121