linux/drivers/mtd/nand/denali.c
   1/*
   2 * NAND Flash Controller Device Driver
   3 * Copyright © 2009-2010, Intel Corporation and its suppliers.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms and conditions of the GNU General Public License,
   7 * version 2, as published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc.,
  16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17 *
  18 */
  19#include <linux/interrupt.h>
  20#include <linux/delay.h>
  21#include <linux/dma-mapping.h>
  22#include <linux/wait.h>
  23#include <linux/mutex.h>
  24#include <linux/slab.h>
  25#include <linux/mtd/mtd.h>
  26#include <linux/module.h>
  27
  28#include "denali.h"
  29
  30MODULE_LICENSE("GPL");
  31
  32/*
  33 * We define a module parameter that allows the user to override
  34 * the hardware and decide what timing mode should be used.
  35 */
  36#define NAND_DEFAULT_TIMINGS    -1
  37
  38static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
  39module_param(onfi_timing_mode, int, S_IRUGO);
  40MODULE_PARM_DESC(onfi_timing_mode,
  41           "Overrides default ONFI setting. -1 indicates use default timings");
  42
  43#define DENALI_NAND_NAME    "denali-nand"
  44
  45/*
  46 * We define a macro here that combines all interrupts this driver uses into
  47 * a single constant value, for convenience.
  48 */
  49#define DENALI_IRQ_ALL  (INTR_STATUS__DMA_CMD_COMP | \
  50                        INTR_STATUS__ECC_TRANSACTION_DONE | \
  51                        INTR_STATUS__ECC_ERR | \
  52                        INTR_STATUS__PROGRAM_FAIL | \
  53                        INTR_STATUS__LOAD_COMP | \
  54                        INTR_STATUS__PROGRAM_COMP | \
  55                        INTR_STATUS__TIME_OUT | \
  56                        INTR_STATUS__ERASE_FAIL | \
  57                        INTR_STATUS__RST_COMP | \
  58                        INTR_STATUS__ERASE_COMP)
  59
  60/*
   61 * indicates whether the internal value for the flash bank is
   62 * currently valid
  63 */
  64#define CHIP_SELECT_INVALID     -1
  65
  66#define SUPPORT_8BITECC         1
  67
  68/*
  69 * This macro divides two integers and rounds fractional values up
  70 * to the nearest integer value.
  71 */
  72#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
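/* e.g. CEIL_DIV(7, 4) == 2 and CEIL_DIV(8, 4) == 2 */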
  73
  74/*
  75 * this macro allows us to convert from an MTD structure to our own
  76 * device context (denali) structure.
  77 */
  78#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)
  79
  80/*
  81 * These constants are defined by the driver to enable common driver
  82 * configuration options.
  83 */
  84#define SPARE_ACCESS            0x41
  85#define MAIN_ACCESS             0x42
  86#define MAIN_SPARE_ACCESS       0x43
  87#define PIPELINE_ACCESS         0x2000
  88
  89#define DENALI_READ     0
  90#define DENALI_WRITE    0x100
  91
  92/* types of device accesses. We can issue commands and get status */
  93#define COMMAND_CYCLE   0
  94#define ADDR_CYCLE      1
  95#define STATUS_CYCLE    2
  96
  97/*
  98 * this is a helper macro that allows us to
  99 * format the bank into the proper bits for the controller
 100 */
 101#define BANK(x) ((x) << 24)
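/*
 * e.g. BANK(1) == 0x01000000: the bank number occupies the top byte of an
 * indexed address
 */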
 102
 103/* forward declarations */
 104static void clear_interrupts(struct denali_nand_info *denali);
 105static uint32_t wait_for_irq(struct denali_nand_info *denali,
 106                                                        uint32_t irq_mask);
 107static void denali_irq_enable(struct denali_nand_info *denali,
 108                                                        uint32_t int_mask);
 109static uint32_t read_interrupt_status(struct denali_nand_info *denali);
 110
 111/*
 112 * Certain operations for the denali NAND controller use an indexed mode to
 113 * read/write data. The operation is performed by writing the address value
 114 * of the command to the device memory followed by the data. This function
 115 * abstracts this common operation.
 116 */
 117static void index_addr(struct denali_nand_info *denali,
 118                                uint32_t address, uint32_t data)
 119{
 120        iowrite32(address, denali->flash_mem);
 121        iowrite32(data, denali->flash_mem + 0x10);
 122}
 123
 124/* Perform an indexed read of the device */
 125static void index_addr_read_data(struct denali_nand_info *denali,
 126                                 uint32_t address, uint32_t *pdata)
 127{
 128        iowrite32(address, denali->flash_mem);
 129        *pdata = ioread32(denali->flash_mem + 0x10);
 130}
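/*
 * Example (see find_valid_banks() and denali_nand_timing_set() below):
 * a Read ID sequence in MODE_11 is issued as three indexed transactions,
 * one per cycle type:
 *
 *   index_addr(denali, MODE_11 | BANK(bank) | 0, 0x90);          command cycle
 *   index_addr(denali, MODE_11 | BANK(bank) | 1, 0);             address cycle
 *   index_addr_read_data(denali, MODE_11 | BANK(bank) | 2, &id); data read
 */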
 131
 132/*
 133 * We need to buffer some data for some of the NAND core routines.
  134 * These helpers manage that buffering.
 135 */
 136static void reset_buf(struct denali_nand_info *denali)
 137{
 138        denali->buf.head = denali->buf.tail = 0;
 139}
 140
 141static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
 142{
 143        denali->buf.buf[denali->buf.tail++] = byte;
 144}
 145
 146/* reads the status of the device */
 147static void read_status(struct denali_nand_info *denali)
 148{
 149        uint32_t cmd;
 150
 151        /* initialize the data buffer to store status */
 152        reset_buf(denali);
 153
 154        cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
 155        if (cmd)
 156                write_byte_to_buf(denali, NAND_STATUS_WP);
 157        else
 158                write_byte_to_buf(denali, 0);
 159}
 160
 161/* resets a specific device connected to the core */
 162static void reset_bank(struct denali_nand_info *denali)
 163{
 164        uint32_t irq_status;
 165        uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT;
 166
 167        clear_interrupts(denali);
 168
 169        iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
 170
 171        irq_status = wait_for_irq(denali, irq_mask);
 172
 173        if (irq_status & INTR_STATUS__TIME_OUT)
 174                dev_err(denali->dev, "reset bank failed.\n");
 175}
 176
 177/* Reset the flash controller */
 178static uint16_t denali_nand_reset(struct denali_nand_info *denali)
 179{
 180        int i;
 181
 182        dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
 183                __FILE__, __LINE__, __func__);
 184
 185        for (i = 0; i < denali->max_banks; i++)
 186                iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
  187                          denali->flash_reg + INTR_STATUS(i));
 188
 189        for (i = 0; i < denali->max_banks; i++) {
 190                iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
 191                while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
 192                        (INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
 193                        cpu_relax();
 194                if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
 195                        INTR_STATUS__TIME_OUT)
 196                        dev_dbg(denali->dev,
 197                        "NAND Reset operation timed out on bank %d\n", i);
 198        }
 199
 200        for (i = 0; i < denali->max_banks; i++)
 201                iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
 202                          denali->flash_reg + INTR_STATUS(i));
 203
 204        return PASS;
 205}
 206
 207/*
 208 * this routine calculates the ONFI timing values for a given mode and
  209 * programs the timing registers accordingly. The mode is determined by
 210 * the get_onfi_nand_para routine.
 211 */
 212static void nand_onfi_timing_set(struct denali_nand_info *denali,
 213                                                                uint16_t mode)
 214{
 215        uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
 216        uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
 217        uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
 218        uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
 219        uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
 220        uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
 221        uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
 222        uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
 223        uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
 224        uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
 225        uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
 226        uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
 227
 228        uint16_t TclsRising = 1;
 229        uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
 230        uint16_t dv_window = 0;
 231        uint16_t en_lo, en_hi;
 232        uint16_t acc_clks;
 233        uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
 234
 235        dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
 236                __FILE__, __LINE__, __func__);
 237
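        /*
         * Worked example (assuming CLK_X = 5 and CLK_MULTI = 4 as defined in
         * denali.h), for ONFI timing mode 5:
         *   en_lo = CEIL_DIV(Trp[5] = 10, CLK_X) = 2
         *   en_hi = CEIL_DIV(Treh[5] = 7, CLK_X) = 2
         * (en_lo + en_hi) * CLK_X = 20 already covers Trc[5] = 20 and equals
         * CLK_MULTI, so neither adjustment below fires; the first pass of the
         * loop then yields data_invalid = 25 and dv_window = 25 - 16 = 9 >= 8.
         */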
 238        en_lo = CEIL_DIV(Trp[mode], CLK_X);
 239        en_hi = CEIL_DIV(Treh[mode], CLK_X);
 240#if ONFI_BLOOM_TIME
 241        if ((en_hi * CLK_X) < (Treh[mode] + 2))
 242                en_hi++;
 243#endif
 244
 245        if ((en_lo + en_hi) * CLK_X < Trc[mode])
 246                en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
 247
 248        if ((en_lo + en_hi) < CLK_MULTI)
 249                en_lo += CLK_MULTI - en_lo - en_hi;
 250
 251        while (dv_window < 8) {
 252                data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
 253
 254                data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
 255
 256                data_invalid = data_invalid_rhoh < data_invalid_rloh ?
 257                                        data_invalid_rhoh : data_invalid_rloh;
 258
 259                dv_window = data_invalid - Trea[mode];
 260
 261                if (dv_window < 8)
 262                        en_lo++;
 263        }
 264
 265        acc_clks = CEIL_DIV(Trea[mode], CLK_X);
 266
 267        while (acc_clks * CLK_X - Trea[mode] < 3)
 268                acc_clks++;
 269
 270        if (data_invalid - acc_clks * CLK_X < 2)
 271                dev_warn(denali->dev, "%s, Line %d: Warning!\n",
 272                         __FILE__, __LINE__);
 273
 274        addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
 275        re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
 276        re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
 277        we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
 278        cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
 279        if (!TclsRising)
 280                cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
 281        if (cs_cnt == 0)
 282                cs_cnt = 1;
 283
 284        if (Tcea[mode]) {
 285                while (cs_cnt * CLK_X + Trea[mode] < Tcea[mode])
 286                        cs_cnt++;
 287        }
 288
 289#if MODE5_WORKAROUND
 290        if (mode == 5)
 291                acc_clks = 5;
 292#endif
 293
 294        /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
 295        if (ioread32(denali->flash_reg + MANUFACTURER_ID) == 0 &&
 296                ioread32(denali->flash_reg + DEVICE_ID) == 0x88)
 297                acc_clks = 6;
 298
 299        iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
 300        iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
 301        iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
 302        iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
 303        iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
 304        iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
 305        iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
 306        iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
 307}
 308
 309/* queries the NAND device to see what ONFI modes it supports. */
 310static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
 311{
 312        int i;
 313
 314        /*
  315         * we don't need to do a reset here because the driver has already
  316         * reset all the banks
 317         */
 318        if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
 319                ONFI_TIMING_MODE__VALUE))
 320                return FAIL;
 321
 322        for (i = 5; i > 0; i--) {
 323                if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
 324                        (0x01 << i))
 325                        break;
 326        }
 327
 328        nand_onfi_timing_set(denali, i);
 329
 330        /*
  331         * All the ONFI devices we know of support the page-cache r/w
  332         * feature, so pipeline_rw_ahead could be enabled here (currently left disabled)
 333         */
 334        /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
 335        /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE);  */
 336
 337        return PASS;
 338}
 339
 340static void get_samsung_nand_para(struct denali_nand_info *denali,
 341                                                        uint8_t device_id)
 342{
 343        if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
 344                /* Set timing register values according to datasheet */
 345                iowrite32(5, denali->flash_reg + ACC_CLKS);
 346                iowrite32(20, denali->flash_reg + RE_2_WE);
 347                iowrite32(12, denali->flash_reg + WE_2_RE);
 348                iowrite32(14, denali->flash_reg + ADDR_2_DATA);
 349                iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
 350                iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
 351                iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
 352        }
 353}
 354
 355static void get_toshiba_nand_para(struct denali_nand_info *denali)
 356{
 357        uint32_t tmp;
 358
 359        /*
 360         * Workaround to fix a controller bug which reports a wrong
 361         * spare area size for some kind of Toshiba NAND device
 362         */
 363        if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
 364                (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
 365                iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
 366                tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
 367                        ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
 368                iowrite32(tmp,
 369                                denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
 370#if SUPPORT_15BITECC
 371                iowrite32(15, denali->flash_reg + ECC_CORRECTION);
 372#elif SUPPORT_8BITECC
 373                iowrite32(8, denali->flash_reg + ECC_CORRECTION);
 374#endif
 375        }
 376}
 377
 378static void get_hynix_nand_para(struct denali_nand_info *denali,
 379                                                        uint8_t device_id)
 380{
 381        uint32_t main_size, spare_size;
 382
 383        switch (device_id) {
 384        case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
 385        case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
 386                iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
 387                iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
 388                iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
 389                main_size = 4096 *
 390                        ioread32(denali->flash_reg + DEVICES_CONNECTED);
 391                spare_size = 224 *
 392                        ioread32(denali->flash_reg + DEVICES_CONNECTED);
 393                iowrite32(main_size,
 394                                denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
 395                iowrite32(spare_size,
 396                                denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
 397                iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
 398#if SUPPORT_15BITECC
 399                iowrite32(15, denali->flash_reg + ECC_CORRECTION);
 400#elif SUPPORT_8BITECC
 401                iowrite32(8, denali->flash_reg + ECC_CORRECTION);
 402#endif
 403                break;
 404        default:
 405                dev_warn(denali->dev,
 406                         "Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
 407                         "Will use default parameter values instead.\n",
 408                         device_id);
 409        }
 410}
 411
 412/*
 413 * determines how many NAND chips are connected to the controller. Note for
 414 * Intel CE4100 devices we don't support more than one device.
 415 */
 416static void find_valid_banks(struct denali_nand_info *denali)
 417{
 418        uint32_t id[denali->max_banks];
 419        int i;
 420
 421        denali->total_used_banks = 1;
 422        for (i = 0; i < denali->max_banks; i++) {
 423                index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
 424                index_addr(denali, MODE_11 | (i << 24) | 1, 0);
 425                index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);
 426
 427                dev_dbg(denali->dev,
 428                        "Return 1st ID for bank[%d]: %x\n", i, id[i]);
 429
 430                if (i == 0) {
 431                        if (!(id[i] & 0x0ff))
  432                                break; /* no readable ID on bank 0; stop scanning */
 433                } else {
 434                        if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
 435                                denali->total_used_banks++;
 436                        else
 437                                break;
 438                }
 439        }
 440
 441        if (denali->platform == INTEL_CE4100) {
 442                /*
 443                 * Platform limitations of the CE4100 device limit
 444                 * users to a single chip solution for NAND.
 445                 * Multichip support is not enabled.
 446                 */
 447                if (denali->total_used_banks != 1) {
 448                        dev_err(denali->dev,
 449                                "Sorry, Intel CE4100 only supports a single NAND device.\n");
 450                        BUG();
 451                }
 452        }
 453        dev_dbg(denali->dev,
 454                "denali->total_used_banks: %d\n", denali->total_used_banks);
 455}
 456
 457/*
 458 * Use the configuration feature register to determine the maximum number of
 459 * banks that the hardware supports.
 460 */
 461static void detect_max_banks(struct denali_nand_info *denali)
 462{
 463        uint32_t features = ioread32(denali->flash_reg + FEATURES);
 464
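        /*
         * The N_BANKS field encodes the bank count as a power of two: a field
         * value of n yields 2 << n banks (e.g. n = 2 gives 8), assuming
         * FEATURES__N_BANKS masks the low-order bits of the FEATURES register.
         */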
 465        denali->max_banks = 2 << (features & FEATURES__N_BANKS);
 466}
 467
 468static void detect_partition_feature(struct denali_nand_info *denali)
 469{
 470        /*
  471         * On the MRST platform, denali->fwblks represents the
  472         * number of blocks occupied by firmware. The firmware lives
  473         * in a protected partition that the MTD driver has no
  474         * permission to access, so tell the driver how many
  475         * blocks it must not touch.
 476         */
 477        if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
 478                if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
 479                        PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
 480                        denali->fwblks =
 481                            ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
 482                              MIN_MAX_BANK__MIN_VALUE) *
 483                             denali->blksperchip)
 484                            +
 485                            (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
 486                            MIN_BLK_ADDR__VALUE);
 487                } else {
 488                        denali->fwblks = SPECTRA_START_BLOCK;
 489                }
 490        } else {
 491                denali->fwblks = SPECTRA_START_BLOCK;
 492        }
 493}
 494
 495static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
 496{
 497        uint16_t status = PASS;
 498        uint32_t id_bytes[8], addr;
 499        uint8_t maf_id, device_id;
 500        int i;
 501
 502        dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
 503                        __FILE__, __LINE__, __func__);
 504
 505        /*
  506         * Use the Read ID command to get the device ID and other params.
  507         * For some NAND chips, the controller can't report the correct
  508         * device ID by reading from the DEVICE_ID register.
 509         */
 510        addr = MODE_11 | BANK(denali->flash_bank);
 511        index_addr(denali, addr | 0, 0x90);
 512        index_addr(denali, addr | 1, 0);
 513        for (i = 0; i < 8; i++)
 514                index_addr_read_data(denali, addr | 2, &id_bytes[i]);
 515        maf_id = id_bytes[0];
 516        device_id = id_bytes[1];
 517
 518        if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
 519                ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
 520                if (FAIL == get_onfi_nand_para(denali))
 521                        return FAIL;
 522        } else if (maf_id == 0xEC) { /* Samsung NAND */
 523                get_samsung_nand_para(denali, device_id);
 524        } else if (maf_id == 0x98) { /* Toshiba NAND */
 525                get_toshiba_nand_para(denali);
 526        } else if (maf_id == 0xAD) { /* Hynix NAND */
 527                get_hynix_nand_para(denali, device_id);
 528        }
 529
 530        dev_info(denali->dev,
 531                        "Dump timing register values:\n"
 532                        "acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
 533                        "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
 534                        "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
 535                        ioread32(denali->flash_reg + ACC_CLKS),
 536                        ioread32(denali->flash_reg + RE_2_WE),
 537                        ioread32(denali->flash_reg + RE_2_RE),
 538                        ioread32(denali->flash_reg + WE_2_RE),
 539                        ioread32(denali->flash_reg + ADDR_2_DATA),
 540                        ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
 541                        ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
 542                        ioread32(denali->flash_reg + CS_SETUP_CNT));
 543
 544        find_valid_banks(denali);
 545
 546        detect_partition_feature(denali);
 547
 548        /*
 549         * If the user specified to override the default timings
 550         * with a specific ONFI mode, we apply those changes here.
 551         */
 552        if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
 553                nand_onfi_timing_set(denali, onfi_timing_mode);
 554
 555        return status;
 556}
 557
 558static void denali_set_intr_modes(struct denali_nand_info *denali,
 559                                        uint16_t INT_ENABLE)
 560{
 561        dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
 562                __FILE__, __LINE__, __func__);
 563
 564        if (INT_ENABLE)
 565                iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
 566        else
 567                iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
 568}
 569
 570/*
 571 * validation function to verify that the controlling software is making
 572 * a valid request
 573 */
 574static inline bool is_flash_bank_valid(int flash_bank)
 575{
 576        return flash_bank >= 0 && flash_bank < 4;
 577}
 578
 579static void denali_irq_init(struct denali_nand_info *denali)
 580{
 581        uint32_t int_mask;
 582        int i;
 583
 584        /* Disable global interrupts */
 585        denali_set_intr_modes(denali, false);
 586
 587        int_mask = DENALI_IRQ_ALL;
 588
 589        /* Clear all status bits */
 590        for (i = 0; i < denali->max_banks; ++i)
 591                iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
 592
 593        denali_irq_enable(denali, int_mask);
 594}
 595
 596static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
 597{
 598        denali_set_intr_modes(denali, false);
 599        free_irq(irqnum, denali);
 600}
 601
 602static void denali_irq_enable(struct denali_nand_info *denali,
 603                                                        uint32_t int_mask)
 604{
 605        int i;
 606
 607        for (i = 0; i < denali->max_banks; ++i)
 608                iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
 609}
 610
 611/*
 612 * This function only returns when an interrupt that this driver cares about
 613 * occurs. This is to reduce the overhead of servicing interrupts
 614 */
 615static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
 616{
 617        return read_interrupt_status(denali) & DENALI_IRQ_ALL;
 618}
 619
 620/* Interrupts are cleared by writing a 1 to the appropriate status bit */
 621static inline void clear_interrupt(struct denali_nand_info *denali,
 622                                                        uint32_t irq_mask)
 623{
 624        uint32_t intr_status_reg;
 625
 626        intr_status_reg = INTR_STATUS(denali->flash_bank);
 627
 628        iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
 629}
 630
 631static void clear_interrupts(struct denali_nand_info *denali)
 632{
 633        uint32_t status;
 634
 635        spin_lock_irq(&denali->irq_lock);
 636
 637        status = read_interrupt_status(denali);
 638        clear_interrupt(denali, status);
 639
 640        denali->irq_status = 0x0;
 641        spin_unlock_irq(&denali->irq_lock);
 642}
 643
 644static uint32_t read_interrupt_status(struct denali_nand_info *denali)
 645{
 646        uint32_t intr_status_reg;
 647
 648        intr_status_reg = INTR_STATUS(denali->flash_bank);
 649
 650        return ioread32(denali->flash_reg + intr_status_reg);
 651}
 652
 653/*
 654 * This is the interrupt service routine. It handles all interrupts
 655 * sent to this device. Note that on CE4100, this is a shared interrupt.
 656 */
 657static irqreturn_t denali_isr(int irq, void *dev_id)
 658{
 659        struct denali_nand_info *denali = dev_id;
 660        uint32_t irq_status;
 661        irqreturn_t result = IRQ_NONE;
 662
 663        spin_lock(&denali->irq_lock);
 664
 665        /* check to see if a valid NAND chip has been selected. */
 666        if (is_flash_bank_valid(denali->flash_bank)) {
 667                /*
 668                 * check to see if controller generated the interrupt,
 669                 * since this is a shared interrupt
 670                 */
 671                irq_status = denali_irq_detected(denali);
 672                if (irq_status != 0) {
 673                        /* handle interrupt */
 674                        /* first acknowledge it */
 675                        clear_interrupt(denali, irq_status);
 676                        /*
 677                         * store the status in the device context for someone
 678                         * to read
 679                         */
 680                        denali->irq_status |= irq_status;
 681                        /* notify anyone who cares that it happened */
 682                        complete(&denali->complete);
 683                        /* tell the OS that we've handled this */
 684                        result = IRQ_HANDLED;
 685                }
 686        }
 687        spin_unlock(&denali->irq_lock);
 688        return result;
 689}
 691
 692static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
 693{
 694        unsigned long comp_res;
 695        uint32_t intr_status;
 696        unsigned long timeout = msecs_to_jiffies(1000);
 697
 698        do {
 699                comp_res =
 700                        wait_for_completion_timeout(&denali->complete, timeout);
 701                spin_lock_irq(&denali->irq_lock);
 702                intr_status = denali->irq_status;
 703
 704                if (intr_status & irq_mask) {
 705                        denali->irq_status &= ~irq_mask;
 706                        spin_unlock_irq(&denali->irq_lock);
 707                        /* our interrupt was detected */
 708                        break;
 709                }
 710
 711                /*
 712                 * these are not the interrupts you are looking for -
 713                 * need to wait again
 714                 */
 715                spin_unlock_irq(&denali->irq_lock);
 716        } while (comp_res != 0);
 717
 718        if (comp_res == 0) {
 719                /* timeout */
 720                pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
 721                                intr_status, irq_mask);
 722
 723                intr_status = 0;
 724        }
 725        return intr_status;
 726}
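/*
 * Callers follow a common pattern (see reset_bank() above): clear pending
 * interrupts, start an operation, then wait_for_irq() on the relevant
 * completion/failure bits. A return value of 0 means the wait timed out.
 */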
 727
 728/*
  729 * This helper function sets up the registers for ECC and whether
  730 * the spare area will be transferred.
 731 */
 732static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
 733                                bool transfer_spare)
 734{
 735        int ecc_en_flag, transfer_spare_flag;
 736
 737        /* set ECC, transfer spare bits if needed */
 738        ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
 739        transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
 740
 741        /* Enable spare area/ECC per user's request. */
 742        iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
 743        iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
 744}
 745
 746/*
 747 * sends a pipeline command operation to the controller. See the Denali NAND
 748 * controller's user guide for more information (section 4.2.3.6).
 749 */
 750static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
 751                                    bool ecc_en, bool transfer_spare,
 752                                    int access_type, int op)
 753{
 754        int status = PASS;
 755        uint32_t page_count = 1;
 756        uint32_t addr, cmd, irq_status, irq_mask;
 757
 758        if (op == DENALI_READ)
 759                irq_mask = INTR_STATUS__LOAD_COMP;
 760        else if (op == DENALI_WRITE)
 761                irq_mask = 0;
 762        else
 763                BUG();
 764
 765        setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
 766
 767        clear_interrupts(denali);
 768
 769        addr = BANK(denali->flash_bank) | denali->page;
 770
 771        if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
 772                cmd = MODE_01 | addr;
 773                iowrite32(cmd, denali->flash_mem);
 774        } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
  775                /* set up spare area access for the write */
 776                cmd = MODE_10 | addr;
 777                index_addr(denali, cmd, access_type);
 778
 779                cmd = MODE_01 | addr;
 780                iowrite32(cmd, denali->flash_mem);
 781        } else if (op == DENALI_READ) {
 782                /* setup page read request for access type */
 783                cmd = MODE_10 | addr;
 784                index_addr(denali, cmd, access_type);
 785
 786                /*
 787                 * page 33 of the NAND controller spec indicates we should not
 788                 * use the pipeline commands in Spare area only mode.
 789                 * So we don't.
 790                 */
 791                if (access_type == SPARE_ACCESS) {
 792                        cmd = MODE_01 | addr;
 793                        iowrite32(cmd, denali->flash_mem);
 794                } else {
 795                        index_addr(denali, cmd,
 796                                        PIPELINE_ACCESS | op | page_count);
 797
 798                        /*
 799                         * wait for command to be accepted
 800                         * can always use status0 bit as the
 801                         * mask is identical for each bank.
 802                         */
 803                        irq_status = wait_for_irq(denali, irq_mask);
 804
 805                        if (irq_status == 0) {
 806                                dev_err(denali->dev,
 807                                        "cmd, page, addr on timeout (0x%x, 0x%x, 0x%x)\n",
 808                                        cmd, denali->page, addr);
 809                                status = FAIL;
 810                        } else {
 811                                cmd = MODE_01 | addr;
 812                                iowrite32(cmd, denali->flash_mem);
 813                        }
 814                }
 815        }
 816        return status;
 817}
 818
 819/* helper function that simply writes a buffer to the flash */
 820static int write_data_to_flash_mem(struct denali_nand_info *denali,
 821                                   const uint8_t *buf, int len)
 822{
 823        uint32_t *buf32;
 824        int i;
 825
 826        /*
 827         * verify that the len is a multiple of 4.
 828         * see comment in read_data_from_flash_mem()
 829         */
 830        BUG_ON((len % 4) != 0);
 831
 832        /* write the data to the flash memory */
 833        buf32 = (uint32_t *)buf;
 834        for (i = 0; i < len / 4; i++)
 835                iowrite32(*buf32++, denali->flash_mem + 0x10);
  836        return i * 4; /* intent is to return the number of bytes written */
 837}
 838
 839/* helper function that simply reads a buffer from the flash */
 840static int read_data_from_flash_mem(struct denali_nand_info *denali,
 841                                    uint8_t *buf, int len)
 842{
 843        uint32_t *buf32;
 844        int i;
 845
 846        /*
 847         * we assume that len will be a multiple of 4, if not it would be nice
 848         * to know about it ASAP rather than have random failures...
  849         * This assumption is based on the fact that this function is used
  850         * to read flash pages, whose sizes are multiples of 4 bytes.
 851         */
 852        BUG_ON((len % 4) != 0);
 853
 854        /* transfer the data from the flash */
 855        buf32 = (uint32_t *)buf;
 856        for (i = 0; i < len / 4; i++)
 857                *buf32++ = ioread32(denali->flash_mem + 0x10);
 858        return i * 4; /* intent is to return the number of bytes read */
 859}
 860
 861/* writes OOB data to the device */
 862static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 863{
 864        struct denali_nand_info *denali = mtd_to_denali(mtd);
 865        uint32_t irq_status;
 866        uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
 867                                                INTR_STATUS__PROGRAM_FAIL;
 868        int status = 0;
 869
 870        denali->page = page;
 871
 872        if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
 873                                                        DENALI_WRITE) == PASS) {
 874                write_data_to_flash_mem(denali, buf, mtd->oobsize);
 875
 876                /* wait for operation to complete */
 877                irq_status = wait_for_irq(denali, irq_mask);
 878
 879                if (irq_status == 0) {
 880                        dev_err(denali->dev, "OOB write failed\n");
 881                        status = -EIO;
 882                }
 883        } else {
 884                dev_err(denali->dev, "unable to send pipeline command\n");
 885                status = -EIO;
 886        }
 887        return status;
 888}
 889
 890/* reads OOB data from the device */
 891static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
 892{
 893        struct denali_nand_info *denali = mtd_to_denali(mtd);
 894        uint32_t irq_mask = INTR_STATUS__LOAD_COMP;
 895        uint32_t irq_status, addr, cmd;
 896
 897        denali->page = page;
 898
 899        if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
 900                                                        DENALI_READ) == PASS) {
 901                read_data_from_flash_mem(denali, buf, mtd->oobsize);
 902
 903                /*
 904                 * wait for command to be accepted
 905                 * can always use status0 bit as the
 906                 * mask is identical for each bank.
 907                 */
 908                irq_status = wait_for_irq(denali, irq_mask);
 909
 910                if (irq_status == 0)
 911                        dev_err(denali->dev, "page on OOB timeout %d\n",
 912                                        denali->page);
 913
 914                /*
 915                 * We set the device back to MAIN_ACCESS here as I observed
 916                 * instability with the controller if you do a block erase
 917                 * and the last transaction was a SPARE_ACCESS. Block erase
 918                 * is reliable (according to the MTD test infrastructure)
 919                 * if you are in MAIN_ACCESS.
 920                 */
 921                addr = BANK(denali->flash_bank) | denali->page;
 922                cmd = MODE_10 | addr;
 923                index_addr(denali, cmd, MAIN_ACCESS);
 924        }
 925}
 926
 927/*
 928 * this function examines buffers to see if they contain data that
 929 * indicate that the buffer is part of an erased region of flash.
 930 */
 931static bool is_erased(uint8_t *buf, int len)
 932{
 933        int i;
 934
 935        for (i = 0; i < len; i++)
 936                if (buf[i] != 0xFF)
 937                        return false;
 938        return true;
 939}
 940#define ECC_SECTOR_SIZE 512
 941
 942#define ECC_SECTOR(x)   (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
 943#define ECC_BYTE(x)     (((x) & ECC_ERROR_ADDRESS__OFFSET))
 944#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
 945#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
 946#define ECC_ERR_DEVICE(x)       (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
 947#define ECC_LAST_ERR(x)         ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
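/*
 * Example (assuming the denali.h field layout, with SECTOR_NR in bits 15:12
 * and OFFSET in bits 11:0): an ECC_ERROR_ADDRESS value of 0x3041 decodes to
 * ECC_SECTOR() == 3 and ECC_BYTE() == 0x041.
 */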
 948
 949static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
 950                       uint32_t irq_status, unsigned int *max_bitflips)
 951{
 952        bool check_erased_page = false;
 953        unsigned int bitflips = 0;
 954
 955        if (irq_status & INTR_STATUS__ECC_ERR) {
  956                /* read and handle the ECC errors reported by the controller */
 957                uint32_t err_address, err_correction_info, err_byte,
 958                         err_sector, err_device, err_correction_value;
 959                denali_set_intr_modes(denali, false);
 960
 961                do {
 962                        err_address = ioread32(denali->flash_reg +
 963                                                ECC_ERROR_ADDRESS);
 964                        err_sector = ECC_SECTOR(err_address);
 965                        err_byte = ECC_BYTE(err_address);
 966
 967                        err_correction_info = ioread32(denali->flash_reg +
 968                                                ERR_CORRECTION_INFO);
 969                        err_correction_value =
 970                                ECC_CORRECTION_VALUE(err_correction_info);
 971                        err_device = ECC_ERR_DEVICE(err_correction_info);
 972
 973                        if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
 974                                /*
 975                                 * If err_byte is larger than ECC_SECTOR_SIZE,
  976                                 * the error happened in the OOB area, so we
  977                                 * ignore it; there is no need to correct it.
  978                                 * err_device identifies which NAND device the
  979                                 * error bits occurred on when more than one
  980                                 * NAND is connected.
 981                                 */
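                                /*
                                 * Example: with devnum == 2, an error at
                                 * sector 1, byte 0x10 on device 1 maps to
                                 * buf offset (1 * 512 + 0x10) * 2 + 1 = 1057.
                                 */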
 982                                if (err_byte < ECC_SECTOR_SIZE) {
 983                                        int offset;
 984
 985                                        offset = (err_sector *
 986                                                        ECC_SECTOR_SIZE +
 987                                                        err_byte) *
 988                                                        denali->devnum +
 989                                                        err_device;
 990                                        /* correct the ECC error */
 991                                        buf[offset] ^= err_correction_value;
 992                                        denali->mtd.ecc_stats.corrected++;
 993                                        bitflips++;
 994                                }
 995                        } else {
 996                                /*
  997                                 * if the error is not correctable, we need to
  998                                 * look at the page to see if it is an erased
  999                                 * page; if so, it's not a real ECC error.
1000                                 */
1001                                check_erased_page = true;
1002                        }
1003                } while (!ECC_LAST_ERR(err_correction_info));
1004                /*
 1005                 * Once all ECC errors are handled, the controller will
 1006                 * trigger an ECC_TRANSACTION_DONE interrupt, so just
 1007                 * wait for that interrupt here
1008                 */
1009                while (!(read_interrupt_status(denali) &
1010                                INTR_STATUS__ECC_TRANSACTION_DONE))
1011                        cpu_relax();
1012                clear_interrupts(denali);
1013                denali_set_intr_modes(denali, true);
1014        }
1015        *max_bitflips = bitflips;
1016        return check_erased_page;
1017}
1018
 1019/* programs the controller to enable or disable DMA transfers */
1020static void denali_enable_dma(struct denali_nand_info *denali, bool en)
1021{
1022        iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
1023        ioread32(denali->flash_reg + DMA_ENABLE);
1024}
1025
 1026/* sets up the HW to perform the data DMA */
1027static void denali_setup_dma(struct denali_nand_info *denali, int op)
1028{
1029        uint32_t mode;
1030        const int page_count = 1;
1031        uint32_t addr = denali->buf.dma_buf;
1032
1033        mode = MODE_10 | BANK(denali->flash_bank);
1034
1035        /* DMA is a four step process */
1036
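        /*
         * Example: for a DMA buffer at physical address 0x12345678, step 2
         * below programs the high half (0x1234) and step 3 the low half
         * (0x5678), each placed in bits 23:8 of the indexed-address word.
         */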
1037        /* 1. setup transfer type and # of pages */
1038        index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
1039
1040        /* 2. set memory high address bits 23:8 */
1041        index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
1042
1043        /* 3. set memory low address bits 23:8 */
 1044        index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
1045
1046        /* 4. interrupt when complete, burst len = 64 bytes */
1047        index_addr(denali, mode | 0x14000, 0x2400);
1048}
1049
1050/*
1051 * writes a page. user specifies type, and this function handles the
1052 * configuration details.
1053 */
1054static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
1055                        const uint8_t *buf, bool raw_xfer)
1056{
1057        struct denali_nand_info *denali = mtd_to_denali(mtd);
1058        dma_addr_t addr = denali->buf.dma_buf;
1059        size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1060        uint32_t irq_status;
1061        uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
1062                                                INTR_STATUS__PROGRAM_FAIL;
1063
1064        /*
1065         * if it is a raw xfer, we want to disable ecc and send the spare area.
1066         * !raw_xfer - enable ecc
1067         * raw_xfer - transfer spare
1068         */
1069        setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
1070
1071        /* copy buffer into DMA buffer */
1072        memcpy(denali->buf.buf, buf, mtd->writesize);
1073
1074        if (raw_xfer) {
1075                /* transfer the data to the spare area */
1076                memcpy(denali->buf.buf + mtd->writesize,
1077                        chip->oob_poi,
1078                        mtd->oobsize);
1079        }
1080
1081        dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
1082
1083        clear_interrupts(denali);
1084        denali_enable_dma(denali, true);
1085
1086        denali_setup_dma(denali, DENALI_WRITE);
1087
1088        /* wait for operation to complete */
1089        irq_status = wait_for_irq(denali, irq_mask);
1090
1091        if (irq_status == 0) {
1092                dev_err(denali->dev, "timeout on write_page (type = %d)\n",
1093                        raw_xfer);
1094                denali->status = NAND_STATUS_FAIL;
1095        }
1096
1097        denali_enable_dma(denali, false);
1098        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
1099
1100        return 0;
1101}
1102
1103/* NAND core entry points */
1104
1105/*
1106 * this is the callback that the NAND core calls to write a page. Since
1107 * writing a page with ECC or without is similar, all the work is done
1108 * by write_page above.
1109 */
1110static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1111                                const uint8_t *buf, int oob_required)
1112{
1113        /*
1114         * for regular page writes, we let HW handle all the ECC
1115         * data written to the device.
1116         */
1117        return write_page(mtd, chip, buf, false);
1118}
1119
1120/*
1121 * This is the callback that the NAND core calls to write a page without ECC.
1122 * raw access is similar to ECC page writes, so all the work is done in the
1123 * write_page() function above.
1124 */
1125static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1126                                        const uint8_t *buf, int oob_required)
1127{
1128        /*
1129         * for raw page writes, we want to disable ECC and simply write
1130         * whatever data is in the buffer.
1131         */
1132        return write_page(mtd, chip, buf, true);
1133}
1134
1135static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1136                            int page)
1137{
1138        return write_oob_data(mtd, chip->oob_poi, page);
1139}
1140
1141static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1142                           int page)
1143{
1144        read_oob_data(mtd, chip->oob_poi, page);
1145
1146        return 0;
1147}
1148
1149static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1150                            uint8_t *buf, int oob_required, int page)
1151{
1152        unsigned int max_bitflips;
1153        struct denali_nand_info *denali = mtd_to_denali(mtd);
1154
1155        dma_addr_t addr = denali->buf.dma_buf;
1156        size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1157
1158        uint32_t irq_status;
1159        uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
1160                            INTR_STATUS__ECC_ERR;
1161        bool check_erased_page = false;
1162
1163        if (page != denali->page) {
1164                dev_err(denali->dev,
 1165                        "IN %s: page %d is not equal to denali->page %d\n",
1166                        __func__, page, denali->page);
1167                BUG();
1168        }
1169
1170        setup_ecc_for_xfer(denali, true, false);
1171
1172        denali_enable_dma(denali, true);
1173        dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
1174
1175        clear_interrupts(denali);
1176        denali_setup_dma(denali, DENALI_READ);
1177
1178        /* wait for operation to complete */
1179        irq_status = wait_for_irq(denali, irq_mask);
1180
1181        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
1182
1183        memcpy(buf, denali->buf.buf, mtd->writesize);
1184
1185        check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
1186        denali_enable_dma(denali, false);
1187
1188        if (check_erased_page) {
1189                read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
1190
1191                /* check ECC failures that may have occurred on erased pages */
 1192                if (!is_erased(buf, denali->mtd.writesize))
 1193                        denali->mtd.ecc_stats.failed++;
 1194                if (!is_erased(buf, denali->mtd.oobsize))
 1195                        denali->mtd.ecc_stats.failed++;
1198        }
1199        return max_bitflips;
1200}
1201
1202static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1203                                uint8_t *buf, int oob_required, int page)
1204{
1205        struct denali_nand_info *denali = mtd_to_denali(mtd);
1206        dma_addr_t addr = denali->buf.dma_buf;
1207        size_t size = denali->mtd.writesize + denali->mtd.oobsize;
1208        uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
1209
1210        if (page != denali->page) {
1211                dev_err(denali->dev,
 1212                        "IN %s: page %d is not equal to denali->page %d\n",
1213                        __func__, page, denali->page);
1214                BUG();
1215        }
1216
1217        setup_ecc_for_xfer(denali, false, true);
1218        denali_enable_dma(denali, true);
1219
1220        dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
1221
1222        clear_interrupts(denali);
1223        denali_setup_dma(denali, DENALI_READ);
1224
1225        /* wait for operation to complete */
1226        wait_for_irq(denali, irq_mask);
1227
1228        dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
1229
1230        denali_enable_dma(denali, false);
1231
1232        memcpy(buf, denali->buf.buf, mtd->writesize);
1233        memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
1234
1235        return 0;
1236}
1237
1238static uint8_t denali_read_byte(struct mtd_info *mtd)
1239{
1240        struct denali_nand_info *denali = mtd_to_denali(mtd);
1241        uint8_t result = 0xff;
1242
1243        if (denali->buf.head < denali->buf.tail)
1244                result = denali->buf.buf[denali->buf.head++];
1245
1246        return result;
1247}
1248
1249static void denali_select_chip(struct mtd_info *mtd, int chip)
1250{
1251        struct denali_nand_info *denali = mtd_to_denali(mtd);
1252
1253        spin_lock_irq(&denali->irq_lock);
1254        denali->flash_bank = chip;
1255        spin_unlock_irq(&denali->irq_lock);
1256}
1257
1258static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1259{
1260        struct denali_nand_info *denali = mtd_to_denali(mtd);
1261        int status = denali->status;
1262
1263        denali->status = 0;
1264
1265        return status;
1266}
1267
1268static int denali_erase(struct mtd_info *mtd, int page)
1269{
1270        struct denali_nand_info *denali = mtd_to_denali(mtd);
1271
1272        uint32_t cmd, irq_status;
1273
1274        clear_interrupts(denali);
1275
 1276        /* issue a block erase for the block containing this page */
1277        cmd = MODE_10 | BANK(denali->flash_bank) | page;
1278        index_addr(denali, cmd, 0x1);
1279
1280        /* wait for erase to complete or failure to occur */
1281        irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
1282                                        INTR_STATUS__ERASE_FAIL);
1283
1284        return irq_status & INTR_STATUS__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
1285}
1286
1287static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
1288                           int page)
1289{
1290        struct denali_nand_info *denali = mtd_to_denali(mtd);
1291        uint32_t addr, id;
1292        int i;
1293
1294        switch (cmd) {
1295        case NAND_CMD_PAGEPROG:
1296                break;
1297        case NAND_CMD_STATUS:
1298                read_status(denali);
1299                break;
1300        case NAND_CMD_READID:
1301        case NAND_CMD_PARAM:
1302                reset_buf(denali);
1303                /*
 1304                 * sometimes the manufacturer ID read from the register is
 1305                 * wrong, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
 1306                 * so send the READID command to the NAND instead
1307                 */
1308                addr = MODE_11 | BANK(denali->flash_bank);
1309                index_addr(denali, addr | 0, 0x90);
1310                index_addr(denali, addr | 1, 0);
1311                for (i = 0; i < 8; i++) {
1312                        index_addr_read_data(denali, addr | 2, &id);
1313                        write_byte_to_buf(denali, id);
1314                }
1315                break;
1316        case NAND_CMD_READ0:
1317        case NAND_CMD_SEQIN:
1318                denali->page = page;
1319                break;
1320        case NAND_CMD_RESET:
1321                reset_bank(denali);
1322                break;
1323        case NAND_CMD_READOOB:
1324                /* TODO: Read OOB data */
1325                break;
1326        default:
1327                pr_err(": unsupported command received 0x%x\n", cmd);
1328                break;
1329        }
1330}
1331
1332/* stubs for ECC functions not used by the NAND core */
1333static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
1334                                uint8_t *ecc_code)
1335{
1336        struct denali_nand_info *denali = mtd_to_denali(mtd);
1337
1338        dev_err(denali->dev, "denali_ecc_calculate called unexpectedly\n");
1339        BUG();
1340        return -EIO;
1341}
1342
1343static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
1344                                uint8_t *read_ecc, uint8_t *calc_ecc)
1345{
1346        struct denali_nand_info *denali = mtd_to_denali(mtd);
1347
1348        dev_err(denali->dev, "denali_ecc_correct called unexpectedly\n");
1349        BUG();
1350        return -EIO;
1351}
1352
1353static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
1354{
1355        struct denali_nand_info *denali = mtd_to_denali(mtd);
1356
1357        dev_err(denali->dev, "denali_ecc_hwctl called unexpectedly\n");
1358        BUG();
1359}
1360/* end NAND core entry points */
1361
1362/* Initialization code to bring the device up to a known good state */
1363static void denali_hw_init(struct denali_nand_info *denali)
1364{
1365        /*
 1366         * tell the driver how many bytes the controller will skip before
 1367         * writing ECC code in the OOB. This register may already have
 1368         * been set by firmware, so read the value out;
 1369         * if the value is 0, just leave it alone.
1370         */
1371        denali->bbtskipbytes = ioread32(denali->flash_reg +
1372                                                SPARE_AREA_SKIP_BYTES);
1373        detect_max_banks(denali);
1374        denali_nand_reset(denali);
1375        iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
1376        iowrite32(CHIP_EN_DONT_CARE__FLAG,
1377                        denali->flash_reg + CHIP_ENABLE_DONT_CARE);
1378
1379        iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
1380
1381        /* Should set value for these registers when init */
1382        iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
1383        iowrite32(1, denali->flash_reg + ECC_ENABLE);
1384        denali_nand_timing_set(denali);
1385        denali_irq_init(denali);
1386}
1387
1388/*
 1389 * Although the controller spec says SLC ECC is forced to be 4-bit,
 1390 * the Denali controller in MRST only supports 15-bit and 8-bit ECC
 1391 * correction
1392 */
1393#define ECC_8BITS       14
1394static struct nand_ecclayout nand_8bit_oob = {
1395        .eccbytes = 14,
1396};
1397
1398#define ECC_15BITS      26
1399static struct nand_ecclayout nand_15bit_oob = {
1400        .eccbytes = 26,
1401};
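/*
 * The ECC_xBITS values are presumably the number of ECC bytes the controller
 * consumes per 512-byte sector at that correction level (roughly 13 bits of
 * parity per correctable bit, rounded up to an even byte count:
 * 8 * 13 -> 14 bytes, 15 * 13 -> 26 bytes).
 */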
1402
1403static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
1404static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
1405
1406static struct nand_bbt_descr bbt_main_descr = {
1407        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1408                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1409        .offs = 8,
1410        .len = 4,
1411        .veroffs = 12,
1412        .maxblocks = 4,
1413        .pattern = bbt_pattern,
1414};
1415
1416static struct nand_bbt_descr bbt_mirror_descr = {
1417        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
1418                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
1419        .offs = 8,
1420        .len = 4,
1421        .veroffs = 12,
1422        .maxblocks = 4,
1423        .pattern = mirror_pattern,
1424};
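/*
 * The main and mirror bad block tables are kept in the last good blocks
 * of each chip (NAND_BBT_LASTBLOCK | NAND_BBT_PERCHIP), identified by the
 * "Bbt0"/"1tbB" patterns at OOB offset 8, with a version byte at offset 12,
 * and may occupy up to 4 blocks each.
 */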
1425
1426/* initialize driver data structures */
1427static void denali_drv_init(struct denali_nand_info *denali)
1428{
1429        denali->idx = 0;
1430
1431        /* setup interrupt handler */
1432        /*
1433         * the completion object will be used to notify
1434         * the waiter that the interrupt has completed
1435         */
1436        init_completion(&denali->complete);
1437
1438        /*
1439         * the spinlock will be used to synchronize the ISR with any
1440         * code that might access the shared data (interrupt status)
1441         */
1442        spin_lock_init(&denali->irq_lock);
1443
1444        /* indicate that MTD has not selected a valid bank yet */
1445        denali->flash_bank = CHIP_SELECT_INVALID;
1446
1447        /* initialize our irq_status variable to indicate no interrupts */
1448        denali->irq_status = 0;
1449}
1450
1451int denali_init(struct denali_nand_info *denali)
1452{
1453        int ret;
1454
1455        if (denali->platform == INTEL_CE4100) {
1456                /*
1457                 * Due to a silicon limitation, we can only support
1458                 * ONFI timing mode 1 and below.
1459                 */
1460                if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
1461                        pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
1462                        return -EINVAL;
1463                }
1464        }
1465
1466        /* allocate a temporary buffer for nand_scan_ident() */
1467        denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
1468                                        GFP_DMA | GFP_KERNEL);
1469        if (!denali->buf.buf)
1470                return -ENOMEM;
1471
1472        denali->mtd.dev.parent = denali->dev;
1473        denali_hw_init(denali);
1474        denali_drv_init(denali);
1475
1476        /*
1477         * denali_isr registration is done after all the hardware
1478         * initialization is finished
1479         */
1480        if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
1481                        DENALI_NAND_NAME, denali)) {
1482                pr_err("Spectra: Unable to allocate IRQ\n");
1483                return -ENODEV;
1484        }
1485
1486        /* now that our ISR is registered, we can enable interrupts */
1487        denali_set_intr_modes(denali, true);
1488        denali->mtd.name = "denali-nand";
1489        denali->mtd.owner = THIS_MODULE;
1490        denali->mtd.priv = &denali->nand;
1491
1492        /* register the driver with the NAND core subsystem */
1493        denali->nand.select_chip = denali_select_chip;
1494        denali->nand.cmdfunc = denali_cmdfunc;
1495        denali->nand.read_byte = denali_read_byte;
1496        denali->nand.waitfunc = denali_waitfunc;
1497
1498        /*
1499         * scan for NAND devices attached to the controller
1500         * this is the first stage in a two-step process to register
1501         * with the NAND subsystem
1502         */
1503        if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
1504                ret = -ENXIO;
1505                goto failed_req_irq;
1506        }
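        /*
         * nand_scan_ident() only reads the device ID and fills in the basic
         * geometry; nand_scan_tail() below completes the registration once
         * the ECC and bad-block settings have been chosen.
         */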
1507
1508        /* allocate the right size buffer now */
1509        devm_kfree(denali->dev, denali->buf.buf);
1510        denali->buf.buf = devm_kzalloc(denali->dev,
1511                             denali->mtd.writesize + denali->mtd.oobsize,
1512                             GFP_KERNEL);
1513        if (!denali->buf.buf) {
1514                ret = -ENOMEM;
1515                goto failed_req_irq;
1516        }
1517
1518        /* Is 32-bit DMA supported? */
1519        ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
1520        if (ret) {
1521                pr_err("Spectra: no usable DMA configuration\n");
1522                goto failed_req_irq;
1523        }
1524
1525        denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
1526                             denali->mtd.writesize + denali->mtd.oobsize,
1527                             DMA_BIDIRECTIONAL);
1528        if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
1529                dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
1530                ret = -EIO;
1531                goto failed_req_irq;
1532        }
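        /*
         * A single DMA buffer of writesize + oobsize bytes is mapped
         * bidirectionally here and reused for every page transfer; it is
         * only unmapped again in denali_remove().
         */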
1533
1534        /*
1535         * support for multi NAND
1536         * MTD knows nothing about multi NAND, so we should tell it
1537         * the real page size and anything else necessary
1538         */
1539        denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
1540        denali->nand.chipsize <<= (denali->devnum - 1);
1541        denali->nand.page_shift += (denali->devnum - 1);
1542        denali->nand.pagemask = (denali->nand.chipsize >>
1543                                                denali->nand.page_shift) - 1;
1544        denali->nand.bbt_erase_shift += (denali->devnum - 1);
1545        denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
1546        denali->nand.chip_shift += (denali->devnum - 1);
1547        denali->mtd.writesize <<= (denali->devnum - 1);
1548        denali->mtd.oobsize <<= (denali->devnum - 1);
1549        denali->mtd.erasesize <<= (denali->devnum - 1);
1550        denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
1551        denali->bbtskipbytes *= denali->devnum;
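        /*
         * For example, with two identical devices connected in parallel
         * (DEVICES_CONNECTED == 2), the shifts above grow by one and the
         * page, OOB and erase sizes double, so the pair is presented to MTD
         * as a single double-width chip; bbtskipbytes and the ECC byte
         * counts further down are scaled by devnum in the same way.
         */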
1552
1553        /*
1554         * second stage of the NAND scan
1555         * this stage requires information regarding ECC and
1556         * bad block management.
1557         */
1558
1559        /* Bad block management */
1560        denali->nand.bbt_td = &bbt_main_descr;
1561        denali->nand.bbt_md = &bbt_mirror_descr;
1562
1563        /* skip the scan for now until we have OOB read and write support */
1564        denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
1565        denali->nand.options |= NAND_SKIP_BBTSCAN;
1566        denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
1567
1568        /*
1569         * The Denali controller only supports 15-bit and 8-bit ECC in MRST,
1570         * so just let the controller do 15-bit ECC for MLC and 8-bit ECC for
1571         * SLC if possible.
1572         */
1573        if (!nand_is_slc(&denali->nand) &&
1574                        (denali->mtd.oobsize > (denali->bbtskipbytes +
1575                        ECC_15BITS * (denali->mtd.writesize /
1576                        ECC_SECTOR_SIZE)))) {
1577                /* if MLC OOB size is large enough, use 15-bit ECC */
1578                denali->nand.ecc.strength = 15;
1579                denali->nand.ecc.layout = &nand_15bit_oob;
1580                denali->nand.ecc.bytes = ECC_15BITS;
1581                iowrite32(15, denali->flash_reg + ECC_CORRECTION);
1582        } else if (denali->mtd.oobsize < (denali->bbtskipbytes +
1583                        ECC_8BITS * (denali->mtd.writesize /
1584                        ECC_SECTOR_SIZE))) {
1585                pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes\n");
                /* without this, a failed ECC setup would return 0 (success) */
                ret = -EINVAL;
1586                goto failed_req_irq;
1587        } else {
1588                denali->nand.ecc.strength = 8;
1589                denali->nand.ecc.layout = &nand_8bit_oob;
1590                denali->nand.ecc.bytes = ECC_8BITS;
1591                iowrite32(8, denali->flash_reg + ECC_CORRECTION);
1592        }
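        /*
         * Worked example (hypothetical chip): with a 2KiB page (four 512-byte
         * ECC sectors), 64 bytes of OOB and 2 skip bytes, 15-bit ECC would
         * need 2 + 26 * 4 = 106 OOB bytes and does not fit, while 8-bit ECC
         * needs 2 + 14 * 4 = 58 bytes and is selected instead.
         */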
1593
1594        denali->nand.ecc.bytes *= denali->devnum;
1595        denali->nand.ecc.strength *= denali->devnum;
1596        denali->nand.ecc.layout->eccbytes *=
1597                denali->mtd.writesize / ECC_SECTOR_SIZE;
1598        denali->nand.ecc.layout->oobfree[0].offset =
1599                denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
1600        denali->nand.ecc.layout->oobfree[0].length =
1601                denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
1602                denali->bbtskipbytes;
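        /*
         * The OOB layout reported to MTD is [skip bytes][ECC bytes][free]:
         * the free area starts right after the skipped bytes plus the ECC
         * code and runs to the end of the OOB. Continuing the hypothetical
         * example above, that is offset 2 + 56 = 58 with 64 - 56 - 2 = 6
         * free bytes.
         */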
1603
1604        /*
1605         * Let the driver know the total number of blocks and how many
1606         * blocks are contained in each NAND chip. blksperchip will help
1607         * the driver know how many blocks are taken by the FW.
1608         */
1609        denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift;
1610        denali->blksperchip = denali->totalblks / denali->nand.numchips;
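        /*
         * E.g., a hypothetical 1 GiB total size with 128 KiB erase blocks
         * gives totalblks = 8192; with two chips that is 4096 blocks per chip.
         */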
1611
1612        /*
1613         * These functions are required by the NAND core framework, otherwise,
1614         * the NAND core will assert. However, we don't need them, so we'll stub
1615         * them out.
1616         */
1617        denali->nand.ecc.calculate = denali_ecc_calculate;
1618        denali->nand.ecc.correct = denali_ecc_correct;
1619        denali->nand.ecc.hwctl = denali_ecc_hwctl;
1620
1621        /* override the default read operations */
1622        denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
1623        denali->nand.ecc.read_page = denali_read_page;
1624        denali->nand.ecc.read_page_raw = denali_read_page_raw;
1625        denali->nand.ecc.write_page = denali_write_page;
1626        denali->nand.ecc.write_page_raw = denali_write_page_raw;
1627        denali->nand.ecc.read_oob = denali_read_oob;
1628        denali->nand.ecc.write_oob = denali_write_oob;
1629        denali->nand.erase = denali_erase;
1630
1631        if (nand_scan_tail(&denali->mtd)) {
1632                ret = -ENXIO;
1633                goto failed_req_irq;
1634        }
1635
1636        ret = mtd_device_register(&denali->mtd, NULL, 0);
1637        if (ret) {
1638                dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
1639                                ret);
1640                goto failed_req_irq;
1641        }
1642        return 0;
1643
1644failed_req_irq:
1645        denali_irq_cleanup(denali->irq, denali);
1646
1647        return ret;
1648}
1649EXPORT_SYMBOL(denali_init);
1650
1651/* driver exit point */
1652void denali_remove(struct denali_nand_info *denali)
1653{
1654        denali_irq_cleanup(denali->irq, denali);
1655        dma_unmap_single(denali->dev, denali->buf.dma_buf,
1656                         denali->mtd.writesize + denali->mtd.oobsize,
1657                         DMA_BIDIRECTIONAL);
1658}
1659EXPORT_SYMBOL(denali_remove);
1660