linux/drivers/mtd/nand/raw/lpc32xx_slc.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * NXP LPC32XX NAND SLC driver
   4 *
   5 * Authors:
   6 *    Kevin Wells <kevin.wells@nxp.com>
   7 *    Roland Stigge <stigge@antcom.de>
   8 *
   9 * Copyright © 2011 NXP Semiconductors
  10 * Copyright © 2012 Roland Stigge
  11 */
  12
  13#include <linux/slab.h>
  14#include <linux/module.h>
  15#include <linux/platform_device.h>
  16#include <linux/mtd/mtd.h>
  17#include <linux/mtd/rawnand.h>
  18#include <linux/mtd/partitions.h>
  19#include <linux/clk.h>
  20#include <linux/err.h>
  21#include <linux/delay.h>
  22#include <linux/io.h>
  23#include <linux/mm.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/dmaengine.h>
  26#include <linux/gpio.h>
  27#include <linux/of.h>
  28#include <linux/of_gpio.h>
  29#include <linux/mtd/lpc32xx_slc.h>
  30#include <linux/mtd/nand-ecc-sw-hamming.h>
  31
  32#define LPC32XX_MODNAME         "lpc32xx-nand"
  33
  34/**********************************************************************
  35* SLC NAND controller register offsets
  36**********************************************************************/
  37
  38#define SLC_DATA(x)             (x + 0x000)
  39#define SLC_ADDR(x)             (x + 0x004)
  40#define SLC_CMD(x)              (x + 0x008)
  41#define SLC_STOP(x)             (x + 0x00C)
  42#define SLC_CTRL(x)             (x + 0x010)
  43#define SLC_CFG(x)              (x + 0x014)
  44#define SLC_STAT(x)             (x + 0x018)
  45#define SLC_INT_STAT(x)         (x + 0x01C)
  46#define SLC_IEN(x)              (x + 0x020)
  47#define SLC_ISR(x)              (x + 0x024)
  48#define SLC_ICR(x)              (x + 0x028)
  49#define SLC_TAC(x)              (x + 0x02C)
  50#define SLC_TC(x)               (x + 0x030)
  51#define SLC_ECC(x)              (x + 0x034)
  52#define SLC_DMA_DATA(x)         (x + 0x038)
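
/*
 * These helpers simply add a register offset to a base address. The driver
 * uses them both with the remapped virtual base (host->io_base, for
 * readl()/writel()) and with the physical base (host->io_base_dma) when
 * building the DMA slave addresses for SLC_DMA_DATA and SLC_ECC.
 */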
  53
  54/**********************************************************************
  55* slc_ctrl register definitions
  56**********************************************************************/
  57#define SLCCTRL_SW_RESET        (1 << 2) /* Reset the NAND controller bit */
  58#define SLCCTRL_ECC_CLEAR       (1 << 1) /* Reset ECC bit */
  59#define SLCCTRL_DMA_START       (1 << 0) /* Start DMA channel bit */
  60
  61/**********************************************************************
  62* slc_cfg register definitions
  63**********************************************************************/
  64#define SLCCFG_CE_LOW           (1 << 5) /* Force CE low bit */
  65#define SLCCFG_DMA_ECC          (1 << 4) /* Enable DMA ECC bit */
  66#define SLCCFG_ECC_EN           (1 << 3) /* ECC enable bit */
  67#define SLCCFG_DMA_BURST        (1 << 2) /* DMA burst bit */
  68#define SLCCFG_DMA_DIR          (1 << 1) /* DMA write(0)/read(1) bit */
  69#define SLCCFG_WIDTH            (1 << 0) /* External device width, 0=8bit */
  70
  71/**********************************************************************
  72* slc_stat register definitions
  73**********************************************************************/
  74#define SLCSTAT_DMA_FIFO        (1 << 2) /* DMA FIFO has data bit */
  75#define SLCSTAT_SLC_FIFO        (1 << 1) /* SLC FIFO has data bit */
  76#define SLCSTAT_NAND_READY      (1 << 0) /* NAND device is ready bit */
  77
  78/**********************************************************************
  79* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
  80**********************************************************************/
  81#define SLCSTAT_INT_TC          (1 << 1) /* Transfer count bit */
  82#define SLCSTAT_INT_RDY_EN      (1 << 0) /* Ready interrupt bit */
  83
  84/**********************************************************************
  85* slc_tac register definitions
  86**********************************************************************/
   87/* Computation of clock cycles based on the controller and device clock rates */
  88#define SLCTAC_CLOCKS(c, n, s)  (min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
  89
  90/* Clock setting for RDY write sample wait time in 2*n clocks */
  91#define SLCTAC_WDR(n)           (((n) & 0xF) << 28)
  92/* Write pulse width in clock cycles, 1 to 16 clocks */
  93#define SLCTAC_WWIDTH(c, n)     (SLCTAC_CLOCKS(c, n, 24))
  94/* Write hold time of control and data signals, 1 to 16 clocks */
  95#define SLCTAC_WHOLD(c, n)      (SLCTAC_CLOCKS(c, n, 20))
  96/* Write setup time of control and data signals, 1 to 16 clocks */
  97#define SLCTAC_WSETUP(c, n)     (SLCTAC_CLOCKS(c, n, 16))
  98/* Clock setting for RDY read sample wait time in 2*n clocks */
  99#define SLCTAC_RDR(n)           (((n) & 0xF) << 12)
 100/* Read pulse width in clock cycles, 1 to 16 clocks */
 101#define SLCTAC_RWIDTH(c, n)     (SLCTAC_CLOCKS(c, n, 8))
 102/* Read hold time of control and data signals, 1 to 16 clocks */
 103#define SLCTAC_RHOLD(c, n)      (SLCTAC_CLOCKS(c, n, 4))
 104/* Read setup time of control and data signals, 1 to 16 clocks */
 105#define SLCTAC_RSETUP(c, n)     (SLCTAC_CLOCKS(c, n, 0))
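
/*
 * Worked example (numbers are illustrative only): with a 133.25 MHz SLC base
 * clock and a requested 40 MHz write pulse rate,
 *   SLCTAC_WWIDTH(133250000, 40000000)
 *     = min(DIV_ROUND_UP(133250000, 40000000) - 1, 0xF) << 24
 *     = (4 - 1) << 24 = 3 << 24
 * i.e. a field value of 3 which, given the 1-to-16 clock range of the 4-bit
 * field, corresponds to a 4-clock pulse.
 */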
 106
 107/**********************************************************************
 108* slc_ecc register definitions
 109**********************************************************************/
  110/* ECC line parity fetch macro */
 111#define SLCECC_TO_LINEPAR(n)    (((n) >> 6) & 0x7FFF)
 112#define SLCECC_TO_COLPAR(n)     ((n) & 0x3F)
 113
 114/*
 115 * DMA requires storage space for the DMA local buffer and the hardware ECC
  116 * storage area. The DMA local buffer is only used when the caller's
  117 * buffer is not suitable for direct DMA (e.g. not in lowmem).
 118 */
 119#define LPC32XX_DMA_DATA_SIZE           4096
 120#define LPC32XX_ECC_SAVE_SIZE           ((4096 / 256) * 4)
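
/*
 * LPC32XX_ECC_SAVE_SIZE works out to (4096 / 256) * 4 = 64 bytes: one 32-bit
 * hardware ECC word for each 256-byte ECC step of the largest supported
 * (4 KiB) page.
 */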
 121
 122/* Number of bytes used for ECC stored in NAND per 256 bytes */
 123#define LPC32XX_SLC_DEV_ECC_BYTES       3
 124
 125/*
 126 * If the NAND base clock frequency can't be fetched, this frequency will be
 127 * used instead as the base. This rate is used to setup the timing registers
 128 * used for NAND accesses.
 129 */
 130#define LPC32XX_DEF_BUS_RATE            133250000
 131
 132/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
 133#define LPC32XX_DMA_TIMEOUT             100
 134
 135/*
 136 * NAND ECC Layout for small page NAND devices
 137 * Note: For large and huge page devices, the default layouts are used
 138 */
 139static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
 140                                 struct mtd_oob_region *oobregion)
 141{
 142        if (section)
 143                return -ERANGE;
 144
 145        oobregion->length = 6;
 146        oobregion->offset = 10;
 147
 148        return 0;
 149}
 150
 151static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
 152                                  struct mtd_oob_region *oobregion)
 153{
 154        if (section > 1)
 155                return -ERANGE;
 156
 157        if (!section) {
 158                oobregion->offset = 0;
 159                oobregion->length = 4;
 160        } else {
 161                oobregion->offset = 6;
 162                oobregion->length = 4;
 163        }
 164
 165        return 0;
 166}
 167
 168static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
 169        .ecc = lpc32xx_ooblayout_ecc,
 170        .free = lpc32xx_ooblayout_free,
 171};
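
/*
 * Resulting small-page (16-byte) OOB map with the layout above:
 *   bytes  0..3   free (also holds the flash BBT marker, see below)
 *   bytes  6..9   free (byte 6 doubles as the BBT version slot)
 *   bytes 10..15  hardware ECC (2 steps * 3 ECC bytes)
 * Bytes 4 and 5 are left untouched; byte 5 is the conventional factory bad
 * block marker position on small-page devices.
 */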
 172
 173static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
 174static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
 175
 176/*
 177 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
  178 * Note: Large page devices use the default layout
 179 */
 180static struct nand_bbt_descr bbt_smallpage_main_descr = {
 181        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
 182                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
 183        .offs = 0,
 184        .len = 4,
 185        .veroffs = 6,
 186        .maxblocks = 4,
 187        .pattern = bbt_pattern
 188};
 189
 190static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
 191        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
 192                | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
 193        .offs = 0,
 194        .len = 4,
 195        .veroffs = 6,
 196        .maxblocks = 4,
 197        .pattern = mirror_pattern
 198};
 199
 200/*
 201 * NAND platform configuration structure
 202 */
 203struct lpc32xx_nand_cfg_slc {
 204        uint32_t wdr_clks;
 205        uint32_t wwidth;
 206        uint32_t whold;
 207        uint32_t wsetup;
 208        uint32_t rdr_clks;
 209        uint32_t rwidth;
 210        uint32_t rhold;
 211        uint32_t rsetup;
 212        int wp_gpio;
 213        struct mtd_partition *parts;
 214        unsigned num_parts;
 215};
 216
 217struct lpc32xx_nand_host {
 218        struct nand_chip        nand_chip;
 219        struct lpc32xx_slc_platform_data *pdata;
 220        struct clk              *clk;
 221        void __iomem            *io_base;
 222        struct lpc32xx_nand_cfg_slc *ncfg;
 223
 224        struct completion       comp;
 225        struct dma_chan         *dma_chan;
 226        uint32_t                dma_buf_len;
 227        struct dma_slave_config dma_slave_config;
 228        struct scatterlist      sgl;
 229
 230        /*
 231         * DMA and CPU addresses of ECC work area and data buffer
 232         */
 233        uint32_t                *ecc_buf;
 234        uint8_t                 *data_buf;
 235        dma_addr_t              io_base_dma;
 236};
 237
 238static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
 239{
 240        uint32_t clkrate, tmp;
 241
 242        /* Reset SLC controller */
 243        writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
 244        udelay(1000);
 245
 246        /* Basic setup */
 247        writel(0, SLC_CFG(host->io_base));
 248        writel(0, SLC_IEN(host->io_base));
 249        writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
 250                SLC_ICR(host->io_base));
 251
 252        /* Get base clock for SLC block */
 253        clkrate = clk_get_rate(host->clk);
 254        if (clkrate == 0)
 255                clkrate = LPC32XX_DEF_BUS_RATE;
 256
 257        /* Compute clock setup values */
 258        tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
 259                SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
 260                SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
 261                SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
 262                SLCTAC_RDR(host->ncfg->rdr_clks) |
 263                SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
 264                SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
 265                SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
 266        writel(tmp, SLC_TAC(host->io_base));
 267}
 268
 269/*
 270 * Hardware specific access to control lines
 271 */
 272static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
 273                                  unsigned int ctrl)
 274{
 275        uint32_t tmp;
 276        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 277
 278        /* Does CE state need to be changed? */
 279        tmp = readl(SLC_CFG(host->io_base));
 280        if (ctrl & NAND_NCE)
 281                tmp |= SLCCFG_CE_LOW;
 282        else
 283                tmp &= ~SLCCFG_CE_LOW;
 284        writel(tmp, SLC_CFG(host->io_base));
 285
 286        if (cmd != NAND_CMD_NONE) {
 287                if (ctrl & NAND_CLE)
 288                        writel(cmd, SLC_CMD(host->io_base));
 289                else
 290                        writel(cmd, SLC_ADDR(host->io_base));
 291        }
 292}
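
/*
 * Unlike controllers with dedicated CLE/ALE toggling, the SLC block latches
 * a command or address cycle depending on which register is written: bytes
 * written to SLC_CMD go out as command cycles and bytes written to SLC_ADDR
 * as address cycles, while SLCCFG_CE_LOW drives the chip enable.
 */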
 293
 294/*
 295 * Read the Device Ready pin
 296 */
 297static int lpc32xx_nand_device_ready(struct nand_chip *chip)
 298{
 299        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 300        int rdy = 0;
 301
 302        if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
 303                rdy = 1;
 304
 305        return rdy;
 306}
 307
 308/*
 309 * Enable NAND write protect
 310 */
 311static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
 312{
 313        if (gpio_is_valid(host->ncfg->wp_gpio))
 314                gpio_set_value(host->ncfg->wp_gpio, 0);
 315}
 316
 317/*
 318 * Disable NAND write protect
 319 */
 320static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
 321{
 322        if (gpio_is_valid(host->ncfg->wp_gpio))
 323                gpio_set_value(host->ncfg->wp_gpio, 1);
 324}
 325
 326/*
 327 * Prepares SLC for transfers with H/W ECC enabled
 328 */
 329static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
 330{
  331        /* ECC is enabled automatically by the hardware as needed */
 332}
 333
 334/*
 335 * Calculates the ECC for the data
 336 */
 337static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
 338                                      const unsigned char *buf,
 339                                      unsigned char *code)
 340{
 341        /*
 342         * ECC is calculated automatically in hardware during syndrome read
 343         * and write operations, so it doesn't need to be calculated here.
 344         */
 345        return 0;
 346}
 347
 348/*
 349 * Corrects the data
 350 */
 351static int lpc32xx_nand_ecc_correct(struct nand_chip *chip,
 352                                    unsigned char *buf,
 353                                    unsigned char *read_ecc,
 354                                    unsigned char *calc_ecc)
 355{
 356        return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
 357                                      chip->ecc.size, false);
 358}
 359
 360/*
 361 * Read a single byte from NAND device
 362 */
 363static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
 364{
 365        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 366
 367        return (uint8_t)readl(SLC_DATA(host->io_base));
 368}
 369
 370/*
 371 * Simple device read without ECC
 372 */
 373static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
 374{
 375        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 376
 377        /* Direct device read with no ECC */
 378        while (len-- > 0)
 379                *buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
 380}
 381
 382/*
 383 * Simple device write without ECC
 384 */
 385static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
 386                                   int len)
 387{
 388        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 389
 390        /* Direct device write with no ECC */
 391        while (len-- > 0)
 392                writel((uint32_t)*buf++, SLC_DATA(host->io_base));
 393}
 394
 395/*
 396 * Read the OOB data from the device without ECC using FIFO method
 397 */
 398static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
 399{
 400        struct mtd_info *mtd = nand_to_mtd(chip);
 401
 402        return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
 403}
 404
 405/*
 406 * Write the OOB data to the device without ECC using FIFO method
 407 */
 408static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
 409{
 410        struct mtd_info *mtd = nand_to_mtd(chip);
 411
 412        return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
 413                                 mtd->oobsize);
 414}
 415
 416/*
 417 * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
 418 */
 419static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
 420{
 421        int i;
 422
 423        for (i = 0; i < (count * 3); i += 3) {
 424                uint32_t ce = ecc[i / 3];
 425                ce = ~(ce << 2) & 0xFFFFFF;
 426                spare[i + 2] = (uint8_t)(ce & 0xFF);
 427                ce >>= 8;
 428                spare[i + 1] = (uint8_t)(ce & 0xFF);
 429                ce >>= 8;
 430                spare[i] = (uint8_t)(ce & 0xFF);
 431        }
 432}
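
/*
 * Example of the transformation above (value chosen for illustration only):
 * for a hardware ECC word ecc[k] = 0x123456,
 *   ce = ~(0x123456 << 2) & 0xFFFFFF = ~0x48D158 & 0xFFFFFF = 0xB72EA7
 * which is stored most significant byte first:
 *   spare[3k] = 0xB7, spare[3k + 1] = 0x2E, spare[3k + 2] = 0xA7
 * The same 3-byte-per-step format is used both for the ECC written to the
 * OOB area and for the values handed to lpc32xx_nand_ecc_correct().
 */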
 433
 434static void lpc32xx_dma_complete_func(void *completion)
 435{
 436        complete(completion);
 437}
 438
 439static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
 440                            void *mem, int len, enum dma_transfer_direction dir)
 441{
 442        struct nand_chip *chip = mtd_to_nand(mtd);
 443        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 444        struct dma_async_tx_descriptor *desc;
 445        int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 446        int res;
 447
 448        host->dma_slave_config.direction = dir;
 449        host->dma_slave_config.src_addr = dma;
 450        host->dma_slave_config.dst_addr = dma;
 451        host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 452        host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 453        host->dma_slave_config.src_maxburst = 4;
 454        host->dma_slave_config.dst_maxburst = 4;
 455        /* DMA controller does flow control: */
 456        host->dma_slave_config.device_fc = false;
 457        if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
 458                dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
 459                return -ENXIO;
 460        }
 461
 462        sg_init_one(&host->sgl, mem, len);
 463
 464        res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
 465                         DMA_BIDIRECTIONAL);
 466        if (res != 1) {
 467                dev_err(mtd->dev.parent, "Failed to map sg list\n");
 468                return -ENXIO;
 469        }
 470        desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
 471                                       flags);
 472        if (!desc) {
 473                dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
 474                goto out1;
 475        }
 476
 477        init_completion(&host->comp);
 478        desc->callback = lpc32xx_dma_complete_func;
 479        desc->callback_param = &host->comp;
 480
 481        dmaengine_submit(desc);
 482        dma_async_issue_pending(host->dma_chan);
 483
 484        wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
 485
 486        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
 487                     DMA_BIDIRECTIONAL);
 488
 489        return 0;
 490out1:
 491        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
 492                     DMA_BIDIRECTIONAL);
 493        return -ENXIO;
 494}
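
/*
 * Both src_addr and dst_addr above are programmed with the same FIFO
 * address; the slave direction (DMA_DEV_TO_MEM or DMA_MEM_TO_DEV) selects
 * which one the DMA engine actually uses. The sg list is mapped
 * DMA_BIDIRECTIONAL so that this single helper serves reads and writes.
 */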
 495
 496/*
 497 * DMA read/write transfers with ECC support
 498 */
 499static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
 500                        int read)
 501{
 502        struct nand_chip *chip = mtd_to_nand(mtd);
 503        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 504        int i, status = 0;
 505        unsigned long timeout;
 506        int res;
 507        enum dma_transfer_direction dir =
 508                read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 509        uint8_t *dma_buf;
 510        bool dma_mapped;
 511
 512        if ((void *)buf <= high_memory) {
 513                dma_buf = buf;
 514                dma_mapped = true;
 515        } else {
 516                dma_buf = host->data_buf;
 517                dma_mapped = false;
 518                if (!read)
 519                        memcpy(host->data_buf, buf, mtd->writesize);
 520        }
 521
 522        if (read) {
 523                writel(readl(SLC_CFG(host->io_base)) |
 524                       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
 525                       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
 526        } else {
 527                writel((readl(SLC_CFG(host->io_base)) |
 528                        SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
 529                       ~SLCCFG_DMA_DIR,
 530                        SLC_CFG(host->io_base));
 531        }
 532
 533        /* Clear initial ECC */
 534        writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
 535
 536        /* Transfer size is data area only */
 537        writel(mtd->writesize, SLC_TC(host->io_base));
 538
 539        /* Start transfer in the NAND controller */
 540        writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
 541               SLC_CTRL(host->io_base));
 542
 543        for (i = 0; i < chip->ecc.steps; i++) {
 544                /* Data */
 545                res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
 546                                       dma_buf + i * chip->ecc.size,
 547                                       mtd->writesize / chip->ecc.steps, dir);
 548                if (res)
 549                        return res;
 550
  551                /* ECC is always read back, even on writes; the last word is read via PIO below */
 552                if (i == chip->ecc.steps - 1)
 553                        break;
 554                if (!read) /* ECC availability delayed on write */
 555                        udelay(10);
 556                res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
 557                                       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
 558                if (res)
 559                        return res;
 560        }
 561
  562        /*
  563         * According to NXP, the DMA transfer may be finished here while the
  564         * NAND controller still has buffered data. Since the port to the
  565         * dmaengine driver (amba-pl080), tests show that the DMA FIFO is
  566         * already empty at this point, but the check is kept for safety
  567         * reasons for now.
  568         */
 569        if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
 570                dev_warn(mtd->dev.parent, "FIFO not empty!\n");
 571                timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
 572                while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
 573                       time_before(jiffies, timeout))
 574                        cpu_relax();
 575                if (!time_before(jiffies, timeout)) {
 576                        dev_err(mtd->dev.parent, "FIFO held data too long\n");
 577                        status = -EIO;
 578                }
 579        }
 580
 581        /* Read last calculated ECC value */
 582        if (!read)
 583                udelay(10);
 584        host->ecc_buf[chip->ecc.steps - 1] =
 585                readl(SLC_ECC(host->io_base));
 586
 587        /* Flush DMA */
 588        dmaengine_terminate_all(host->dma_chan);
 589
 590        if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
 591            readl(SLC_TC(host->io_base))) {
 592                /* Something is left in the FIFO, something is wrong */
 593                dev_err(mtd->dev.parent, "DMA FIFO failure\n");
 594                status = -EIO;
 595        }
 596
 597        /* Stop DMA & HW ECC */
 598        writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
 599               SLC_CTRL(host->io_base));
 600        writel(readl(SLC_CFG(host->io_base)) &
 601               ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
 602                 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
 603
 604        if (!dma_mapped && read)
 605                memcpy(buf, host->data_buf, mtd->writesize);
 606
 607        return status;
 608}
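
/*
 * Per-page transfer sequence implemented above:
 *   1. Enable ECC and DMA support in SLC_CFG and clear the ECC logic.
 *   2. Program SLC_TC with the data size and set SLCCTRL_DMA_START.
 *   3. For each 256-byte ECC step, DMA the data chunk and then DMA-read the
 *      32-bit ECC word into host->ecc_buf; the last ECC word is read via PIO
 *      once the FIFO has drained.
 *   4. Stop DMA/ECC and, if a bounce buffer was used, copy the data back.
 * The OOB bytes are not part of this transfer; callers move them over the
 * FIFO interface separately.
 */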
 609
 610/*
 611 * Read the data and OOB data from the device, use ECC correction with the
 612 * data, disable ECC for the OOB data
 613 */
 614static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
 615                                           int oob_required, int page)
 616{
 617        struct mtd_info *mtd = nand_to_mtd(chip);
 618        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 619        struct mtd_oob_region oobregion = { };
 620        int stat, i, status, error;
 621        uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
 622
 623        /* Issue read command */
 624        nand_read_page_op(chip, page, 0, NULL, 0);
 625
 626        /* Read data and oob, calculate ECC */
 627        status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
 628
 629        /* Get OOB data */
 630        chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
 631
 632        /* Convert to stored ECC format */
 633        lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
 634
 635        /* Pointer to ECC data retrieved from NAND spare area */
 636        error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
 637        if (error)
 638                return error;
 639
 640        oobecc = chip->oob_poi + oobregion.offset;
 641
 642        for (i = 0; i < chip->ecc.steps; i++) {
 643                stat = chip->ecc.correct(chip, buf, oobecc,
 644                                         &tmpecc[i * chip->ecc.bytes]);
 645                if (stat < 0)
 646                        mtd->ecc_stats.failed++;
 647                else
 648                        mtd->ecc_stats.corrected += stat;
 649
 650                buf += chip->ecc.size;
 651                oobecc += chip->ecc.bytes;
 652        }
 653
 654        return status;
 655}
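
/*
 * Read path summary: the page data arrives via DMA with hardware ECC
 * accumulation, the OOB bytes are then fetched raw over the FIFO, and the
 * ECC stored in the OOB is compared against the hardware result in software,
 * one 256-byte step at a time, updating mtd->ecc_stats accordingly.
 */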
 656
 657/*
 658 * Read the data and OOB data from the device, no ECC correction with the
 659 * data or OOB data
 660 */
 661static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
 662                                               uint8_t *buf, int oob_required,
 663                                               int page)
 664{
 665        struct mtd_info *mtd = nand_to_mtd(chip);
 666
 667        /* Issue read command */
 668        nand_read_page_op(chip, page, 0, NULL, 0);
 669
 670        /* Raw reads can just use the FIFO interface */
 671        chip->legacy.read_buf(chip, buf, chip->ecc.size * chip->ecc.steps);
 672        chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
 673
 674        return 0;
 675}
 676
 677/*
 678 * Write the data and OOB data to the device, use ECC with the data,
 679 * disable ECC for the OOB data
 680 */
 681static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
 682                                            const uint8_t *buf,
 683                                            int oob_required, int page)
 684{
 685        struct mtd_info *mtd = nand_to_mtd(chip);
 686        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 687        struct mtd_oob_region oobregion = { };
 688        uint8_t *pb;
 689        int error;
 690
 691        nand_prog_page_begin_op(chip, page, 0, NULL, 0);
 692
 693        /* Write data, calculate ECC on outbound data */
 694        error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
 695        if (error)
 696                return error;
 697
  698        /*
  699         * The calculated ECC needs some post-processing before it can be
  700         * committed to NAND; place the resulting values into the OOB buffer.
  701         */
 702        error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
 703        if (error)
 704                return error;
 705
 706        pb = chip->oob_poi + oobregion.offset;
 707        lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
 708
 709        /* Write ECC data to device */
 710        chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
 711
 712        return nand_prog_page_end_op(chip);
 713}
 714
 715/*
 716 * Write the data and OOB data to the device, no ECC correction with the
 717 * data or OOB data
 718 */
 719static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
 720                                                const uint8_t *buf,
 721                                                int oob_required, int page)
 722{
 723        struct mtd_info *mtd = nand_to_mtd(chip);
 724
 725        /* Raw writes can just use the FIFO interface */
 726        nand_prog_page_begin_op(chip, page, 0, buf,
 727                                chip->ecc.size * chip->ecc.steps);
 728        chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
 729
 730        return nand_prog_page_end_op(chip);
 731}
 732
 733static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
 734{
 735        struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
 736        dma_cap_mask_t mask;
 737
 738        if (!host->pdata || !host->pdata->dma_filter) {
 739                dev_err(mtd->dev.parent, "no DMA platform data\n");
 740                return -ENOENT;
 741        }
 742
 743        dma_cap_zero(mask);
 744        dma_cap_set(DMA_SLAVE, mask);
 745        host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
 746                                             "nand-slc");
 747        if (!host->dma_chan) {
 748                dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
 749                return -EBUSY;
 750        }
 751
 752        return 0;
 753}
 754
 755static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
 756{
 757        struct lpc32xx_nand_cfg_slc *ncfg;
 758        struct device_node *np = dev->of_node;
 759
 760        ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
 761        if (!ncfg)
 762                return NULL;
 763
 764        of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
 765        of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
 766        of_property_read_u32(np, "nxp,whold", &ncfg->whold);
 767        of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
 768        of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
 769        of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
 770        of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
 771        of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
 772
 773        if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
 774            !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
 775            !ncfg->rhold || !ncfg->rsetup) {
 776                dev_err(dev, "chip parameters not specified correctly\n");
 777                return NULL;
 778        }
 779
 780        ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
 781
 782        return ncfg;
 783}
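
/*
 * A minimal sketch of a matching device tree node. The property values and
 * the GPIO specifier are illustrative placeholders only; real timings must
 * come from the board's NAND datasheet and the existing LPC32xx board files:
 *
 *        flash@20020000 {
 *                compatible = "nxp,lpc3220-slc";
 *                reg = <0x20020000 0x1000>;
 *                gpios = <&gpio 5 19 1>;        // write protect, board specific
 *                nxp,wdr-clks = <14>;
 *                nxp,wwidth = <40000000>;
 *                nxp,whold = <100000000>;
 *                nxp,wsetup = <100000000>;
 *                nxp,rdr-clks = <14>;
 *                nxp,rwidth = <40000000>;
 *                nxp,rhold = <66666666>;
 *                nxp,rsetup = <100000000>;
 *        };
 */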
 784
 785static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
 786{
 787        struct mtd_info *mtd = nand_to_mtd(chip);
 788        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
 789
 790        if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
 791                return 0;
 792
 793        /* OOB and ECC CPU and DMA work areas */
 794        host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
 795
 796        /*
 797         * Small page FLASH has a unique OOB layout, but large and huge
 798         * page FLASH use the standard layout. Small page FLASH uses a
 799         * custom BBT marker layout.
 800         */
 801        if (mtd->writesize <= 512)
 802                mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
 803
 804        chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
 805        /* These sizes remain the same regardless of page size */
 806        chip->ecc.size = 256;
 807        chip->ecc.strength = 1;
 808        chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
 809        chip->ecc.prepad = 0;
 810        chip->ecc.postpad = 0;
 811        chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
 812        chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
 813        chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
 814        chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
 815        chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
 816        chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
 817        chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
 818        chip->ecc.correct = lpc32xx_nand_ecc_correct;
 819        chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
 820
 821        /*
 822         * Use a custom BBT marker setup for small page FLASH that
 823         * won't interfere with the ECC layout. Large and huge page
 824         * FLASH use the standard layout.
 825         */
 826        if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
 827            mtd->writesize <= 512) {
 828                chip->bbt_td = &bbt_smallpage_main_descr;
 829                chip->bbt_md = &bbt_smallpage_mirror_descr;
 830        }
 831
 832        return 0;
 833}
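
/*
 * With the settings above, the SLC applies a 1-bit Hamming code per 256-byte
 * step (3 ECC bytes each, ecc.strength = 1), computed by the hardware during
 * the DMA transfers; calculate() is therefore a no-op and only correct()
 * does real work in software.
 */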
 834
 835static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
 836        .attach_chip = lpc32xx_nand_attach_chip,
 837};
 838
 839/*
 840 * Probe for NAND controller
 841 */
 842static int lpc32xx_nand_probe(struct platform_device *pdev)
 843{
 844        struct lpc32xx_nand_host *host;
 845        struct mtd_info *mtd;
 846        struct nand_chip *chip;
 847        struct resource *rc;
 848        int res;
 849
 850        /* Allocate memory for the device structure (and zero it) */
 851        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
 852        if (!host)
 853                return -ENOMEM;
 854
 855        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 856        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
 857        if (IS_ERR(host->io_base))
 858                return PTR_ERR(host->io_base);
 859
 860        host->io_base_dma = rc->start;
 861        if (pdev->dev.of_node)
 862                host->ncfg = lpc32xx_parse_dt(&pdev->dev);
 863        if (!host->ncfg) {
 864                dev_err(&pdev->dev,
 865                        "Missing or bad NAND config from device tree\n");
 866                return -ENOENT;
 867        }
 868        if (host->ncfg->wp_gpio == -EPROBE_DEFER)
 869                return -EPROBE_DEFER;
 870        if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
 871                        host->ncfg->wp_gpio, "NAND WP")) {
 872                dev_err(&pdev->dev, "GPIO not available\n");
 873                return -EBUSY;
 874        }
 875        lpc32xx_wp_disable(host);
 876
 877        host->pdata = dev_get_platdata(&pdev->dev);
 878
 879        chip = &host->nand_chip;
 880        mtd = nand_to_mtd(chip);
 881        nand_set_controller_data(chip, host);
 882        nand_set_flash_node(chip, pdev->dev.of_node);
 883        mtd->owner = THIS_MODULE;
 884        mtd->dev.parent = &pdev->dev;
 885
 886        /* Get NAND clock */
 887        host->clk = devm_clk_get(&pdev->dev, NULL);
 888        if (IS_ERR(host->clk)) {
 889                dev_err(&pdev->dev, "Clock failure\n");
 890                res = -ENOENT;
 891                goto enable_wp;
 892        }
 893        res = clk_prepare_enable(host->clk);
 894        if (res)
 895                goto enable_wp;
 896
 897        /* Set NAND IO addresses and command/ready functions */
 898        chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
 899        chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
 900        chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
 901        chip->legacy.dev_ready = lpc32xx_nand_device_ready;
 902        chip->legacy.chip_delay = 20; /* 20us command delay time */
 903
 904        /* Init NAND controller */
 905        lpc32xx_nand_setup(host);
 906
 907        platform_set_drvdata(pdev, host);
 908
 909        /* NAND callbacks for LPC32xx SLC hardware */
 910        chip->legacy.read_byte = lpc32xx_nand_read_byte;
 911        chip->legacy.read_buf = lpc32xx_nand_read_buf;
 912        chip->legacy.write_buf = lpc32xx_nand_write_buf;
 913
  914        /*
  915         * Allocate a buffer large enough for a single huge (4 KiB) page
  916         * plus extra space for the hardware ECC storage area
  917         */
 918        host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
 919        host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
 920                                      GFP_KERNEL);
 921        if (host->data_buf == NULL) {
 922                res = -ENOMEM;
 923                goto unprepare_clk;
 924        }
 925
 926        res = lpc32xx_nand_dma_setup(host);
 927        if (res) {
 928                res = -EIO;
 929                goto unprepare_clk;
 930        }
 931
 932        /* Find NAND device */
 933        chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
 934        res = nand_scan(chip, 1);
 935        if (res)
 936                goto release_dma;
 937
 938        mtd->name = "nxp_lpc3220_slc";
 939        res = mtd_device_register(mtd, host->ncfg->parts,
 940                                  host->ncfg->num_parts);
 941        if (res)
 942                goto cleanup_nand;
 943
 944        return 0;
 945
 946cleanup_nand:
 947        nand_cleanup(chip);
 948release_dma:
 949        dma_release_channel(host->dma_chan);
 950unprepare_clk:
 951        clk_disable_unprepare(host->clk);
 952enable_wp:
 953        lpc32xx_wp_enable(host);
 954
 955        return res;
 956}
 957
 958/*
 959 * Remove NAND device.
 960 */
 961static int lpc32xx_nand_remove(struct platform_device *pdev)
 962{
 963        uint32_t tmp;
 964        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
 965        struct nand_chip *chip = &host->nand_chip;
 966        int ret;
 967
 968        ret = mtd_device_unregister(nand_to_mtd(chip));
 969        WARN_ON(ret);
 970        nand_cleanup(chip);
 971        dma_release_channel(host->dma_chan);
 972
  973        /* Force CE high (SLCCFG_CE_LOW is a SLC_CFG register bit) */
  974        tmp = readl(SLC_CFG(host->io_base));
  975        tmp &= ~SLCCFG_CE_LOW;
  976        writel(tmp, SLC_CFG(host->io_base));
 977
 978        clk_disable_unprepare(host->clk);
 979        lpc32xx_wp_enable(host);
 980
 981        return 0;
 982}
 983
 984#ifdef CONFIG_PM
 985static int lpc32xx_nand_resume(struct platform_device *pdev)
 986{
 987        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
 988        int ret;
 989
 990        /* Re-enable NAND clock */
 991        ret = clk_prepare_enable(host->clk);
 992        if (ret)
 993                return ret;
 994
 995        /* Fresh init of NAND controller */
 996        lpc32xx_nand_setup(host);
 997
 998        /* Disable write protect */
 999        lpc32xx_wp_disable(host);
1000
1001        return 0;
1002}
1003
1004static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
1005{
1006        uint32_t tmp;
1007        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
1008
 1009        /* Force CE high (SLCCFG_CE_LOW is a SLC_CFG register bit) */
 1010        tmp = readl(SLC_CFG(host->io_base));
 1011        tmp &= ~SLCCFG_CE_LOW;
 1012        writel(tmp, SLC_CFG(host->io_base));
1013
1014        /* Enable write protect for safety */
1015        lpc32xx_wp_enable(host);
1016
1017        /* Disable clock */
1018        clk_disable_unprepare(host->clk);
1019
1020        return 0;
1021}
1022
1023#else
1024#define lpc32xx_nand_resume NULL
1025#define lpc32xx_nand_suspend NULL
1026#endif
1027
1028static const struct of_device_id lpc32xx_nand_match[] = {
1029        { .compatible = "nxp,lpc3220-slc" },
1030        { /* sentinel */ },
1031};
1032MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
1033
1034static struct platform_driver lpc32xx_nand_driver = {
1035        .probe          = lpc32xx_nand_probe,
1036        .remove         = lpc32xx_nand_remove,
1037        .resume         = lpc32xx_nand_resume,
1038        .suspend        = lpc32xx_nand_suspend,
1039        .driver         = {
1040                .name   = LPC32XX_MODNAME,
1041                .of_match_table = lpc32xx_nand_match,
1042        },
1043};
1044
1045module_platform_driver(lpc32xx_nand_driver);
1046
1047MODULE_LICENSE("GPL");
1048MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
1049MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
1050MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
1051