linux/drivers/mtd/nand/ecc.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic Error-Correcting Code (ECC) engine
 *
 * Copyright (C) 2019 Macronix
 * Author:
 *     Miquèl RAYNAL <miquel.raynal@bootlin.com>
 *
 *
 * This file describes the abstraction of any NAND ECC engine. It has been
 * designed to fit most cases, including parallel NANDs and SPI-NANDs.
 *
 * There are three main situations where instantiating this ECC engine makes
 * sense:
 *   - external: The ECC engine is outside the NAND pipeline, typically this
 *               is a software ECC engine, or a hardware engine that is
 *               outside the NAND controller pipeline.
 *   - pipelined: The ECC engine is inside the NAND pipeline, i.e. on the
 *                controller's side. This is the case of most raw NAND
 *                controllers. In the pipelined case, the ECC bytes are
 *                generated/data corrected on the fly when a page is
 *                written/read.
 *   - ondie: The ECC engine is inside the NAND pipeline, on the chip's side.
 *            Some NAND chips can correct the data themselves.
 *
 * Besides the initial setup and final cleanups, the interfaces are rather
 * simple:
 *   - prepare: Prepare an I/O request. Enable/disable the ECC engine based on
 *              the I/O request type. In case of software correction or an
 *              external engine, this step may involve deriving the ECC bytes
 *              and placing them in the OOB area before a write.
 *   - finish: Finish an I/O request. Correct the data in case of a read
 *             request and report the number of corrected bits/uncorrectable
 *             errors. Most likely empty for write operations, unless you have
 *             hardware specific stuff to do, like shutting down the engine to
 *             save power.
 *
 * The I/O request should be enclosed in a prepare()/finish() pair of calls
 * and will behave differently depending on the requested I/O type:
 *   - raw: Correction disabled
 *   - ecc: Correction enabled
 *
 * The request direction impacts the logic as well:
 *   - read: Load data from the NAND chip
 *   - write: Store data in the NAND chip
 *
 * Mixing all these combinations together gives the following behavior.
 * Those are just examples, drivers are free to add custom steps in their
 * prepare/finish hooks.
 *
 * [external ECC engine]
 *   - external + prepare + raw + read: do nothing
 *   - external + finish  + raw + read: do nothing
 *   - external + prepare + raw + write: do nothing
 *   - external + finish  + raw + write: do nothing
 *   - external + prepare + ecc + read: do nothing
 *   - external + finish  + ecc + read: calculate expected ECC bytes, extract
 *                                      ECC bytes from OOB buffer, correct
 *                                      and report any bitflip/error
 *   - external + prepare + ecc + write: calculate ECC bytes and store them at
 *                                       the right place in the OOB buffer based
 *                                       on the OOB layout
 *   - external + finish  + ecc + write: do nothing
 *
 * [pipelined ECC engine]
 *   - pipelined + prepare + raw + read: disable the controller's ECC engine if
 *                                       activated
 *   - pipelined + finish  + raw + read: do nothing
 *   - pipelined + prepare + raw + write: disable the controller's ECC engine if
 *                                        activated
 *   - pipelined + finish  + raw + write: do nothing
 *   - pipelined + prepare + ecc + read: enable the controller's ECC engine if
 *                                       deactivated
 *   - pipelined + finish  + ecc + read: check the status, report any
 *                                       error/bitflip
 *   - pipelined + prepare + ecc + write: enable the controller's ECC engine if
 *                                        deactivated
 *   - pipelined + finish  + ecc + write: do nothing
 *
 * [ondie ECC engine]
 *   - ondie + prepare + raw + read: send commands to disable the on-chip ECC
 *                                   engine if activated
 *   - ondie + finish  + raw + read: do nothing
 *   - ondie + prepare + raw + write: send commands to disable the on-chip ECC
 *                                    engine if activated
 *   - ondie + finish  + raw + write: do nothing
 *   - ondie + prepare + ecc + read: send commands to enable the on-chip ECC
 *                                   engine if deactivated
 *   - ondie + finish  + ecc + read: send commands to check the status, report
 *                                   any error/bitflip
 *   - ondie + prepare + ecc + write: send commands to enable the on-chip ECC
 *                                    engine if deactivated
 *   - ondie + finish  + ecc + write: do nothing
 */

#include <linux/module.h>
#include <linux/mtd/nand.h>
#include <linux/slab.h>

/**
 * nand_ecc_init_ctx - Init the ECC engine context
 * @nand: the NAND device
 *
 * On success, the caller is responsible for calling @nand_ecc_cleanup_ctx().
 */
int nand_ecc_init_ctx(struct nand_device *nand)
{
        if (!nand->ecc.engine || !nand->ecc.engine->ops->init_ctx)
                return 0;

        return nand->ecc.engine->ops->init_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_init_ctx);

/**
 * nand_ecc_cleanup_ctx - Cleanup the ECC engine context
 * @nand: the NAND device
 */
void nand_ecc_cleanup_ctx(struct nand_device *nand)
{
        if (nand->ecc.engine && nand->ecc.engine->ops->cleanup_ctx)
                nand->ecc.engine->ops->cleanup_ctx(nand);
}
EXPORT_SYMBOL(nand_ecc_cleanup_ctx);

/**
 * nand_ecc_prepare_io_req - Prepare an I/O request
 * @nand: the NAND device
 * @req: the I/O request
 */
int nand_ecc_prepare_io_req(struct nand_device *nand,
                            struct nand_page_io_req *req)
{
        if (!nand->ecc.engine || !nand->ecc.engine->ops->prepare_io_req)
                return 0;

        return nand->ecc.engine->ops->prepare_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_prepare_io_req);

/**
 * nand_ecc_finish_io_req - Finish an I/O request
 * @nand: the NAND device
 * @req: the I/O request
 */
int nand_ecc_finish_io_req(struct nand_device *nand,
                           struct nand_page_io_req *req)
{
        if (!nand->ecc.engine || !nand->ecc.engine->ops->finish_io_req)
                return 0;

        return nand->ecc.engine->ops->finish_io_req(nand, req);
}
EXPORT_SYMBOL(nand_ecc_finish_io_req);
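
/*
 * Illustrative sketch (not part of this file's build): a host driver is
 * expected to enclose each page access between the two helpers above.
 * my_read_page() and do_page_read() are hypothetical placeholder names.
 *
 *      static int my_read_page(struct nand_device *nand,
 *                              struct nand_page_io_req *req)
 *      {
 *              int ret;
 *
 *              ret = nand_ecc_prepare_io_req(nand, req);
 *              if (ret)
 *                      return ret;
 *
 *              ret = do_page_read(nand, req);  // the actual data transfer
 *              if (ret)
 *                      return ret;
 *
 *              // On reads this typically reports the maximum number of
 *              // corrected bitflips, or a negative error such as -EBADMSG.
 *              return nand_ecc_finish_io_req(nand, req);
 *      }
 */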

/* Define default OOB placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        unsigned int total_ecc_bytes = nand->ecc.ctx.total;

        if (section > 1)
                return -ERANGE;

        if (!section) {
                oobregion->offset = 0;
                if (mtd->oobsize == 16)
                        oobregion->length = 4;
                else
                        oobregion->length = 3;
        } else {
                if (mtd->oobsize == 8)
                        return -ERANGE;

                oobregion->offset = 6;
                oobregion->length = total_ecc_bytes - 4;
        }

        return 0;
}

static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oobregion)
{
        if (section > 1)
                return -ERANGE;

        if (mtd->oobsize == 16) {
                if (section)
                        return -ERANGE;

                oobregion->length = 8;
                oobregion->offset = 8;
        } else {
                oobregion->length = 2;
                if (!section)
                        oobregion->offset = 3;
                else
                        oobregion->offset = 6;
        }

        return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
        .ecc = nand_ooblayout_ecc_sp,
        .free = nand_ooblayout_free_sp,
};

const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void)
{
        return &nand_ooblayout_sp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_small_page_ooblayout);
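
/*
 * Worked example, assuming a 512-byte page with 16 OOB bytes and
 * 1-bit/256-byte software Hamming (nand->ecc.ctx.total == 6):
 *   - ECC section 0:  bytes 0-3
 *   - ECC section 1:  bytes 6-7
 *   - free section 0: bytes 8-15
 * Bytes 4-5 are left out of both regions; this is where the bad block
 * marker typically lives on small-page devices.
 */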

static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        unsigned int total_ecc_bytes = nand->ecc.ctx.total;

        if (section || !total_ecc_bytes)
                return -ERANGE;

        oobregion->length = total_ecc_bytes;
        oobregion->offset = mtd->oobsize - oobregion->length;

        return 0;
}

static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oobregion)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        unsigned int total_ecc_bytes = nand->ecc.ctx.total;

        if (section)
                return -ERANGE;

        oobregion->length = mtd->oobsize - total_ecc_bytes - 2;
        oobregion->offset = 2;

        return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
        .ecc = nand_ooblayout_ecc_lp,
        .free = nand_ooblayout_free_lp,
};

const struct mtd_ooblayout_ops *nand_get_large_page_ooblayout(void)
{
        return &nand_ooblayout_lp_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_ooblayout);
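
/*
 * Worked example, assuming a 2048-byte page with 64 OOB bytes and 24 ECC
 * bytes in total (e.g. 1-bit/256-byte Hamming, nand->ecc.ctx.total == 24):
 * the ECC bytes occupy the end of the OOB area (bytes 40-63) and bytes 2-39
 * are reported as free, keeping bytes 0-1 for the bad block marker.
 */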

/*
 * Support the old "large page" layout used for 1-bit Hamming ECC where the
 * ECC bytes are placed at a fixed offset.
 */
static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
                                         struct mtd_oob_region *oobregion)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        unsigned int total_ecc_bytes = nand->ecc.ctx.total;

        if (section)
                return -ERANGE;

        switch (mtd->oobsize) {
        case 64:
                oobregion->offset = 40;
                break;
        case 128:
                oobregion->offset = 80;
                break;
        default:
                return -EINVAL;
        }

        oobregion->length = total_ecc_bytes;
        if (oobregion->offset + oobregion->length > mtd->oobsize)
                return -ERANGE;

        return 0;
}

static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
                                          struct mtd_oob_region *oobregion)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        unsigned int total_ecc_bytes = nand->ecc.ctx.total;
        int ecc_offset = 0;

        if (section < 0 || section > 1)
                return -ERANGE;

        switch (mtd->oobsize) {
        case 64:
                ecc_offset = 40;
                break;
        case 128:
                ecc_offset = 80;
                break;
        default:
                return -EINVAL;
        }

        if (section == 0) {
                oobregion->offset = 2;
                oobregion->length = ecc_offset - 2;
        } else {
                oobregion->offset = ecc_offset + total_ecc_bytes;
                oobregion->length = mtd->oobsize - oobregion->offset;
        }

        return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
        .ecc = nand_ooblayout_ecc_lp_hamming,
        .free = nand_ooblayout_free_lp_hamming,
};

const struct mtd_ooblayout_ops *nand_get_large_page_hamming_ooblayout(void)
{
        return &nand_ooblayout_lp_hamming_ops;
}
EXPORT_SYMBOL_GPL(nand_get_large_page_hamming_ooblayout);
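
/*
 * Worked example, assuming a 2048-byte page with 64 OOB bytes and
 * 1-bit/512-byte Hamming (nand->ecc.ctx.total == 12): the ECC bytes sit at
 * the fixed offset 40 (bytes 40-51), free section 0 covers bytes 2-39 and
 * free section 1 covers bytes 52-63.
 */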

static enum nand_ecc_engine_type
of_get_nand_ecc_engine_type(struct device_node *np)
{
        struct device_node *eng_np;

        if (of_property_read_bool(np, "nand-no-ecc-engine"))
                return NAND_ECC_ENGINE_TYPE_NONE;

        if (of_property_read_bool(np, "nand-use-soft-ecc-engine"))
                return NAND_ECC_ENGINE_TYPE_SOFT;

        eng_np = of_parse_phandle(np, "nand-ecc-engine", 0);
        of_node_put(eng_np);

        if (eng_np) {
                if (eng_np == np)
                        return NAND_ECC_ENGINE_TYPE_ON_DIE;
                else
                        return NAND_ECC_ENGINE_TYPE_ON_HOST;
        }

        return NAND_ECC_ENGINE_TYPE_INVALID;
}

static const char * const nand_ecc_placement[] = {
        [NAND_ECC_PLACEMENT_OOB] = "oob",
        [NAND_ECC_PLACEMENT_INTERLEAVED] = "interleaved",
};

static enum nand_ecc_placement of_get_nand_ecc_placement(struct device_node *np)
{
        enum nand_ecc_placement placement;
        const char *pm;
        int err;

        err = of_property_read_string(np, "nand-ecc-placement", &pm);
        if (!err) {
                for (placement = NAND_ECC_PLACEMENT_OOB;
                     placement < ARRAY_SIZE(nand_ecc_placement); placement++) {
                        if (!strcasecmp(pm, nand_ecc_placement[placement]))
                                return placement;
                }
        }

        return NAND_ECC_PLACEMENT_UNKNOWN;
}

static const char * const nand_ecc_algos[] = {
        [NAND_ECC_ALGO_HAMMING] = "hamming",
        [NAND_ECC_ALGO_BCH] = "bch",
        [NAND_ECC_ALGO_RS] = "rs",
};

static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
{
        enum nand_ecc_algo ecc_algo;
        const char *pm;
        int err;

        err = of_property_read_string(np, "nand-ecc-algo", &pm);
        if (!err) {
                for (ecc_algo = NAND_ECC_ALGO_HAMMING;
                     ecc_algo < ARRAY_SIZE(nand_ecc_algos);
                     ecc_algo++) {
                        if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
                                return ecc_algo;
                }
        }

        return NAND_ECC_ALGO_UNKNOWN;
}

static int of_get_nand_ecc_step_size(struct device_node *np)
{
        int ret;
        u32 val;

        ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
        return ret ? ret : val;
}

static int of_get_nand_ecc_strength(struct device_node *np)
{
        int ret;
        u32 val;

        ret = of_property_read_u32(np, "nand-ecc-strength", &val);
        return ret ? ret : val;
}

void of_get_nand_ecc_user_config(struct nand_device *nand)
{
        struct device_node *dn = nanddev_get_of_node(nand);
        int strength, size;

        nand->ecc.user_conf.engine_type = of_get_nand_ecc_engine_type(dn);
        nand->ecc.user_conf.algo = of_get_nand_ecc_algo(dn);
        nand->ecc.user_conf.placement = of_get_nand_ecc_placement(dn);

        strength = of_get_nand_ecc_strength(dn);
        if (strength >= 0)
                nand->ecc.user_conf.strength = strength;

        size = of_get_nand_ecc_step_size(dn);
        if (size >= 0)
                nand->ecc.user_conf.step_size = size;

        if (of_property_read_bool(dn, "nand-ecc-maximize"))
                nand->ecc.user_conf.flags |= NAND_ECC_MAXIMIZE_STRENGTH;
}
EXPORT_SYMBOL(of_get_nand_ecc_user_config);
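
/*
 * Example device tree snippet matching the properties parsed above (node and
 * label names are hypothetical):
 *
 *      flash@0 {
 *              nand-ecc-engine = <&bch_engine>;
 *              nand-ecc-strength = <8>;
 *              nand-ecc-step-size = <512>;
 *      };
 *
 * Pointing "nand-ecc-engine" at the flash node itself selects on-die ECC,
 * "nand-use-soft-ecc-engine" selects the software engine (with an optional
 * "nand-ecc-algo" of "hamming", "bch" or "rs"), and "nand-no-ecc-engine"
 * disables correction entirely.
 */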

/**
 * nand_ecc_is_strong_enough - Check if the chip configuration meets the
 *                             datasheet requirements.
 *
 * @nand: Device to check
 *
 * If our configuration corrects A bits per B bytes and the minimum
 * required correction level is X bits per Y bytes, then we must ensure
 * both of the following are true:
 *
 * (1) A / B >= X / Y
 * (2) A >= X
 *
 * Requirement (1) ensures we can correct for the required bitflip density.
 * Requirement (2) ensures we can correct even when all bitflips are clumped
 * in the same sector.
 */
bool nand_ecc_is_strong_enough(struct nand_device *nand)
{
        const struct nand_ecc_props *reqs = nanddev_get_ecc_requirements(nand);
        const struct nand_ecc_props *conf = nanddev_get_ecc_conf(nand);
        struct mtd_info *mtd = nanddev_to_mtd(nand);
        int corr, ds_corr;

        if (conf->step_size == 0 || reqs->step_size == 0)
                /* Not enough information */
                return true;

        /*
         * We get the number of corrected bits per page to compare
         * the correction density.
         */
        corr = (mtd->writesize * conf->strength) / conf->step_size;
        ds_corr = (mtd->writesize * reqs->strength) / reqs->step_size;

        return corr >= ds_corr && conf->strength >= reqs->strength;
}
EXPORT_SYMBOL(nand_ecc_is_strong_enough);
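
/*
 * Worked example (illustrative numbers): with a 4096-byte page, a chip
 * requiring 4 bits per 512 bytes and an engine configured for 8 bits per
 * 512 bytes, corr = 4096 * 8 / 512 = 64 and ds_corr = 4096 * 4 / 512 = 32.
 * Both corr >= ds_corr and strength (8 >= 4) hold, so the configuration is
 * considered strong enough.
 */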

/* ECC engine driver internal helpers */
int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
                               struct nand_device *nand)
{
        unsigned int total_buffer_size;

        ctx->nand = nand;

        /* Let the user decide the exact length of each buffer */
        if (!ctx->page_buffer_size)
                ctx->page_buffer_size = nanddev_page_size(nand);
        if (!ctx->oob_buffer_size)
                ctx->oob_buffer_size = nanddev_per_page_oobsize(nand);

        total_buffer_size = ctx->page_buffer_size + ctx->oob_buffer_size;

        ctx->spare_databuf = kzalloc(total_buffer_size, GFP_KERNEL);
        if (!ctx->spare_databuf)
                return -ENOMEM;

        ctx->spare_oobbuf = ctx->spare_databuf + ctx->page_buffer_size;

        return 0;
}
EXPORT_SYMBOL_GPL(nand_ecc_init_req_tweaking);

void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx)
{
        kfree(ctx->spare_databuf);
}
EXPORT_SYMBOL_GPL(nand_ecc_cleanup_req_tweaking);

/*
 * Ensure the data and OOB areas are fully read/written, otherwise the
 * correction might not work as expected.
 */
void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
                        struct nand_page_io_req *req)
{
        struct nand_device *nand = ctx->nand;
        struct nand_page_io_req *orig, *tweak;

        /* Save the original request */
        ctx->orig_req = *req;
        ctx->bounce_data = false;
        ctx->bounce_oob = false;
        orig = &ctx->orig_req;
        tweak = req;

        /* Ensure the request covers the entire page */
        if (orig->datalen < nanddev_page_size(nand)) {
                ctx->bounce_data = true;
                tweak->dataoffs = 0;
                tweak->datalen = nanddev_page_size(nand);
                tweak->databuf.in = ctx->spare_databuf;
                memset(tweak->databuf.in, 0xFF, ctx->page_buffer_size);
        }

        if (orig->ooblen < nanddev_per_page_oobsize(nand)) {
                ctx->bounce_oob = true;
                tweak->ooboffs = 0;
                tweak->ooblen = nanddev_per_page_oobsize(nand);
                tweak->oobbuf.in = ctx->spare_oobbuf;
                memset(tweak->oobbuf.in, 0xFF, ctx->oob_buffer_size);
        }

        /* Copy the data that must be written in the bounce buffers, if needed */
        if (orig->type == NAND_PAGE_WRITE) {
                if (ctx->bounce_data)
                        memcpy((void *)tweak->databuf.out + orig->dataoffs,
                               orig->databuf.out, orig->datalen);

                if (ctx->bounce_oob)
                        memcpy((void *)tweak->oobbuf.out + orig->ooboffs,
                               orig->oobbuf.out, orig->ooblen);
        }
}
EXPORT_SYMBOL_GPL(nand_ecc_tweak_req);

void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
                          struct nand_page_io_req *req)
{
        struct nand_page_io_req *orig, *tweak;

        orig = &ctx->orig_req;
        tweak = req;

        /* Restore the data read from the bounce buffers, if needed */
        if (orig->type == NAND_PAGE_READ) {
                if (ctx->bounce_data)
                        memcpy(orig->databuf.in,
                               tweak->databuf.in + orig->dataoffs,
                               orig->datalen);

                if (ctx->bounce_oob)
                        memcpy(orig->oobbuf.in,
                               tweak->oobbuf.in + orig->ooboffs,
                               orig->ooblen);
        }

        /* Ensure the original request is restored */
        *req = *orig;
}
EXPORT_SYMBOL_GPL(nand_ecc_restore_req);
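
/*
 * Illustrative sketch (not part of this file's build): an ECC engine driver
 * that needs full-page accesses typically wraps its prepare/finish hooks
 * around the tweak/restore helpers above. The my_engine_* names and the
 * req_ctx field are hypothetical.
 *
 *      static int my_engine_prepare_io_req(struct nand_device *nand,
 *                                          struct nand_page_io_req *req)
 *      {
 *              struct my_engine_ctx *ctx = nand->ecc.ctx.priv;
 *
 *              // Enlarge the request so it covers the full page + OOB
 *              nand_ecc_tweak_req(&ctx->req_ctx, req);
 *              return 0;
 *      }
 *
 *      static int my_engine_finish_io_req(struct nand_device *nand,
 *                                         struct nand_page_io_req *req)
 *      {
 *              struct my_engine_ctx *ctx = nand->ecc.ctx.priv;
 *
 *              // ... correction/status reporting would happen here ...
 *              nand_ecc_restore_req(&ctx->req_ctx, req);
 *              return 0;
 *      }
 */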

struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand)
{
        unsigned int algo = nand->ecc.user_conf.algo;

        if (algo == NAND_ECC_ALGO_UNKNOWN)
                algo = nand->ecc.defaults.algo;

        switch (algo) {
        case NAND_ECC_ALGO_HAMMING:
                return nand_ecc_sw_hamming_get_engine();
        case NAND_ECC_ALGO_BCH:
                return nand_ecc_sw_bch_get_engine();
        default:
                break;
        }

        return NULL;
}
EXPORT_SYMBOL(nand_ecc_get_sw_engine);

struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand)
{
        return nand->ecc.ondie_engine;
}
EXPORT_SYMBOL(nand_ecc_get_on_die_hw_engine);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
MODULE_DESCRIPTION("Generic ECC engine");