linux/drivers/crypto/caam/ctrl.c
/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 */

#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "error.h"

/*
 * i.MX targets tend to have clock control subsystems that can
 * enable/disable clocking to our device.
 */
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
static inline struct clk *caam_drv_identify_clk(struct device *dev,
                                                char *clk_name)
{
        return devm_clk_get(dev, clk_name);
}
#else
static inline struct clk *caam_drv_identify_clk(struct device *dev,
                                                char *clk_name)
{
        return NULL;
}
#endif
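
/*
 * Example (editor's sketch, not part of the original driver): the helper
 * above pairs with the standard clk calls used later in caam_probe(). On
 * non-i.MX builds it returns NULL, which the generic clk API treats as a
 * dummy clock (clk_prepare_enable(NULL) returns 0), so callers need no
 * further #ifdefs:
 *
 *        struct clk *clk = caam_drv_identify_clk(dev, "ipg");
 *
 *        if (IS_ERR(clk))
 *                return PTR_ERR(clk);
 *        ret = clk_prepare_enable(clk);   (a no-op when clk == NULL)
 *        if (ret)
 *                dev_err(dev, "can't enable CAAM ipg clock: %d\n", ret);
 */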

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
        u32 *jump_cmd, op_flags;

        init_job_desc(desc, 0);

        op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                        (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;

        /* INIT RNG in non-test mode */
        append_operation(desc, op_flags);

        if (!handle && do_sk) {
                /*
                 * For SH0, Secure Keys must be generated as well
                 */

                /* wait for done */
                jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
                set_jump_tgt_here(desc, jump_cmd);

                /*
                 * load 1 to clear written reg:
                 * resets the done interrupt and returns the RNG to idle.
                 */
                append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

                /* Initialize State Handle */
                append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                                 OP_ALG_AAI_RNG4_SK);
        }

        append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
        init_job_desc(desc, 0);

        /* Uninstantiate State Handle 0 */
        append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
                         (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

        append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 *                        the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *         - -ENODEV if the DECO couldn't be acquired
 *         - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
                                        u32 *status)
{
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
        struct caam_deco __iomem *deco = ctrlpriv->deco;
        unsigned int timeout = 100000;
        u32 deco_dbg_reg, flags;
        int i;

        if (ctrlpriv->virt_en == 1) {
                setbits32(&ctrl->deco_rsr, DECORSR_JR0);

                while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
                       --timeout)
                        cpu_relax();

                timeout = 100000;
        }

        setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);

        while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
                                                                 --timeout)
                cpu_relax();

        if (!timeout) {
                dev_err(ctrldev, "failed to acquire DECO 0\n");
                clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
                return -ENODEV;
        }

        for (i = 0; i < desc_len(desc); i++)
                wr_reg32(&deco->descbuf[i], *(desc + i));

        flags = DECO_JQCR_WHL;
        /*
         * If the descriptor length is 4 words or more, the
         * FOUR bit in the JRCTRL register must be set.
         */
        if (desc_len(desc) >= 4)
                flags |= DECO_JQCR_FOUR;

        /* Instruct the DECO to execute it */
        setbits32(&deco->jr_ctl_hi, flags);

        timeout = 10000000;
        do {
                deco_dbg_reg = rd_reg32(&deco->desc_dbg);
                /*
                 * If an error occurred in the descriptor, then
                 * the DECO status field will be set to 0x0D
                 */
                if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
                    DESC_DBG_DECO_STAT_HOST_ERR)
                        break;
                cpu_relax();
        } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

        *status = rd_reg32(&deco->op_status_hi) &
                  DECO_OP_STATUS_HI_ERR_MASK;

        if (ctrlpriv->virt_en == 1)
                clrbits32(&ctrl->deco_rsr, DECORSR_JR0);

        /* Mark the DECO as free */
        clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);

        if (!timeout)
                return -EAGAIN;

        return 0;
}
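
/*
 * Example (editor's sketch, mirroring the callers below): a descriptor is
 * built into a small kmalloc'ed buffer, handed to run_descriptor_deco0(),
 * and both the return code and the reported DECO status are checked:
 *
 *        u32 *desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
 *        u32 status;
 *
 *        if (!desc)
 *                return -ENOMEM;
 *        build_deinstantiation_desc(desc, 0);
 *        ret = run_descriptor_deco0(ctrldev, desc, &status);
 *        if (ret || status)
 *                dev_err(ctrldev, "descriptor failed: %d (status 0x%x)\n",
 *                        ret, status);
 *        kfree(desc);
 */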

/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 *                   which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *                      for the RNG4 state handles which exist in
 *                      the RNG4 block: 1 if it's been instantiated
 *                      by an external entity, 0 otherwise.
 * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *            Caution: this can be done only once; if the keys need to be
 *            regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *         - -ENOMEM if there isn't enough memory to allocate the descriptor
 *         - -ENODEV if DECO0 couldn't be acquired
 *         - -EAGAIN if an error occurred when executing the descriptor,
 *            e.g. an RNG hardware error due to not "good enough"
 *            entropy being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                           int gen_sk)
{
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl;
        u32 *desc, status = 0, rdsta_val;
        int ret = 0, sh_idx;

        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
        desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
                /*
                 * If the corresponding bit is set, this state handle
                 * was initialized by somebody else, so it's left alone.
                 */
                if ((1 << sh_idx) & state_handle_mask)
                        continue;

                /* Create the descriptor for instantiating RNG State Handle */
                build_instantiation_desc(desc, sh_idx, gen_sk);

                /* Try to run it through DECO0 */
                ret = run_descriptor_deco0(ctrldev, desc, &status);

                /*
                 * If ret is not 0, or the descriptor status is not 0, then
                 * something went wrong. No need to try the next state
                 * handle (if available), bail out here.
                 * Also, if for some reason the State Handle didn't get
                 * instantiated although the descriptor finished
                 * without any error (HW optimizations for later
                 * CAAM eras), then try again.
                 */
                rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
                if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
                    !(rdsta_val & (1 << sh_idx)))
                        ret = -EAGAIN;
                if (ret)
                        break;
                dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
                /* Clear the contents before recreating the descriptor */
                memset(desc, 0x00, CAAM_CMD_SZ * 7);
        }

        kfree(desc);

        return ret;
}

/*
 * deinstantiate_rng - builds and executes a descriptor on DECO0,
 *                     which deinitializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *                      for the RNG4 state handles which exist in
 *                      the RNG4 block: 1 if it's been instantiated
 *
 * Return: - 0 if no error occurred
 *         - -ENOMEM if there isn't enough memory to allocate the descriptor
 *         - -ENODEV if DECO0 couldn't be acquired
 *         - -EAGAIN if an error occurred when executing the descriptor
 */
static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
{
        u32 *desc, status;
        int sh_idx, ret = 0;

        desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;

        for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
                /*
                 * If the corresponding bit is set, then it means the state
                 * handle was initialized by us, and thus it needs to be
                 * deinitialized as well
                 */
                if ((1 << sh_idx) & state_handle_mask) {
                        /*
                         * Create the descriptor for deinstantiating this state
                         * handle
                         */
                        build_deinstantiation_desc(desc, sh_idx);

                        /* Try to run it through DECO0 */
                        ret = run_descriptor_deco0(ctrldev, desc, &status);

                        if (ret || status) {
                                dev_err(ctrldev,
                                        "Failed to deinstantiate RNG4 SH%d\n",
                                        sh_idx);
                                break;
                        }
                        dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
                }
        }

        kfree(desc);

        return ret;
}

static int caam_remove(struct platform_device *pdev)
{
        struct device *ctrldev;
        struct caam_drv_private *ctrlpriv;
        struct caam_ctrl __iomem *ctrl;
        int ring;

        ctrldev = &pdev->dev;
        ctrlpriv = dev_get_drvdata(ctrldev);
        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;

        /* Remove platform devices for JobRs */
        for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
                if (ctrlpriv->jrpdev[ring])
                        of_device_unregister(ctrlpriv->jrpdev[ring]);
        }

        /* De-initialize RNG state handles initialized by this driver. */
        if (ctrlpriv->rng4_sh_init)
                deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);

        /* Shut down debug views */
#ifdef CONFIG_DEBUG_FS
        debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif

        /* Unmap controller region */
        iounmap(ctrl);

        /* shut clocks off before finalizing shutdown */
        clk_disable_unprepare(ctrlpriv->caam_ipg);
        clk_disable_unprepare(ctrlpriv->caam_mem);
        clk_disable_unprepare(ctrlpriv->caam_aclk);
        clk_disable_unprepare(ctrlpriv->caam_emi_slow);

        return 0;
}

/*
 * kick_trng - sets the various parameters for enabling the initialization
 *             of the RNG4 block in CAAM
 * @pdev - pointer to the platform device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct platform_device *pdev, int ent_delay)
{
        struct device *ctrldev = &pdev->dev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_ctrl __iomem *ctrl;
        struct rng4tst __iomem *r4tst;
        u32 val;

        ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
        r4tst = &ctrl->r4tst[0];

        /* put RNG4 into program mode */
        setbits32(&r4tst->rtmctl, RTMCTL_PRGM);

        /*
         * Performance-wise, it does not make sense to
         * set the delay to a value that is lower
         * than the last one that worked (i.e. the state handles
         * were instantiated properly). Thus, instead of wasting
         * time trying to set the values controlling the sample
         * frequency, the function simply returns.
         */
        val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
              >> RTSDCTL_ENT_DLY_SHIFT;
        if (ent_delay <= val) {
                /* put RNG4 into run mode */
                clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
                return;
        }

        val = rd_reg32(&r4tst->rtsdctl);
        val = (val & ~RTSDCTL_ENT_DLY_MASK) |
              (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
        wr_reg32(&r4tst->rtsdctl, val);
        /* min. freq. count, equal to 1/4 of the entropy sample length */
        wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
        /* disable maximum frequency count */
        wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
        /* read the control register */
        val = rd_reg32(&r4tst->rtmctl);
        /*
         * select raw sampling in both entropy shifter
         * and statistical checker
         */
        setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
        /* put RNG4 into run mode */
        clrbits32(&val, RTMCTL_PRGM);
        /* write back the control register */
        wr_reg32(&r4tst->rtmctl, val);
}
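
/*
 * Example (editor's sketch, condensed from the RNG initialization loop in
 * caam_probe() below): kick_trng() is re-invoked with a progressively larger
 * entropy delay until RNG instantiation stops returning -EAGAIN or the
 * maximum delay is reached:
 *
 *        int ent_delay = RTSDCTL_ENT_DLY_MIN;
 *
 *        do {
 *                kick_trng(pdev, ent_delay);
 *                ent_delay += 400;
 *                ret = instantiate_rng(dev, inst_handles, gen_sk);
 *        } while (ret == -EAGAIN && ent_delay < RTSDCTL_ENT_DLY_MAX);
 */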

/**
 * caam_get_era() - Return the ERA of the SEC on SoC, based
 * on the "fsl,sec-era" property in the DTS. This property is updated by u-boot.
 **/
int caam_get_era(void)
{
        struct device_node *caam_node;
        int ret;
        u32 prop;

        caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
        of_node_put(caam_node);

        return ret ? -ENOTSUPP : prop;
}
EXPORT_SYMBOL(caam_get_era);
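
/*
 * Example (editor's sketch, hypothetical caller): since caam_get_era() is
 * exported, other CAAM sub-drivers can gate era-specific behaviour on it;
 * the return value is the SEC era, or -ENOTSUPP if the device tree does not
 * carry the "fsl,sec-era" property:
 *
 *        int era = caam_get_era();
 *
 *        if (era < 0)
 *                pr_info("caam: SEC era unknown\n");
 *        else
 *                pr_info("caam: SEC era %d\n", era);
 */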

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
        int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
        u64 caam_id;
        struct device *dev;
        struct device_node *nprop, *np;
        struct caam_ctrl __iomem *ctrl;
        struct caam_drv_private *ctrlpriv;
        struct clk *clk;
#ifdef CONFIG_DEBUG_FS
        struct caam_perfmon *perfmon;
#endif
        u32 scfgr, comp_params;
        u32 cha_vid_ls;
        int pg_size;
        int BLOCK_OFFSET = 0;

        ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
        if (!ctrlpriv)
                return -ENOMEM;

        dev = &pdev->dev;
        dev_set_drvdata(dev, ctrlpriv);
        ctrlpriv->pdev = pdev;
        nprop = pdev->dev.of_node;

        /* Enable clocking */
        clk = caam_drv_identify_clk(&pdev->dev, "ipg");
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                dev_err(&pdev->dev,
                        "can't identify CAAM ipg clk: %d\n", ret);
                return ret;
        }
        ctrlpriv->caam_ipg = clk;

        clk = caam_drv_identify_clk(&pdev->dev, "mem");
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                dev_err(&pdev->dev,
                        "can't identify CAAM mem clk: %d\n", ret);
                return ret;
        }
        ctrlpriv->caam_mem = clk;

        clk = caam_drv_identify_clk(&pdev->dev, "aclk");
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                dev_err(&pdev->dev,
                        "can't identify CAAM aclk clk: %d\n", ret);
                return ret;
        }
        ctrlpriv->caam_aclk = clk;

        clk = caam_drv_identify_clk(&pdev->dev, "emi_slow");
        if (IS_ERR(clk)) {
                ret = PTR_ERR(clk);
                dev_err(&pdev->dev,
                        "can't identify CAAM emi_slow clk: %d\n", ret);
                return ret;
        }
        ctrlpriv->caam_emi_slow = clk;

        ret = clk_prepare_enable(ctrlpriv->caam_ipg);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
                return ret;
        }

        ret = clk_prepare_enable(ctrlpriv->caam_mem);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n",
                        ret);
                goto disable_caam_ipg;
        }

        ret = clk_prepare_enable(ctrlpriv->caam_aclk);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
                goto disable_caam_mem;
        }

        ret = clk_prepare_enable(ctrlpriv->caam_emi_slow);
        if (ret < 0) {
                dev_err(&pdev->dev, "can't enable CAAM emi slow clock: %d\n",
                        ret);
                goto disable_caam_aclk;
        }

        /* Get configuration properties from device tree */
        /* First, get register page */
        ctrl = of_iomap(nprop, 0);
        if (ctrl == NULL) {
                dev_err(dev, "caam: of_iomap() failed\n");
                ret = -ENOMEM;
                goto disable_caam_emi_slow;
        }
        /* Find the page size, using the CTPR_MS register */
        comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
        pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;

        /* Select BLOCK_OFFSET based on the page size
         * supported by the platform
         */
        if (pg_size == 0)
                BLOCK_OFFSET = PG_SIZE_4K;
        else
                BLOCK_OFFSET = PG_SIZE_64K;

        ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
        ctrlpriv->assure = (struct caam_assurance __force *)
                           ((uint8_t *)ctrl +
                            BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
                           );
        ctrlpriv->deco = (struct caam_deco __force *)
                         ((uint8_t *)ctrl +
                         BLOCK_OFFSET * DECO_BLOCK_NUMBER
                         );

        /* Get the IRQ of the controller (for security violations only) */
        ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);

        /*
         * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
         * long pointers in master configuration register
         */
        clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
                      MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
                      (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));

        /*
         * Read the compile-time parameters and SCFGR to determine
         * whether virtualization is enabled for this platform
         */
        scfgr = rd_reg32(&ctrl->scfgr);

        ctrlpriv->virt_en = 0;
        if (comp_params & CTPR_MS_VIRT_EN_INCL) {
                /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
                 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
                 */
                if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
                    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
                       (scfgr & SCFGR_VIRT_EN)))
                                ctrlpriv->virt_en = 1;
        } else {
                /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
                if (comp_params & CTPR_MS_VIRT_EN_POR)
                                ctrlpriv->virt_en = 1;
        }

        if (ctrlpriv->virt_en == 1)
                setbits32(&ctrl->jrstart, JRSTART_JR0_START |
                          JRSTART_JR1_START | JRSTART_JR2_START |
                          JRSTART_JR3_START);

        if (sizeof(dma_addr_t) == sizeof(u64))
                if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
                        dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
                else
                        dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
        else
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        /*
         * Detect and enable JobRs
         * First, find out how many rings are spec'ed, allocate references
         * for all, then go probe each one.
         */
        rspec = 0;
        for_each_available_child_of_node(nprop, np)
                if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
                    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
                        rspec++;

        ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
                                        sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
        if (ctrlpriv->jrpdev == NULL) {
                ret = -ENOMEM;
                goto iounmap_ctrl;
        }

        ring = 0;
        ctrlpriv->total_jobrs = 0;
        for_each_available_child_of_node(nprop, np)
                if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
                    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
                        ctrlpriv->jrpdev[ring] =
                                of_platform_device_create(np, NULL, dev);
                        if (!ctrlpriv->jrpdev[ring]) {
                                pr_warn("JR%d Platform device creation error\n",
                                        ring);
                                continue;
                        }
                        ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
                                             ((uint8_t *)ctrl +
                                             (ring + JR_BLOCK_NUMBER) *
                                              BLOCK_OFFSET
                                             );
                        ctrlpriv->total_jobrs++;
                        ring++;
                }

        /* Check to see if QI present. If so, enable */
        ctrlpriv->qi_present =
                        !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
                           CTPR_MS_QI_MASK);
        if (ctrlpriv->qi_present) {
                ctrlpriv->qi = (struct caam_queue_if __force *)
                               ((uint8_t *)ctrl +
                                 BLOCK_OFFSET * QI_BLOCK_NUMBER
                               );
                /* This is all that's required to physically enable QI */
                wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
        }

        /* If no QI and no rings specified, quit and go home */
        if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
                dev_err(dev, "no queues configured, terminating\n");
                ret = -ENOMEM;
                goto caam_remove;
        }

        cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);

        /*
         * If SEC has RNG version >= 4 and the RNG state handle has not
         * already been instantiated, do RNG instantiation
         */
        if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
                ctrlpriv->rng4_sh_init =
                        rd_reg32(&ctrl->r4tst[0].rdsta);
                /*
                 * If the secure keys (TDKEK, JDKEK, TDSK) were already
                 * generated, signal this to the function that is instantiating
                 * the state handles. An error would occur if RNG4 attempts
                 * to regenerate these keys before the next POR.
                 */
                gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
                ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
                do {
                        int inst_handles =
                                rd_reg32(&ctrl->r4tst[0].rdsta) &
                                                                RDSTA_IFMASK;
                        /*
                         * If either SH was instantiated by somebody else
                         * (e.g. u-boot) then it is assumed that the entropy
                         * parameters are properly set and thus the function
                         * setting these (kick_trng(...)) is skipped.
                         * Also, if a handle was instantiated, do not change
                         * the TRNG parameters.
                         */
                        if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
                                dev_info(dev,
                                         "Entropy delay = %u\n",
                                         ent_delay);
                                kick_trng(pdev, ent_delay);
                                ent_delay += 400;
                        }
                        /*
                         * if instantiate_rng(...) fails, the loop will rerun
                         * and the kick_trng(...) function will modify the
                         * upper and lower limits of the entropy sampling
                         * interval, leading to a successful initialization of
                         * the RNG.
                         */
                        ret = instantiate_rng(dev, inst_handles,
                                              gen_sk);
                        if (ret == -EAGAIN)
                                /*
                                 * if here, the loop will rerun,
                                 * so don't hog the CPU
                                 */
                                cpu_relax();
                } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
                if (ret) {
                        dev_err(dev, "failed to instantiate RNG\n");
                        goto caam_remove;
                }
                /*
                 * Set handles init'ed by this module as the complement of the
                 * already initialized ones
                 */
                ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;

                /* Enable RDB bit so that RNG works faster */
                setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
        }

        /* NOTE: RTIC detection ought to go here, around Si time */

        caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
                  (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);

        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
                 caam_get_era());
        dev_info(dev, "job rings = %d, qi = %d\n",
                 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

#ifdef CONFIG_DEBUG_FS
        /*
         * FIXME: needs better naming distinction, as some amalgamation of
         * "caam" and nprop->full_name. The OF name isn't distinctive,
         * but does separate instances
         */
        perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;

        ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
        ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);

        /* Controller-level - performance monitor counters */
        ctrlpriv->ctl_rq_dequeued =
                debugfs_create_u64("rq_dequeued",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->req_dequeued);
        ctrlpriv->ctl_ob_enc_req =
                debugfs_create_u64("ob_rq_encrypted",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_enc_req);
        ctrlpriv->ctl_ib_dec_req =
                debugfs_create_u64("ib_rq_decrypted",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_dec_req);
        ctrlpriv->ctl_ob_enc_bytes =
                debugfs_create_u64("ob_bytes_encrypted",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_enc_bytes);
        ctrlpriv->ctl_ob_prot_bytes =
                debugfs_create_u64("ob_bytes_protected",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ob_prot_bytes);
        ctrlpriv->ctl_ib_dec_bytes =
                debugfs_create_u64("ib_bytes_decrypted",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_dec_bytes);
        ctrlpriv->ctl_ib_valid_bytes =
                debugfs_create_u64("ib_bytes_validated",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->ib_valid_bytes);

        /* Controller level - global status values */
        ctrlpriv->ctl_faultaddr =
                debugfs_create_u64("fault_addr",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->faultaddr);
        ctrlpriv->ctl_faultdetail =
                debugfs_create_u32("fault_detail",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->faultdetail);
        ctrlpriv->ctl_faultstatus =
                debugfs_create_u32("fault_status",
                                   S_IRUSR | S_IRGRP | S_IROTH,
                                   ctrlpriv->ctl, &perfmon->status);

        /* Internal covering keys (useful in non-secure mode only) */
        ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
        ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_kek = debugfs_create_blob("kek",
                                                S_IRUSR |
                                                S_IRGRP | S_IROTH,
                                                ctrlpriv->ctl,
                                                &ctrlpriv->ctl_kek_wrap);

        ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
        ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
                                                 S_IRUSR |
                                                 S_IRGRP | S_IROTH,
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tkek_wrap);

        ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
        ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
        ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
                                                 S_IRUSR |
                                                 S_IRGRP | S_IROTH,
                                                 ctrlpriv->ctl,
                                                 &ctrlpriv->ctl_tdsk_wrap);
#endif
        return 0;

caam_remove:
        caam_remove(pdev);
iounmap_ctrl:
        iounmap(ctrl);
disable_caam_emi_slow:
        clk_disable_unprepare(ctrlpriv->caam_emi_slow);
disable_caam_aclk:
        clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
        clk_disable_unprepare(ctrlpriv->caam_mem);
disable_caam_ipg:
        clk_disable_unprepare(ctrlpriv->caam_ipg);
        return ret;
}

static struct of_device_id caam_match[] = {
        {
                .compatible = "fsl,sec-v4.0",
        },
        {
                .compatible = "fsl,sec4.0",
        },
        {},
};
MODULE_DEVICE_TABLE(of, caam_match);
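
/*
 * Illustrative device-tree fragment (editor's sketch; node names, addresses
 * and sizes are placeholders - only the compatible strings are taken from
 * caam_match and the job-ring detection in caam_probe()):
 *
 *        crypto@300000 {
 *                compatible = "fsl,sec-v4.0";
 *                #address-cells = <1>;
 *                #size-cells = <1>;
 *                reg = <0x300000 0x10000>;
 *                ranges = <0 0x300000 0x10000>;
 *
 *                jr@1000 {
 *                        compatible = "fsl,sec-v4.0-job-ring";
 *                        reg = <0x1000 0x1000>;
 *                };
 *        };
 */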

static struct platform_driver caam_driver = {
        .driver = {
                .name = "caam",
                .of_match_table = caam_match,
        },
        .probe       = caam_probe,
        .remove      = caam_remove,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");