linux/drivers/crypto/omap-sham.c
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for OMAP SHA1/MD5 HW acceleration.
   5 *
   6 * Copyright (c) 2010 Nokia Corporation
   7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
   8 * Copyright (c) 2011 Texas Instruments Incorporated
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as published
  12 * by the Free Software Foundation.
  13 *
  14 * Some ideas are from old omap-sha1-md5.c driver.
  15 */
  16
  17#define pr_fmt(fmt) "%s: " fmt, __func__
  18
  19#include <linux/err.h>
  20#include <linux/device.h>
  21#include <linux/module.h>
  22#include <linux/init.h>
  23#include <linux/errno.h>
  24#include <linux/interrupt.h>
  25#include <linux/kernel.h>
  26#include <linux/irq.h>
  27#include <linux/io.h>
  28#include <linux/platform_device.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/dmaengine.h>
  32#include <linux/pm_runtime.h>
  33#include <linux/of.h>
  34#include <linux/of_device.h>
  35#include <linux/of_address.h>
  36#include <linux/of_irq.h>
  37#include <linux/delay.h>
  38#include <linux/crypto.h>
  39#include <linux/cryptohash.h>
  40#include <crypto/scatterwalk.h>
  41#include <crypto/algapi.h>
  42#include <crypto/sha.h>
  43#include <crypto/hash.h>
  44#include <crypto/internal/hash.h>
  45
  46#define MD5_DIGEST_SIZE                 16
  47
  48#define SHA_REG_IDIGEST(dd, x)          ((dd)->pdata->idigest_ofs + ((x)*0x04))
  49#define SHA_REG_DIN(dd, x)              ((dd)->pdata->din_ofs + ((x) * 0x04))
  50#define SHA_REG_DIGCNT(dd)              ((dd)->pdata->digcnt_ofs)
  51
  52#define SHA_REG_ODIGEST(dd, x)          ((dd)->pdata->odigest_ofs + ((x) * 0x04))
  53
  54#define SHA_REG_CTRL                    0x18
  55#define SHA_REG_CTRL_LENGTH             (0xFFFFFFFF << 5)
  56#define SHA_REG_CTRL_CLOSE_HASH         (1 << 4)
  57#define SHA_REG_CTRL_ALGO_CONST         (1 << 3)
  58#define SHA_REG_CTRL_ALGO               (1 << 2)
  59#define SHA_REG_CTRL_INPUT_READY        (1 << 1)
  60#define SHA_REG_CTRL_OUTPUT_READY       (1 << 0)
  61
  62#define SHA_REG_REV(dd)                 ((dd)->pdata->rev_ofs)
  63
  64#define SHA_REG_MASK(dd)                ((dd)->pdata->mask_ofs)
  65#define SHA_REG_MASK_DMA_EN             (1 << 3)
  66#define SHA_REG_MASK_IT_EN              (1 << 2)
  67#define SHA_REG_MASK_SOFTRESET          (1 << 1)
  68#define SHA_REG_AUTOIDLE                (1 << 0)
  69
  70#define SHA_REG_SYSSTATUS(dd)           ((dd)->pdata->sysstatus_ofs)
  71#define SHA_REG_SYSSTATUS_RESETDONE     (1 << 0)
  72
  73#define SHA_REG_MODE(dd)                ((dd)->pdata->mode_ofs)
  74#define SHA_REG_MODE_HMAC_OUTER_HASH    (1 << 7)
  75#define SHA_REG_MODE_HMAC_KEY_PROC      (1 << 5)
  76#define SHA_REG_MODE_CLOSE_HASH         (1 << 4)
  77#define SHA_REG_MODE_ALGO_CONSTANT      (1 << 3)
  78
  79#define SHA_REG_MODE_ALGO_MASK          (7 << 0)
  80#define SHA_REG_MODE_ALGO_MD5_128       (0 << 1)
  81#define SHA_REG_MODE_ALGO_SHA1_160      (1 << 1)
  82#define SHA_REG_MODE_ALGO_SHA2_224      (2 << 1)
  83#define SHA_REG_MODE_ALGO_SHA2_256      (3 << 1)
  84#define SHA_REG_MODE_ALGO_SHA2_384      (1 << 0)
  85#define SHA_REG_MODE_ALGO_SHA2_512      (3 << 0)
  86
  87#define SHA_REG_LENGTH(dd)              ((dd)->pdata->length_ofs)
  88
  89#define SHA_REG_IRQSTATUS               0x118
  90#define SHA_REG_IRQSTATUS_CTX_RDY       (1 << 3)
  91#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
  92#define SHA_REG_IRQSTATUS_INPUT_RDY     (1 << 1)
  93#define SHA_REG_IRQSTATUS_OUTPUT_RDY    (1 << 0)
  94
  95#define SHA_REG_IRQENA                  0x11C
  96#define SHA_REG_IRQENA_CTX_RDY          (1 << 3)
  97#define SHA_REG_IRQENA_PARTHASH_RDY     (1 << 2)
  98#define SHA_REG_IRQENA_INPUT_RDY        (1 << 1)
  99#define SHA_REG_IRQENA_OUTPUT_RDY       (1 << 0)
 100
 101#define DEFAULT_TIMEOUT_INTERVAL        HZ
 102
 103/* mostly device flags */
 104#define FLAGS_BUSY              0
 105#define FLAGS_FINAL             1
 106#define FLAGS_DMA_ACTIVE        2
 107#define FLAGS_OUTPUT_READY      3
 108#define FLAGS_INIT              4
 109#define FLAGS_CPU               5
 110#define FLAGS_DMA_READY         6
 111#define FLAGS_AUTO_XOR          7
 112#define FLAGS_BE32_SHA1         8
 113/* context flags */
 114#define FLAGS_FINUP             16
 115#define FLAGS_SG                17
 116
 117#define FLAGS_MODE_SHIFT        18
 118#define FLAGS_MODE_MASK         (SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
 119#define FLAGS_MODE_MD5          (SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
 120#define FLAGS_MODE_SHA1         (SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
 121#define FLAGS_MODE_SHA224       (SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
 122#define FLAGS_MODE_SHA256       (SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
 123#define FLAGS_MODE_SHA384       (SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
 124#define FLAGS_MODE_SHA512       (SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
 125
 126#define FLAGS_HMAC              21
 127#define FLAGS_ERROR             22
 128
 129#define OP_UPDATE               1
 130#define OP_FINAL                2
 131
 132#define OMAP_ALIGN_MASK         (sizeof(u32)-1)
 133#define OMAP_ALIGNED            __attribute__((aligned(sizeof(u32))))
 134
 135#define BUFLEN                  PAGE_SIZE
 136
 137struct omap_sham_dev;
 138
 139struct omap_sham_reqctx {
 140        struct omap_sham_dev    *dd;
 141        unsigned long           flags;
 142        unsigned long           op;
 143
 144        u8                      digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
 145        size_t                  digcnt;
 146        size_t                  bufcnt;
 147        size_t                  buflen;
 148        dma_addr_t              dma_addr;
 149
 150        /* walk state */
 151        struct scatterlist      *sg;
 152        struct scatterlist      sgl;
 153        unsigned int            offset; /* offset in current sg */
 154        unsigned int            total;  /* total request */
 155
 156        u8                      buffer[0] OMAP_ALIGNED;
 157};
 158
 159struct omap_sham_hmac_ctx {
 160        struct crypto_shash     *shash;
 161        u8                      ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 162        u8                      opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 163};
 164
 165struct omap_sham_ctx {
 166        struct omap_sham_dev    *dd;
 167
 168        unsigned long           flags;
 169
 170        /* fallback stuff */
 171        struct crypto_shash     *fallback;
 172
 173        struct omap_sham_hmac_ctx base[0];
 174};
 175
 176#define OMAP_SHAM_QUEUE_LENGTH  1
 177
 178struct omap_sham_algs_info {
 179        struct ahash_alg        *algs_list;
 180        unsigned int            size;
 181        unsigned int            registered;
 182};
 183
 184struct omap_sham_pdata {
 185        struct omap_sham_algs_info      *algs_info;
 186        unsigned int    algs_info_size;
 187        unsigned long   flags;
 188        int             digest_size;
 189
 190        void            (*copy_hash)(struct ahash_request *req, int out);
 191        void            (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
 192                                      int final, int dma);
 193        void            (*trigger)(struct omap_sham_dev *dd, size_t length);
 194        int             (*poll_irq)(struct omap_sham_dev *dd);
 195        irqreturn_t     (*intr_hdlr)(int irq, void *dev_id);
 196
 197        u32             odigest_ofs;
 198        u32             idigest_ofs;
 199        u32             din_ofs;
 200        u32             digcnt_ofs;
 201        u32             rev_ofs;
 202        u32             mask_ofs;
 203        u32             sysstatus_ofs;
 204        u32             mode_ofs;
 205        u32             length_ofs;
 206
 207        u32             major_mask;
 208        u32             major_shift;
 209        u32             minor_mask;
 210        u32             minor_shift;
 211};
 212
 213struct omap_sham_dev {
 214        struct list_head        list;
 215        unsigned long           phys_base;
 216        struct device           *dev;
 217        void __iomem            *io_base;
 218        int                     irq;
 219        spinlock_t              lock;
 220        int                     err;
 221        struct dma_chan         *dma_lch;
 222        struct tasklet_struct   done_task;
 223        u8                      polling_mode;
 224
 225        unsigned long           flags;
 226        struct crypto_queue     queue;
 227        struct ahash_request    *req;
 228
 229        const struct omap_sham_pdata    *pdata;
 230};
 231
 232struct omap_sham_drv {
 233        struct list_head        dev_list;
 234        spinlock_t              lock;
 235        unsigned long           flags;
 236};
 237
 238static struct omap_sham_drv sham = {
 239        .dev_list = LIST_HEAD_INIT(sham.dev_list),
 240        .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
 241};
 242
 243static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
 244{
 245        return __raw_readl(dd->io_base + offset);
 246}
 247
 248static inline void omap_sham_write(struct omap_sham_dev *dd,
 249                                        u32 offset, u32 value)
 250{
 251        __raw_writel(value, dd->io_base + offset);
 252}
 253
 254static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
 255                                        u32 value, u32 mask)
 256{
 257        u32 val;
 258
 259        val = omap_sham_read(dd, address);
 260        val &= ~mask;
 261        val |= value;
 262        omap_sham_write(dd, address, val);
 263}
 264
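    /* Busy-wait until @bit is set in the register at @offset, or time out. */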
 265static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 266{
 267        unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
 268
 269        while (!(omap_sham_read(dd, offset) & bit)) {
 270                if (time_is_before_jiffies(timeout))
 271                        return -ETIMEDOUT;
 272        }
 273
 274        return 0;
 275}
 276
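    /*
     * Copy the running digest between the request context and the IDIGEST
     * registers: out != 0 reads it out of the hardware, out == 0 writes it back.
     */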
 277static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
 278{
 279        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 280        struct omap_sham_dev *dd = ctx->dd;
 281        u32 *hash = (u32 *)ctx->digest;
 282        int i;
 283
 284        for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 285                if (out)
 286                        hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
 287                else
 288                        omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
 289        }
 290}
 291
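    /*
     * OMAP4 variant: for HMAC requests also shuttle the outer digest via the
     * ODIGEST registers, then handle the inner digest like OMAP2.
     */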
 292static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
 293{
 294        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 295        struct omap_sham_dev *dd = ctx->dd;
 296        int i;
 297
 298        if (ctx->flags & BIT(FLAGS_HMAC)) {
 299                struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 300                struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 301                struct omap_sham_hmac_ctx *bctx = tctx->base;
 302                u32 *opad = (u32 *)bctx->opad;
 303
 304                for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 305                        if (out)
 306                                opad[i] = omap_sham_read(dd,
 307                                                SHA_REG_ODIGEST(dd, i));
 308                        else
 309                                omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
 310                                                opad[i]);
 311                }
 312        }
 313
 314        omap_sham_copy_hash_omap2(req, out);
 315}
 316
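    /*
     * Copy the completed digest into req->result, fixing up word endianness
     * (the OMAP2 SHA1 result is big endian).
     */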
 317static void omap_sham_copy_ready_hash(struct ahash_request *req)
 318{
 319        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 320        u32 *in = (u32 *)ctx->digest;
 321        u32 *hash = (u32 *)req->result;
 322        int i, d, big_endian = 0;
 323
 324        if (!hash)
 325                return;
 326
 327        switch (ctx->flags & FLAGS_MODE_MASK) {
 328        case FLAGS_MODE_MD5:
 329                d = MD5_DIGEST_SIZE / sizeof(u32);
 330                break;
 331        case FLAGS_MODE_SHA1:
 332                /* OMAP2 SHA1 is big endian */
 333                if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
 334                        big_endian = 1;
 335                d = SHA1_DIGEST_SIZE / sizeof(u32);
 336                break;
 337        case FLAGS_MODE_SHA224:
 338                d = SHA224_DIGEST_SIZE / sizeof(u32);
 339                break;
 340        case FLAGS_MODE_SHA256:
 341                d = SHA256_DIGEST_SIZE / sizeof(u32);
 342                break;
 343        case FLAGS_MODE_SHA384:
 344                d = SHA384_DIGEST_SIZE / sizeof(u32);
 345                break;
 346        case FLAGS_MODE_SHA512:
 347                d = SHA512_DIGEST_SIZE / sizeof(u32);
 348                break;
 349        default:
 350                d = 0;
 351        }
 352
 353        if (big_endian)
 354                for (i = 0; i < d; i++)
 355                        hash[i] = be32_to_cpu(in[i]);
 356        else
 357                for (i = 0; i < d; i++)
 358                        hash[i] = le32_to_cpu(in[i]);
 359}
 360
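    /* Runtime-resume the module and mark the device as initialized. */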
 361static int omap_sham_hw_init(struct omap_sham_dev *dd)
 362{
 363        int err;
 364
 365        err = pm_runtime_get_sync(dd->dev);
 366        if (err < 0) {
 367                dev_err(dd->dev, "failed to get sync: %d\n", err);
 368                return err;
 369        }
 370
 371        if (!test_bit(FLAGS_INIT, &dd->flags)) {
 372                set_bit(FLAGS_INIT, &dd->flags);
 373                dd->err = 0;
 374        }
 375
 376        return 0;
 377}
 378
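    /*
     * Program DIGCNT, the interrupt/DMA mask and the CTRL register for the
     * next transfer on OMAP2/OMAP3 style hardware.
     */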
 379static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
 380                                 int final, int dma)
 381{
 382        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 383        u32 val = length << 5, mask;
 384
 385        if (likely(ctx->digcnt))
 386                omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
 387
 388        omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 389                SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
 390                SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 391        /*
 392         * Setting ALGO_CONST only for the first iteration
 393         * and CLOSE_HASH only for the last one.
 394         */
 395        if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
 396                val |= SHA_REG_CTRL_ALGO;
 397        if (!ctx->digcnt)
 398                val |= SHA_REG_CTRL_ALGO_CONST;
 399        if (final)
 400                val |= SHA_REG_CTRL_CLOSE_HASH;
 401
 402        mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
 403                        SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 404
 405        omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
 406}
 407
 408static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
 409{
 410}
 411
 412static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
 413{
 414        return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
 415}
 416
 417static int get_block_size(struct omap_sham_reqctx *ctx)
 418{
 419        int d;
 420
 421        switch (ctx->flags & FLAGS_MODE_MASK) {
 422        case FLAGS_MODE_MD5:
 423        case FLAGS_MODE_SHA1:
 424                d = SHA1_BLOCK_SIZE;
 425                break;
 426        case FLAGS_MODE_SHA224:
 427        case FLAGS_MODE_SHA256:
 428                d = SHA256_BLOCK_SIZE;
 429                break;
 430        case FLAGS_MODE_SHA384:
 431        case FLAGS_MODE_SHA512:
 432                d = SHA512_BLOCK_SIZE;
 433                break;
 434        default:
 435                d = 0;
 436        }
 437
 438        return d;
 439}
 440
 441static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
 442                                    u32 *value, int count)
 443{
 444        for (; count--; value++, offset += 4)
 445                omap_sham_write(dd, offset, *value);
 446}
 447
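    /*
     * Program the MODE, IRQENA and mask registers on OMAP4 style hardware.
     * On the first iteration of an HMAC request the key material held in
     * bctx->ipad is loaded, half into the outer and half into the inner
     * digest registers, for hardware key processing.
     */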
 448static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
 449                                 int final, int dma)
 450{
 451        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 452        u32 val, mask;
 453
 454        /*
 455         * Setting ALGO_CONST only for the first iteration and
 456         * CLOSE_HASH only for the last one. Note that flags mode bits
 457         * correspond to algorithm encoding in mode register.
 458         */
 459        val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
 460        if (!ctx->digcnt) {
 461                struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 462                struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 463                struct omap_sham_hmac_ctx *bctx = tctx->base;
 464                int bs, nr_dr;
 465
 466                val |= SHA_REG_MODE_ALGO_CONSTANT;
 467
 468                if (ctx->flags & BIT(FLAGS_HMAC)) {
 469                        bs = get_block_size(ctx);
 470                        nr_dr = bs / (2 * sizeof(u32));
 471                        val |= SHA_REG_MODE_HMAC_KEY_PROC;
 472                        omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
 473                                          (u32 *)bctx->ipad, nr_dr);
 474                        omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
 475                                          (u32 *)bctx->ipad + nr_dr, nr_dr);
 476                        ctx->digcnt += bs;
 477                }
 478        }
 479
 480        if (final) {
 481                val |= SHA_REG_MODE_CLOSE_HASH;
 482
 483                if (ctx->flags & BIT(FLAGS_HMAC))
 484                        val |= SHA_REG_MODE_HMAC_OUTER_HASH;
 485        }
 486
 487        mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
 488               SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
 489               SHA_REG_MODE_HMAC_KEY_PROC;
 490
 491        dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
 492        omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
 493        omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
 494        omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 495                             SHA_REG_MASK_IT_EN |
 496                                     (dma ? SHA_REG_MASK_DMA_EN : 0),
 497                             SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 498}
 499
 500static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
 501{
 502        omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
 503}
 504
 505static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
 506{
 507        return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
 508                              SHA_REG_IRQSTATUS_INPUT_RDY);
 509}
 510
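    /*
     * PIO path: poll for input-ready and feed @length bytes of @buf into the
     * data input registers one block at a time.
     */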
 511static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 512                              size_t length, int final)
 513{
 514        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 515        int count, len32, bs32, offset = 0;
 516        const u32 *buffer = (const u32 *)buf;
 517
 518        dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 519                                                ctx->digcnt, length, final);
 520
 521        dd->pdata->write_ctrl(dd, length, final, 0);
 522        dd->pdata->trigger(dd, length);
 523
 524        /* should be non-zero before next lines to disable clocks later */
 525        ctx->digcnt += length;
 526
 527        if (final)
 528                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 529
 530        set_bit(FLAGS_CPU, &dd->flags);
 531
 532        len32 = DIV_ROUND_UP(length, sizeof(u32));
 533        bs32 = get_block_size(ctx) / sizeof(u32);
 534
 535        while (len32) {
 536                if (dd->pdata->poll_irq(dd))
 537                        return -ETIMEDOUT;
 538
 539                for (count = 0; count < min(len32, bs32); count++, offset++)
 540                        omap_sham_write(dd, SHA_REG_DIN(dd, count),
 541                                        buffer[offset]);
 542                len32 -= min(len32, bs32);
 543        }
 544
 545        return -EINPROGRESS;
 546}
 547
 548static void omap_sham_dma_callback(void *param)
 549{
 550        struct omap_sham_dev *dd = param;
 551
 552        set_bit(FLAGS_DMA_READY, &dd->flags);
 553        tasklet_schedule(&dd->done_task);
 554}
 555
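    /*
     * DMA path: configure the slave channel towards the first data input
     * register and submit either a scatterlist or a single-buffer descriptor.
     */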
 556static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 557                              size_t length, int final, int is_sg)
 558{
 559        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 560        struct dma_async_tx_descriptor *tx;
 561        struct dma_slave_config cfg;
 562        int len32, ret, dma_min = get_block_size(ctx);
 563
 564        dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 565                                                ctx->digcnt, length, final);
 566
 567        memset(&cfg, 0, sizeof(cfg));
 568
 569        cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
 570        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 571        cfg.dst_maxburst = dma_min / DMA_SLAVE_BUSWIDTH_4_BYTES;
 572
 573        ret = dmaengine_slave_config(dd->dma_lch, &cfg);
 574        if (ret) {
 575                pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
 576                return ret;
 577        }
 578
 579        len32 = DIV_ROUND_UP(length, dma_min) * dma_min;
 580
 581        if (is_sg) {
 582                /*
 583                 * The SG entry passed in may not have the 'length' member
 584                 * set correctly so use a local SG entry (sgl) with the
 585                 * proper value for 'length' instead.  If this is not done,
 586                 * the dmaengine may try to DMA the incorrect amount of data.
 587                 */
 588                sg_init_table(&ctx->sgl, 1);
 589                sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
 590                ctx->sgl.offset = ctx->sg->offset;
 591                sg_dma_len(&ctx->sgl) = len32;
 592                sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
 593
 594                tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
 595                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 596        } else {
 597                tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
 598                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 599        }
 600
 601        if (!tx) {
 602                dev_err(dd->dev, "prep_slave_sg/single() failed\n");
 603                return -EINVAL;
 604        }
 605
 606        tx->callback = omap_sham_dma_callback;
 607        tx->callback_param = dd;
 608
 609        dd->pdata->write_ctrl(dd, length, final, 1);
 610
 611        ctx->digcnt += length;
 612
 613        if (final)
 614                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 615
 616        set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 617
 618        dmaengine_submit(tx);
 619        dma_async_issue_pending(dd->dma_lch);
 620
 621        dd->pdata->trigger(dd, length);
 622
 623        return -EINPROGRESS;
 624}
 625
 626static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
 627                                const u8 *data, size_t length)
 628{
 629        size_t count = min(length, ctx->buflen - ctx->bufcnt);
 630
 631        count = min(count, ctx->total);
 632        if (count <= 0)
 633                return 0;
 634        memcpy(ctx->buffer + ctx->bufcnt, data, count);
 635        ctx->bufcnt += count;
 636
 637        return count;
 638}
 639
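    /* Drain the scatterlist walk into the request buffer until it is full. */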
 640static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
 641{
 642        size_t count;
 643        const u8 *vaddr;
 644
 645        while (ctx->sg) {
 646                vaddr = kmap_atomic(sg_page(ctx->sg));
 647                vaddr += ctx->sg->offset;
 648
 649                count = omap_sham_append_buffer(ctx,
 650                                vaddr + ctx->offset,
 651                                ctx->sg->length - ctx->offset);
 652
 653                kunmap_atomic((void *)vaddr);
 654
 655                if (!count)
 656                        break;
 657                ctx->offset += count;
 658                ctx->total -= count;
 659                if (ctx->offset == ctx->sg->length) {
 660                        ctx->sg = sg_next(ctx->sg);
 661                        if (ctx->sg)
 662                                ctx->offset = 0;
 663                        else
 664                                ctx->total = 0;
 665                }
 666        }
 667
 668        return 0;
 669}
 670
 671static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 672                                        struct omap_sham_reqctx *ctx,
 673                                        size_t length, int final)
 674{
 675        int ret;
 676
 677        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
 678                                       DMA_TO_DEVICE);
 679        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 680                dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
 681                return -EINVAL;
 682        }
 683
 684        ctx->flags &= ~BIT(FLAGS_SG);
 685
 686        ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
 687        if (ret != -EINPROGRESS)
 688                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
 689                                 DMA_TO_DEVICE);
 690
 691        return ret;
 692}
 693
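    /*
     * Slow path: buffer the input and start a DMA transfer of the buffer
     * only when it is full or the request is being finalized.
     */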
 694static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 695{
 696        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 697        unsigned int final;
 698        size_t count;
 699
 700        omap_sham_append_sg(ctx);
 701
 702        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 703
 704        dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
 705                                         ctx->bufcnt, ctx->digcnt, final);
 706
 707        if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 708                count = ctx->bufcnt;
 709                ctx->bufcnt = 0;
 710                return omap_sham_xmit_dma_map(dd, ctx, count, final);
 711        }
 712
 713        return 0;
 714}
 715
 716/* Start address alignment */
 717#define SG_AA(sg)       (IS_ALIGNED(sg->offset, sizeof(u32)))
 718/* SHA1 block size alignment */
 719#define SG_SA(sg, bs)   (IS_ALIGNED(sg->length, bs))
 720
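    /*
     * Fast path: DMA directly from the scatterlist when start address and
     * length alignment allow it, otherwise fall back to the buffered slow path.
     */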
 721static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 722{
 723        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 724        unsigned int length, final, tail;
 725        struct scatterlist *sg;
 726        int ret, bs;
 727
 728        if (!ctx->total)
 729                return 0;
 730
 731        if (ctx->bufcnt || ctx->offset)
 732                return omap_sham_update_dma_slow(dd);
 733
 734        /*
 735         * Don't use the sg interface when the transfer size is less
 736         * than the number of elements in a DMA frame.  Otherwise,
 737         * the dmaengine infrastructure will calculate that it needs
 738         * to transfer 0 frames which ultimately fails.
 739         */
 740        if (ctx->total < get_block_size(ctx))
 741                return omap_sham_update_dma_slow(dd);
 742
 743        dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
 744                        ctx->digcnt, ctx->bufcnt, ctx->total);
 745
 746        sg = ctx->sg;
 747        bs = get_block_size(ctx);
 748
 749        if (!SG_AA(sg))
 750                return omap_sham_update_dma_slow(dd);
 751
 752        if (!sg_is_last(sg) && !SG_SA(sg, bs))
 753                /* size is not BLOCK_SIZE aligned */
 754                return omap_sham_update_dma_slow(dd);
 755
 756        length = min(ctx->total, sg->length);
 757
 758        if (sg_is_last(sg)) {
 759                if (!(ctx->flags & BIT(FLAGS_FINUP))) {
 760                        /* not the final data: what is sent now must stay BLOCK_SIZE aligned */
 761                        tail = length & (bs - 1);
 762                        /* without finup() we need one block to close hash */
 763                        if (!tail)
 764                                tail = bs;
 765                        length -= tail;
 766                }
 767        }
 768
 769        if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 770                dev_err(dd->dev, "dma_map_sg error\n");
 771                return -EINVAL;
 772        }
 773
 774        ctx->flags |= BIT(FLAGS_SG);
 775
 776        ctx->total -= length;
 777        ctx->offset = length; /* offset where to start slow */
 778
 779        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 780
 781        ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
 782        if (ret != -EINPROGRESS)
 783                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 784
 785        return ret;
 786}
 787
 788static int omap_sham_update_cpu(struct omap_sham_dev *dd)
 789{
 790        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 791        int bufcnt, final;
 792
 793        if (!ctx->total)
 794                return 0;
 795
 796        omap_sham_append_sg(ctx);
 797
 798        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 799
 800        dev_dbg(dd->dev, "cpu: bufcnt: %u, digcnt: %d, final: %d\n",
 801                ctx->bufcnt, ctx->digcnt, final);
 802
 803        if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 804                bufcnt = ctx->bufcnt;
 805                ctx->bufcnt = 0;
 806                return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final);
 807        }
 808
 809        return 0;
 810}
 811
 812static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 813{
 814        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 815
 816        dmaengine_terminate_all(dd->dma_lch);
 817
 818        if (ctx->flags & BIT(FLAGS_SG)) {
 819                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 820                if (ctx->sg->length == ctx->offset) {
 821                        ctx->sg = sg_next(ctx->sg);
 822                        if (ctx->sg)
 823                                ctx->offset = 0;
 824                }
 825        } else {
 826                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
 827                                 DMA_TO_DEVICE);
 828        }
 829
 830        return 0;
 831}
 832
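    /*
     * Pick a device, derive the algorithm mode and block size from the
     * digest size and reset the request context.  For HMAC without hardware
     * key processing the buffer is preloaded with ipad.
     */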
 833static int omap_sham_init(struct ahash_request *req)
 834{
 835        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 836        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 837        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 838        struct omap_sham_dev *dd = NULL, *tmp;
 839        int bs = 0;
 840
 841        spin_lock_bh(&sham.lock);
 842        if (!tctx->dd) {
 843                list_for_each_entry(tmp, &sham.dev_list, list) {
 844                        dd = tmp;
 845                        break;
 846                }
 847                tctx->dd = dd;
 848        } else {
 849                dd = tctx->dd;
 850        }
 851        spin_unlock_bh(&sham.lock);
 852
 853        ctx->dd = dd;
 854
 855        ctx->flags = 0;
 856
 857        dev_dbg(dd->dev, "init: digest size: %d\n",
 858                crypto_ahash_digestsize(tfm));
 859
 860        switch (crypto_ahash_digestsize(tfm)) {
 861        case MD5_DIGEST_SIZE:
 862                ctx->flags |= FLAGS_MODE_MD5;
 863                bs = SHA1_BLOCK_SIZE;
 864                break;
 865        case SHA1_DIGEST_SIZE:
 866                ctx->flags |= FLAGS_MODE_SHA1;
 867                bs = SHA1_BLOCK_SIZE;
 868                break;
 869        case SHA224_DIGEST_SIZE:
 870                ctx->flags |= FLAGS_MODE_SHA224;
 871                bs = SHA224_BLOCK_SIZE;
 872                break;
 873        case SHA256_DIGEST_SIZE:
 874                ctx->flags |= FLAGS_MODE_SHA256;
 875                bs = SHA256_BLOCK_SIZE;
 876                break;
 877        case SHA384_DIGEST_SIZE:
 878                ctx->flags |= FLAGS_MODE_SHA384;
 879                bs = SHA384_BLOCK_SIZE;
 880                break;
 881        case SHA512_DIGEST_SIZE:
 882                ctx->flags |= FLAGS_MODE_SHA512;
 883                bs = SHA512_BLOCK_SIZE;
 884                break;
 885        }
 886
 887        ctx->bufcnt = 0;
 888        ctx->digcnt = 0;
 889        ctx->buflen = BUFLEN;
 890
 891        if (tctx->flags & BIT(FLAGS_HMAC)) {
 892                if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
 893                        struct omap_sham_hmac_ctx *bctx = tctx->base;
 894
 895                        memcpy(ctx->buffer, bctx->ipad, bs);
 896                        ctx->bufcnt = bs;
 897                }
 898
 899                ctx->flags |= BIT(FLAGS_HMAC);
 900        }
 901
 902        return 0;
 903
 904}
 905
 906static int omap_sham_update_req(struct omap_sham_dev *dd)
 907{
 908        struct ahash_request *req = dd->req;
 909        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 910        int err;
 911
 912        dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
 913                 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
 914
 915        if (ctx->flags & BIT(FLAGS_CPU))
 916                err = omap_sham_update_cpu(dd);
 917        else
 918                err = omap_sham_update_dma_start(dd);
 919
 920        /* wait for dma completion before we can take more data */
 921        dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
 922
 923        return err;
 924}
 925
 926static int omap_sham_final_req(struct omap_sham_dev *dd)
 927{
 928        struct ahash_request *req = dd->req;
 929        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 930        int err = 0, use_dma = 1;
 931
 932        if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode)
 933                /*
 934                 * it is faster to handle the last block with the CPU, and
 935                 * the CPU must be used when DMA is not available.
 936                 */
 937                use_dma = 0;
 938
 939        if (use_dma)
 940                err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
 941        else
 942                err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 943
 944        ctx->bufcnt = 0;
 945
 946        dev_dbg(dd->dev, "final_req: err: %d\n", err);
 947
 948        return err;
 949}
 950
 951static int omap_sham_finish_hmac(struct ahash_request *req)
 952{
 953        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 954        struct omap_sham_hmac_ctx *bctx = tctx->base;
 955        int bs = crypto_shash_blocksize(bctx->shash);
 956        int ds = crypto_shash_digestsize(bctx->shash);
 957        SHASH_DESC_ON_STACK(shash, bctx->shash);
 958
 959        shash->tfm = bctx->shash;
 960        shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
 961
 962        return crypto_shash_init(shash) ?:
 963               crypto_shash_update(shash, bctx->opad, bs) ?:
 964               crypto_shash_finup(shash, req->result, ds, req->result);
 965}
 966
 967static int omap_sham_finish(struct ahash_request *req)
 968{
 969        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 970        struct omap_sham_dev *dd = ctx->dd;
 971        int err = 0;
 972
 973        if (ctx->digcnt) {
 974                omap_sham_copy_ready_hash(req);
 975                if ((ctx->flags & BIT(FLAGS_HMAC)) &&
 976                                !test_bit(FLAGS_AUTO_XOR, &dd->flags))
 977                        err = omap_sham_finish_hmac(req);
 978        }
 979
 980        dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 981
 982        return err;
 983}
 984
 985static void omap_sham_finish_req(struct ahash_request *req, int err)
 986{
 987        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 988        struct omap_sham_dev *dd = ctx->dd;
 989
 990        if (!err) {
 991                dd->pdata->copy_hash(req, 1);
 992                if (test_bit(FLAGS_FINAL, &dd->flags))
 993                        err = omap_sham_finish(req);
 994        } else {
 995                ctx->flags |= BIT(FLAGS_ERROR);
 996        }
 997
 998        /* atomic operation is not needed here */
 999        dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1000                        BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1001
1002        pm_runtime_put(dd->dev);
1003
1004        if (req->base.complete)
1005                req->base.complete(&req->base, err);
1006
1007        /* handle new request */
1008        tasklet_schedule(&dd->done_task);
1009}
1010
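    /*
     * Enqueue @req (if any) and, unless the device is busy, dequeue the next
     * request, initialize the hardware, restore a partial hash and start the
     * update and/or final step.  Errors and synchronous completions are
     * finished here directly.
     */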
1011static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1012                                  struct ahash_request *req)
1013{
1014        struct crypto_async_request *async_req, *backlog;
1015        struct omap_sham_reqctx *ctx;
1016        unsigned long flags;
1017        int err = 0, ret = 0;
1018
1019        spin_lock_irqsave(&dd->lock, flags);
1020        if (req)
1021                ret = ahash_enqueue_request(&dd->queue, req);
1022        if (test_bit(FLAGS_BUSY, &dd->flags)) {
1023                spin_unlock_irqrestore(&dd->lock, flags);
1024                return ret;
1025        }
1026        backlog = crypto_get_backlog(&dd->queue);
1027        async_req = crypto_dequeue_request(&dd->queue);
1028        if (async_req)
1029                set_bit(FLAGS_BUSY, &dd->flags);
1030        spin_unlock_irqrestore(&dd->lock, flags);
1031
1032        if (!async_req)
1033                return ret;
1034
1035        if (backlog)
1036                backlog->complete(backlog, -EINPROGRESS);
1037
1038        req = ahash_request_cast(async_req);
1039        dd->req = req;
1040        ctx = ahash_request_ctx(req);
1041
1042        dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
1043                                                ctx->op, req->nbytes);
1044
1045        err = omap_sham_hw_init(dd);
1046        if (err)
1047                goto err1;
1048
1049        if (ctx->digcnt)
1050                /* request has changed - restore hash */
1051                dd->pdata->copy_hash(req, 0);
1052
1053        if (ctx->op == OP_UPDATE) {
1054                err = omap_sham_update_req(dd);
1055                if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
1056                        /* no final() after finup() */
1057                        err = omap_sham_final_req(dd);
1058        } else if (ctx->op == OP_FINAL) {
1059                err = omap_sham_final_req(dd);
1060        }
1061err1:
1062        if (err != -EINPROGRESS)
1063                /* done_task will not finish it, so do it here */
1064                omap_sham_finish_req(req, err);
1065
1066        dev_dbg(dd->dev, "exit, err: %d\n", err);
1067
1068        return ret;
1069}
1070
1071static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1072{
1073        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1074        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1075        struct omap_sham_dev *dd = tctx->dd;
1076
1077        ctx->op = op;
1078
1079        return omap_sham_handle_queue(dd, req);
1080}
1081
1082static int omap_sham_update(struct ahash_request *req)
1083{
1084        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1085        struct omap_sham_dev *dd = ctx->dd;
1086        int bs = get_block_size(ctx);
1087
1088        if (!req->nbytes)
1089                return 0;
1090
1091        ctx->total = req->nbytes;
1092        ctx->sg = req->src;
1093        ctx->offset = 0;
1094
1095        if (ctx->flags & BIT(FLAGS_FINUP)) {
1096                if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
1097                        /*
1098                         * OMAP HW accel works only with buffers >= 9 bytes;
1099                         * will switch to the software fallback in final().
1100                         * final() has the same request and data.
1101                         */
1102                        omap_sham_append_sg(ctx);
1103                        return 0;
1104                } else if ((ctx->bufcnt + ctx->total <= bs) ||
1105                           dd->polling_mode) {
1106                        /*
1107                         * it is faster to use the CPU for short transfers,
1108                         * and the CPU must be used when DMA is not available.
1109                         */
1110                        ctx->flags |= BIT(FLAGS_CPU);
1111                }
1112        } else if (ctx->bufcnt + ctx->total < ctx->buflen) {
1113                omap_sham_append_sg(ctx);
1114                return 0;
1115        }
1116
1117        if (dd->polling_mode)
1118                ctx->flags |= BIT(FLAGS_CPU);
1119
1120        return omap_sham_enqueue(req, OP_UPDATE);
1121}
1122
1123static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
1124                                  const u8 *data, unsigned int len, u8 *out)
1125{
1126        SHASH_DESC_ON_STACK(shash, tfm);
1127
1128        shash->tfm = tfm;
1129        shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1130
1131        return crypto_shash_digest(shash, data, len, out);
1132}
1133
1134static int omap_sham_final_shash(struct ahash_request *req)
1135{
1136        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1137        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1138
1139        return omap_sham_shash_digest(tctx->fallback, req->base.flags,
1140                                      ctx->buffer, ctx->bufcnt, req->result);
1141}
1142
1143static int omap_sham_final(struct ahash_request *req)
1144{
1145        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1146
1147        ctx->flags |= BIT(FLAGS_FINUP);
1148
1149        if (ctx->flags & BIT(FLAGS_ERROR))
1150                return 0; /* uncompleted hash is not needed */
1151
1152        /* OMAP HW accel works only with buffers >= 9 bytes */
1153        /* HMAC is always >= 9 because ipad is one full block */
1154        if ((ctx->digcnt + ctx->bufcnt) < 9)
1155                return omap_sham_final_shash(req);
1156        else if (ctx->bufcnt)
1157                return omap_sham_enqueue(req, OP_FINAL);
1158
1159        /* copy ready hash (+ finalize hmac) */
1160        return omap_sham_finish(req);
1161}
1162
1163static int omap_sham_finup(struct ahash_request *req)
1164{
1165        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1166        int err1, err2;
1167
1168        ctx->flags |= BIT(FLAGS_FINUP);
1169
1170        err1 = omap_sham_update(req);
1171        if (err1 == -EINPROGRESS || err1 == -EBUSY)
1172                return err1;
1173        /*
1174         * final() always has to be called to clean up resources,
1175         * even if update() failed, except for -EINPROGRESS
1176         */
1177        err2 = omap_sham_final(req);
1178
1179        return err1 ?: err2;
1180}
1181
1182static int omap_sham_digest(struct ahash_request *req)
1183{
1184        return omap_sham_init(req) ?: omap_sham_finup(req);
1185}
1186
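    /*
     * HMAC setkey: the key is also installed on the software fallback; keys
     * longer than the block size are digested first, then zero-padded, and
     * without hardware key processing ipad/opad are precomputed here.
     */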
1187static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1188                      unsigned int keylen)
1189{
1190        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1191        struct omap_sham_hmac_ctx *bctx = tctx->base;
1192        int bs = crypto_shash_blocksize(bctx->shash);
1193        int ds = crypto_shash_digestsize(bctx->shash);
1194        struct omap_sham_dev *dd = NULL, *tmp;
1195        int err, i;
1196
1197        spin_lock_bh(&sham.lock);
1198        if (!tctx->dd) {
1199                list_for_each_entry(tmp, &sham.dev_list, list) {
1200                        dd = tmp;
1201                        break;
1202                }
1203                tctx->dd = dd;
1204        } else {
1205                dd = tctx->dd;
1206        }
1207        spin_unlock_bh(&sham.lock);
1208
1209        err = crypto_shash_setkey(tctx->fallback, key, keylen);
1210        if (err)
1211                return err;
1212
1213        if (keylen > bs) {
1214                err = omap_sham_shash_digest(bctx->shash,
1215                                crypto_shash_get_flags(bctx->shash),
1216                                key, keylen, bctx->ipad);
1217                if (err)
1218                        return err;
1219                keylen = ds;
1220        } else {
1221                memcpy(bctx->ipad, key, keylen);
1222        }
1223
1224        memset(bctx->ipad + keylen, 0, bs - keylen);
1225
1226        if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
1227                memcpy(bctx->opad, bctx->ipad, bs);
1228
1229                for (i = 0; i < bs; i++) {
1230                        bctx->ipad[i] ^= 0x36;
1231                        bctx->opad[i] ^= 0x5c;
1232                }
1233        }
1234
1235        return err;
1236}
1237
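    /*
     * Allocate the software fallback (and, for HMAC variants, the base shash
     * used for key processing) and set the request context size.
     */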
1238static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1239{
1240        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1241        const char *alg_name = crypto_tfm_alg_name(tfm);
1242
1243        /* Allocate a fallback and abort if it failed. */
1244        tctx->fallback = crypto_alloc_shash(alg_name, 0,
1245                                            CRYPTO_ALG_NEED_FALLBACK);
1246        if (IS_ERR(tctx->fallback)) {
1247                pr_err("omap-sham: fallback driver '%s' "
1248                                "could not be loaded.\n", alg_name);
1249                return PTR_ERR(tctx->fallback);
1250        }
1251
1252        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1253                                 sizeof(struct omap_sham_reqctx) + BUFLEN);
1254
1255        if (alg_base) {
1256                struct omap_sham_hmac_ctx *bctx = tctx->base;
1257                tctx->flags |= BIT(FLAGS_HMAC);
1258                bctx->shash = crypto_alloc_shash(alg_base, 0,
1259                                                CRYPTO_ALG_NEED_FALLBACK);
1260                if (IS_ERR(bctx->shash)) {
1261                        pr_err("omap-sham: base driver '%s' "
1262                                        "could not be loaded.\n", alg_base);
1263                        crypto_free_shash(tctx->fallback);
1264                        return PTR_ERR(bctx->shash);
1265                }
1266
1267        }
1268
1269        return 0;
1270}
1271
1272static int omap_sham_cra_init(struct crypto_tfm *tfm)
1273{
1274        return omap_sham_cra_init_alg(tfm, NULL);
1275}
1276
1277static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1278{
1279        return omap_sham_cra_init_alg(tfm, "sha1");
1280}
1281
1282static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1283{
1284        return omap_sham_cra_init_alg(tfm, "sha224");
1285}
1286
1287static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1288{
1289        return omap_sham_cra_init_alg(tfm, "sha256");
1290}
1291
1292static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1293{
1294        return omap_sham_cra_init_alg(tfm, "md5");
1295}
1296
1297static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1298{
1299        return omap_sham_cra_init_alg(tfm, "sha384");
1300}
1301
1302static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1303{
1304        return omap_sham_cra_init_alg(tfm, "sha512");
1305}
1306
1307static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1308{
1309        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1310
1311        crypto_free_shash(tctx->fallback);
1312        tctx->fallback = NULL;
1313
1314        if (tctx->flags & BIT(FLAGS_HMAC)) {
1315                struct omap_sham_hmac_ctx *bctx = tctx->base;
1316                crypto_free_shash(bctx->shash);
1317        }
1318}
1319
1320static struct ahash_alg algs_sha1_md5[] = {
1321{
1322        .init           = omap_sham_init,
1323        .update         = omap_sham_update,
1324        .final          = omap_sham_final,
1325        .finup          = omap_sham_finup,
1326        .digest         = omap_sham_digest,
1327        .halg.digestsize        = SHA1_DIGEST_SIZE,
1328        .halg.base      = {
1329                .cra_name               = "sha1",
1330                .cra_driver_name        = "omap-sha1",
1331                .cra_priority           = 100,
1332                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1333                                                CRYPTO_ALG_KERN_DRIVER_ONLY |
1334                                                CRYPTO_ALG_ASYNC |
1335                                                CRYPTO_ALG_NEED_FALLBACK,
1336                .cra_blocksize          = SHA1_BLOCK_SIZE,
1337                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
1338                .cra_alignmask          = 0,
1339                .cra_module             = THIS_MODULE,
1340                .cra_init               = omap_sham_cra_init,
1341                .cra_exit               = omap_sham_cra_exit,
1342        }
1343},
1344{
1345        .init           = omap_sham_init,
1346        .update         = omap_sham_update,
1347        .final          = omap_sham_final,
1348        .finup          = omap_sham_finup,
1349        .digest         = omap_sham_digest,
1350        .halg.digestsize        = MD5_DIGEST_SIZE,
1351        .halg.base      = {
1352                .cra_name               = "md5",
1353                .cra_driver_name        = "omap-md5",
1354                .cra_priority           = 100,
1355                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1356                                                CRYPTO_ALG_KERN_DRIVER_ONLY |
1357                                                CRYPTO_ALG_ASYNC |
1358                                                CRYPTO_ALG_NEED_FALLBACK,
1359                .cra_blocksize          = SHA1_BLOCK_SIZE,
1360                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
1361                .cra_alignmask          = OMAP_ALIGN_MASK,
1362                .cra_module             = THIS_MODULE,
1363                .cra_init               = omap_sham_cra_init,
1364                .cra_exit               = omap_sham_cra_exit,
1365        }
1366},
1367{
1368        .init           = omap_sham_init,
1369        .update         = omap_sham_update,
1370        .final          = omap_sham_final,
1371        .finup          = omap_sham_finup,
1372        .digest         = omap_sham_digest,
1373        .setkey         = omap_sham_setkey,
1374        .halg.digestsize        = SHA1_DIGEST_SIZE,
1375        .halg.base      = {
1376                .cra_name               = "hmac(sha1)",
1377                .cra_driver_name        = "omap-hmac-sha1",
1378                .cra_priority           = 100,
1379                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1380                                                CRYPTO_ALG_KERN_DRIVER_ONLY |
1381                                                CRYPTO_ALG_ASYNC |
1382                                                CRYPTO_ALG_NEED_FALLBACK,
1383                .cra_blocksize          = SHA1_BLOCK_SIZE,
1384                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
1385                                        sizeof(struct omap_sham_hmac_ctx),
1386                .cra_alignmask          = OMAP_ALIGN_MASK,
1387                .cra_module             = THIS_MODULE,
1388                .cra_init               = omap_sham_cra_sha1_init,
1389                .cra_exit               = omap_sham_cra_exit,
1390        }
1391},
1392{
1393        .init           = omap_sham_init,
1394        .update         = omap_sham_update,
1395        .final          = omap_sham_final,
1396        .finup          = omap_sham_finup,
1397        .digest         = omap_sham_digest,
1398        .setkey         = omap_sham_setkey,
1399        .halg.digestsize        = MD5_DIGEST_SIZE,
1400        .halg.base      = {
1401                .cra_name               = "hmac(md5)",
1402                .cra_driver_name        = "omap-hmac-md5",
1403                .cra_priority           = 100,
1404                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1405                                                CRYPTO_ALG_KERN_DRIVER_ONLY |
1406                                                CRYPTO_ALG_ASYNC |
1407                                                CRYPTO_ALG_NEED_FALLBACK,
1408                .cra_blocksize          = SHA1_BLOCK_SIZE,
1409                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
1410                                        sizeof(struct omap_sham_hmac_ctx),
1411                .cra_alignmask          = OMAP_ALIGN_MASK,
1412                .cra_module             = THIS_MODULE,
1413                .cra_init               = omap_sham_cra_md5_init,
1414                .cra_exit               = omap_sham_cra_exit,
1415        }
1416}
1417};
1418
1419/* OMAP4 has some algs in addition to what OMAP2 has */
1420static struct ahash_alg algs_sha224_sha256[] = {
1421{
1422        .init           = omap_sham_init,
1423        .update         = omap_sham_update,
1424        .final          = omap_sham_final,
1425        .finup          = omap_sham_finup,
1426        .digest         = omap_sham_digest,
1427        .halg.digestsize        = SHA224_DIGEST_SIZE,
1428        .halg.base      = {
1429                .cra_name               = "sha224",
1430                .cra_driver_name        = "omap-sha224",
1431                .cra_priority           = 100,
1432                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1433                                                CRYPTO_ALG_ASYNC |
1434                                                CRYPTO_ALG_NEED_FALLBACK,
1435                .cra_blocksize          = SHA224_BLOCK_SIZE,
1436                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
1437                .cra_alignmask          = 0,
1438                .cra_module             = THIS_MODULE,
1439                .cra_init               = omap_sham_cra_init,
1440                .cra_exit               = omap_sham_cra_exit,
1441        }
1442},
1443{
1444        .init           = omap_sham_init,
1445        .update         = omap_sham_update,
1446        .final          = omap_sham_final,
1447        .finup          = omap_sham_finup,
1448        .digest         = omap_sham_digest,
1449        .halg.digestsize        = SHA256_DIGEST_SIZE,
1450        .halg.base      = {
1451                .cra_name               = "sha256",
1452                .cra_driver_name        = "omap-sha256",
1453                .cra_priority           = 100,
1454                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1455                                                CRYPTO_ALG_ASYNC |
1456                                                CRYPTO_ALG_NEED_FALLBACK,
1457                .cra_blocksize          = SHA256_BLOCK_SIZE,
1458                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
1459                .cra_alignmask          = 0,
1460                .cra_module             = THIS_MODULE,
1461                .cra_init               = omap_sham_cra_init,
1462                .cra_exit               = omap_sham_cra_exit,
1463        }
1464},
1465{
1466        .init           = omap_sham_init,
1467        .update         = omap_sham_update,
1468        .final          = omap_sham_final,
1469        .finup          = omap_sham_finup,
1470        .digest         = omap_sham_digest,
1471        .setkey         = omap_sham_setkey,
1472        .halg.digestsize        = SHA224_DIGEST_SIZE,
1473        .halg.base      = {
1474                .cra_name               = "hmac(sha224)",
1475                .cra_driver_name        = "omap-hmac-sha224",
1476                .cra_priority           = 100,
1477                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1478                                                CRYPTO_ALG_ASYNC |
1479                                                CRYPTO_ALG_NEED_FALLBACK,
1480                .cra_blocksize          = SHA224_BLOCK_SIZE,
1481                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
1482                                        sizeof(struct omap_sham_hmac_ctx),
1483                .cra_alignmask          = OMAP_ALIGN_MASK,
1484                .cra_module             = THIS_MODULE,
1485                .cra_init               = omap_sham_cra_sha224_init,
1486                .cra_exit               = omap_sham_cra_exit,
1487        }
1488},
1489{
1490        .init           = omap_sham_init,
1491        .update         = omap_sham_update,
1492        .final          = omap_sham_final,
1493        .finup          = omap_sham_finup,
1494        .digest         = omap_sham_digest,
1495        .setkey         = omap_sham_setkey,
1496        .halg.digestsize        = SHA256_DIGEST_SIZE,
1497        .halg.base      = {
1498                .cra_name               = "hmac(sha256)",
1499                .cra_driver_name        = "omap-hmac-sha256",
1500                .cra_priority           = 100,
1501                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1502                                                CRYPTO_ALG_ASYNC |
1503                                                CRYPTO_ALG_NEED_FALLBACK,
1504                .cra_blocksize          = SHA256_BLOCK_SIZE,
1505                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
1506                                        sizeof(struct omap_sham_hmac_ctx),
1507                .cra_alignmask          = OMAP_ALIGN_MASK,
1508                .cra_module             = THIS_MODULE,
1509                .cra_init               = omap_sham_cra_sha256_init,
1510                .cra_exit               = omap_sham_cra_exit,
1511        }
1512},
1513};
1514
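/*
 * SHA2-384/512 variants; only registered when the per-SoC pdata lists
 * them (see omap_sham_algs_info_omap5 below).
 */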
1515static struct ahash_alg algs_sha384_sha512[] = {
1516{
1517        .init           = omap_sham_init,
1518        .update         = omap_sham_update,
1519        .final          = omap_sham_final,
1520        .finup          = omap_sham_finup,
1521        .digest         = omap_sham_digest,
1522        .halg.digestsize        = SHA384_DIGEST_SIZE,
1523        .halg.base      = {
1524                .cra_name               = "sha384",
1525                .cra_driver_name        = "omap-sha384",
1526                .cra_priority           = 100,
1527                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1528                                                CRYPTO_ALG_ASYNC |
1529                                                CRYPTO_ALG_NEED_FALLBACK,
1530                .cra_blocksize          = SHA384_BLOCK_SIZE,
1531                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
1532                .cra_alignmask          = 0,
1533                .cra_module             = THIS_MODULE,
1534                .cra_init               = omap_sham_cra_init,
1535                .cra_exit               = omap_sham_cra_exit,
1536        }
1537},
1538{
1539        .init           = omap_sham_init,
1540        .update         = omap_sham_update,
1541        .final          = omap_sham_final,
1542        .finup          = omap_sham_finup,
1543        .digest         = omap_sham_digest,
1544        .halg.digestsize        = SHA512_DIGEST_SIZE,
1545        .halg.base      = {
1546                .cra_name               = "sha512",
1547                .cra_driver_name        = "omap-sha512",
1548                .cra_priority           = 100,
1549                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1550                                                CRYPTO_ALG_ASYNC |
1551                                                CRYPTO_ALG_NEED_FALLBACK,
1552                .cra_blocksize          = SHA512_BLOCK_SIZE,
1553                .cra_ctxsize            = sizeof(struct omap_sham_ctx),
1554                .cra_alignmask          = 0,
1555                .cra_module             = THIS_MODULE,
1556                .cra_init               = omap_sham_cra_init,
1557                .cra_exit               = omap_sham_cra_exit,
1558        }
1559},
1560{
1561        .init           = omap_sham_init,
1562        .update         = omap_sham_update,
1563        .final          = omap_sham_final,
1564        .finup          = omap_sham_finup,
1565        .digest         = omap_sham_digest,
1566        .setkey         = omap_sham_setkey,
1567        .halg.digestsize        = SHA384_DIGEST_SIZE,
1568        .halg.base      = {
1569                .cra_name               = "hmac(sha384)",
1570                .cra_driver_name        = "omap-hmac-sha384",
1571                .cra_priority           = 100,
1572                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1573                                                CRYPTO_ALG_ASYNC |
1574                                                CRYPTO_ALG_NEED_FALLBACK,
1575                .cra_blocksize          = SHA384_BLOCK_SIZE,
1576                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
1577                                        sizeof(struct omap_sham_hmac_ctx),
1578                .cra_alignmask          = OMAP_ALIGN_MASK,
1579                .cra_module             = THIS_MODULE,
1580                .cra_init               = omap_sham_cra_sha384_init,
1581                .cra_exit               = omap_sham_cra_exit,
1582        }
1583},
1584{
1585        .init           = omap_sham_init,
1586        .update         = omap_sham_update,
1587        .final          = omap_sham_final,
1588        .finup          = omap_sham_finup,
1589        .digest         = omap_sham_digest,
1590        .setkey         = omap_sham_setkey,
1591        .halg.digestsize        = SHA512_DIGEST_SIZE,
1592        .halg.base      = {
1593                .cra_name               = "hmac(sha512)",
1594                .cra_driver_name        = "omap-hmac-sha512",
1595                .cra_priority           = 100,
1596                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
1597                                                CRYPTO_ALG_ASYNC |
1598                                                CRYPTO_ALG_NEED_FALLBACK,
1599                .cra_blocksize          = SHA512_BLOCK_SIZE,
1600                .cra_ctxsize            = sizeof(struct omap_sham_ctx) +
1601                                        sizeof(struct omap_sham_hmac_ctx),
1602                .cra_alignmask          = OMAP_ALIGN_MASK,
1603                .cra_module             = THIS_MODULE,
1604                .cra_init               = omap_sham_cra_sha512_init,
1605                .cra_exit               = omap_sham_cra_exit,
1606        }
1607},
1608};
1609
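/*
 * Bottom-half tasklet: if the device is idle, restart the request queue;
 * otherwise push the next chunk of data (CPU or DMA path) and, once
 * nothing more is in flight, finish the current request.
 */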
1610static void omap_sham_done_task(unsigned long data)
1611{
1612        struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1613        int err = 0;
1614
1615        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1616                omap_sham_handle_queue(dd, NULL);
1617                return;
1618        }
1619
1620        if (test_bit(FLAGS_CPU, &dd->flags)) {
1621                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1622                        /* hash or semi-hash ready */
1623                        err = omap_sham_update_cpu(dd);
1624                        if (err != -EINPROGRESS)
1625                                goto finish;
1626                }
1627        } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1628                if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1629                        omap_sham_update_dma_stop(dd);
1630                        if (dd->err) {
1631                                err = dd->err;
1632                                goto finish;
1633                        }
1634                }
1635                if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1636                        /* hash or semi-hash ready */
1637                        clear_bit(FLAGS_DMA_READY, &dd->flags);
1638                        err = omap_sham_update_dma_start(dd);
1639                        if (err != -EINPROGRESS)
1640                                goto finish;
1641                }
1642        }
1643
1644        return;
1645
1646finish:
1647        dev_dbg(dd->dev, "update done: err: %d\n", err);
1648        /* finish current request */
1649        omap_sham_finish_req(dd->req, err);
1650}
1651
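/*
 * Work shared by both interrupt handlers: note that the hash (or an
 * intermediate digest) is ready and defer the rest to the done tasklet.
 */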
1652static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1653{
1654        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1655                dev_warn(dd->dev, "Interrupt when no active requests.\n");
1656        } else {
1657                set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1658                tasklet_schedule(&dd->done_task);
1659        }
1660
1661        return IRQ_HANDLED;
1662}
1663
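/* OMAP2/3 interrupt handler: acknowledge the IRQ in SHA_REG_CTRL */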
1664static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
1665{
1666        struct omap_sham_dev *dd = dev_id;
1667
1668        if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1669                /* final -> allow device to go to power-saving mode */
1670                omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1671
1672        omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1673                                 SHA_REG_CTRL_OUTPUT_READY);
1674        omap_sham_read(dd, SHA_REG_CTRL);
1675
1676        return omap_sham_irq_common(dd);
1677}
1678
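/* OMAP4+ interrupt handler: mask further IRQs until the tasklet has run */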
1679static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
1680{
1681        struct omap_sham_dev *dd = dev_id;
1682
1683        omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
1684
1685        return omap_sham_irq_common(dd);
1686}
1687
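/* Per-SoC lists of the algorithms the hardware can offload */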
1688static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1689        {
1690                .algs_list      = algs_sha1_md5,
1691                .size           = ARRAY_SIZE(algs_sha1_md5),
1692        },
1693};
1694
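/* Register layout and capabilities of the OMAP2/OMAP3 SHA/MD5 module */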
1695static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
1696        .algs_info      = omap_sham_algs_info_omap2,
1697        .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2),
1698        .flags          = BIT(FLAGS_BE32_SHA1),
1699        .digest_size    = SHA1_DIGEST_SIZE,
1700        .copy_hash      = omap_sham_copy_hash_omap2,
1701        .write_ctrl     = omap_sham_write_ctrl_omap2,
1702        .trigger        = omap_sham_trigger_omap2,
1703        .poll_irq       = omap_sham_poll_irq_omap2,
1704        .intr_hdlr      = omap_sham_irq_omap2,
1705        .idigest_ofs    = 0x00,
1706        .din_ofs        = 0x1c,
1707        .digcnt_ofs     = 0x14,
1708        .rev_ofs        = 0x5c,
1709        .mask_ofs       = 0x60,
1710        .sysstatus_ofs  = 0x64,
1711        .major_mask     = 0xf0,
1712        .major_shift    = 4,
1713        .minor_mask     = 0x0f,
1714        .minor_shift    = 0,
1715};
1716
1717#ifdef CONFIG_OF
1718static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1719        {
1720                .algs_list      = algs_sha1_md5,
1721                .size           = ARRAY_SIZE(algs_sha1_md5),
1722        },
1723        {
1724                .algs_list      = algs_sha224_sha256,
1725                .size           = ARRAY_SIZE(algs_sha224_sha256),
1726        },
1727};
1728
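/*
 * OMAP4 variant: adds SHA2-224/256, hardware HMAC key processing
 * (FLAGS_AUTO_XOR) and a different register layout.
 */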
1729static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1730        .algs_info      = omap_sham_algs_info_omap4,
1731        .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4),
1732        .flags          = BIT(FLAGS_AUTO_XOR),
1733        .digest_size    = SHA256_DIGEST_SIZE,
1734        .copy_hash      = omap_sham_copy_hash_omap4,
1735        .write_ctrl     = omap_sham_write_ctrl_omap4,
1736        .trigger        = omap_sham_trigger_omap4,
1737        .poll_irq       = omap_sham_poll_irq_omap4,
1738        .intr_hdlr      = omap_sham_irq_omap4,
1739        .idigest_ofs    = 0x020,
1740        .odigest_ofs    = 0x0,
1741        .din_ofs        = 0x080,
1742        .digcnt_ofs     = 0x040,
1743        .rev_ofs        = 0x100,
1744        .mask_ofs       = 0x110,
1745        .sysstatus_ofs  = 0x114,
1746        .mode_ofs       = 0x44,
1747        .length_ofs     = 0x48,
1748        .major_mask     = 0x0700,
1749        .major_shift    = 8,
1750        .minor_mask     = 0x003f,
1751        .minor_shift    = 0,
1752};
1753
1754static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1755        {
1756                .algs_list      = algs_sha1_md5,
1757                .size           = ARRAY_SIZE(algs_sha1_md5),
1758        },
1759        {
1760                .algs_list      = algs_sha224_sha256,
1761                .size           = ARRAY_SIZE(algs_sha224_sha256),
1762        },
1763        {
1764                .algs_list      = algs_sha384_sha512,
1765                .size           = ARRAY_SIZE(algs_sha384_sha512),
1766        },
1767};
1768
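/* OMAP5 variant: same programming model as OMAP4, extended up to SHA2-512 */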
1769static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1770        .algs_info      = omap_sham_algs_info_omap5,
1771        .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap5),
1772        .flags          = BIT(FLAGS_AUTO_XOR),
1773        .digest_size    = SHA512_DIGEST_SIZE,
1774        .copy_hash      = omap_sham_copy_hash_omap4,
1775        .write_ctrl     = omap_sham_write_ctrl_omap4,
1776        .trigger        = omap_sham_trigger_omap4,
1777        .poll_irq       = omap_sham_poll_irq_omap4,
1778        .intr_hdlr      = omap_sham_irq_omap4,
1779        .idigest_ofs    = 0x240,
1780        .odigest_ofs    = 0x200,
1781        .din_ofs        = 0x080,
1782        .digcnt_ofs     = 0x280,
1783        .rev_ofs        = 0x100,
1784        .mask_ofs       = 0x110,
1785        .sysstatus_ofs  = 0x114,
1786        .mode_ofs       = 0x284,
1787        .length_ofs     = 0x288,
1788        .major_mask     = 0x0700,
1789        .major_shift    = 8,
1790        .minor_mask     = 0x003f,
1791        .minor_shift    = 0,
1792};
1793
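/* Device tree match table; .data selects the per-SoC pdata above */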
1794static const struct of_device_id omap_sham_of_match[] = {
1795        {
1796                .compatible     = "ti,omap2-sham",
1797                .data           = &omap_sham_pdata_omap2,
1798        },
1799        {
1800                .compatible     = "ti,omap3-sham",
1801                .data           = &omap_sham_pdata_omap2,
1802        },
1803        {
1804                .compatible     = "ti,omap4-sham",
1805                .data           = &omap_sham_pdata_omap4,
1806        },
1807        {
1808                .compatible     = "ti,omap5-sham",
1809                .data           = &omap_sham_pdata_omap5,
1810        },
1811        {},
1812};
1813MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1814
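/* Resolve the MMIO range, IRQ and pdata from the device tree node */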
1815static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1816                struct device *dev, struct resource *res)
1817{
1818        struct device_node *node = dev->of_node;
1819        const struct of_device_id *match;
1820        int err = 0;
1821
1822        match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
1823        if (!match) {
1824                dev_err(dev, "no compatible OF match\n");
1825                err = -EINVAL;
1826                goto err;
1827        }
1828
1829        err = of_address_to_resource(node, 0, res);
1830        if (err < 0) {
1831                dev_err(dev, "can't translate OF node address\n");
1832                err = -EINVAL;
1833                goto err;
1834        }
1835
1836        dd->irq = irq_of_parse_and_map(node, 0);
1837        if (!dd->irq) {
1838                dev_err(dev, "can't translate OF irq value\n");
1839                err = -EINVAL;
1840                goto err;
1841        }
1842
1843        dd->pdata = match->data;
1844
1845err:
1846        return err;
1847}
1848#else
1849static const struct of_device_id omap_sham_of_match[] = {
1850        {},
1851};
1852
1853static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1854                struct device *dev, struct resource *res)
1855{
1856        return -EINVAL;
1857}
1858#endif
1859
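/* Legacy (non-DT) lookup of MMIO and IRQ resources from the platform device */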
1860static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
1861                struct platform_device *pdev, struct resource *res)
1862{
1863        struct device *dev = &pdev->dev;
1864        struct resource *r;
1865        int err = 0;
1866
1867        /* Get the base address */
1868        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1869        if (!r) {
1870                dev_err(dev, "no MEM resource info\n");
1871                err = -ENODEV;
1872                goto err;
1873        }
1874        memcpy(res, r, sizeof(*res));
1875
1876        /* Get the IRQ */
1877        dd->irq = platform_get_irq(pdev, 0);
1878        if (dd->irq < 0) {
1879                dev_err(dev, "no IRQ resource info\n");
1880                err = dd->irq;
1881                goto err;
1882        }
1883
1884        /* Only OMAP2/3 can be non-DT */
1885        dd->pdata = &omap_sham_pdata_omap2;
1886
1887err:
1888        return err;
1889}
1890
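/*
 * Probe: map the registers, request the IRQ and (optionally) a DMA
 * channel, read the IP revision and register the supported hash
 * algorithms with the crypto API.
 */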
1891static int omap_sham_probe(struct platform_device *pdev)
1892{
1893        struct omap_sham_dev *dd;
1894        struct device *dev = &pdev->dev;
1895        struct resource res;
1896        dma_cap_mask_t mask;
1897        int err, i, j;
1898        u32 rev;
1899
1900        dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
1901        if (dd == NULL) {
1902                dev_err(dev, "unable to alloc data struct.\n");
1903                err = -ENOMEM;
1904                goto data_err;
1905        }
1906        dd->dev = dev;
1907        platform_set_drvdata(pdev, dd);
1908
1909        INIT_LIST_HEAD(&dd->list);
1910        spin_lock_init(&dd->lock);
1911        tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
1912        crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
1913
1914        err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
1915                               omap_sham_get_res_pdev(dd, pdev, &res);
1916        if (err)
1917                goto data_err;
1918
1919        dd->io_base = devm_ioremap_resource(dev, &res);
1920        if (IS_ERR(dd->io_base)) {
1921                err = PTR_ERR(dd->io_base);
1922                goto data_err;
1923        }
1924        dd->phys_base = res.start;
1925
1926        err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
1927                               IRQF_TRIGGER_NONE, dev_name(dev), dd);
1928        if (err) {
1929                dev_err(dev, "unable to request irq %d, err = %d\n",
1930                        dd->irq, err);
1931                goto data_err;
1932        }
1933
1934        dma_cap_zero(mask);
1935        dma_cap_set(DMA_SLAVE, mask);
1936
1937        dd->dma_lch = dma_request_chan(dev, "rx");
1938        if (IS_ERR(dd->dma_lch)) {
1939                err = PTR_ERR(dd->dma_lch);
1940                if (err == -EPROBE_DEFER)
1941                        goto data_err;
1942
1943                dd->polling_mode = 1;
1944                dev_dbg(dev, "using polling mode instead of dma\n");
1945        }
1946
1947        dd->flags |= dd->pdata->flags;
1948
1949        pm_runtime_enable(dev);
1950        pm_runtime_irq_safe(dev);
1951
1952        err = pm_runtime_get_sync(dev);
1953        if (err < 0) {
1954                dev_err(dev, "failed to get sync: %d\n", err);
1955                goto err_pm;
1956        }
1957
1958        rev = omap_sham_read(dd, SHA_REG_REV(dd));
1959        pm_runtime_put_sync(&pdev->dev);
1960
1961        dev_info(dev, "hw accel on OMAP rev %u.%u\n",
1962                (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
1963                (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
1964
1965        spin_lock(&sham.lock);
1966        list_add_tail(&dd->list, &sham.dev_list);
1967        spin_unlock(&sham.lock);
1968
1969        for (i = 0; i < dd->pdata->algs_info_size; i++) {
1970                for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
1971                        err = crypto_register_ahash(
1972                                        &dd->pdata->algs_info[i].algs_list[j]);
1973                        if (err)
1974                                goto err_algs;
1975
1976                        dd->pdata->algs_info[i].registered++;
1977                }
1978        }
1979
1980        return 0;
1981
1982err_algs:
1983        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
1984                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
1985                        crypto_unregister_ahash(
1986                                        &dd->pdata->algs_info[i].algs_list[j]);
1987err_pm:
1988        pm_runtime_disable(dev);
1989        if (!dd->polling_mode)
1990                dma_release_channel(dd->dma_lch);
1991data_err:
1992        dev_err(dev, "initialization failed.\n");
1993
1994        return err;
1995}
1996
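/*
 * Undo probe: unregister the hash algorithms, kill the done tasklet,
 * disable runtime PM and release the DMA channel if one was in use.
 */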
1997static int omap_sham_remove(struct platform_device *pdev)
1998{
1999        struct omap_sham_dev *dd;
2000        int i, j;
2001
2002        dd = platform_get_drvdata(pdev);
2003        if (!dd)
2004                return -ENODEV;
2005        spin_lock(&sham.lock);
2006        list_del(&dd->list);
2007        spin_unlock(&sham.lock);
2008        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2009                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2010                        crypto_unregister_ahash(
2011                                        &dd->pdata->algs_info[i].algs_list[j]);
2012        tasklet_kill(&dd->done_task);
2013        pm_runtime_disable(&pdev->dev);
2014
2015        if (!dd->polling_mode)
2016                dma_release_channel(dd->dma_lch);
2017
2018        return 0;
2019}
2020
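/* System suspend/resume just drop and reacquire the runtime PM reference */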
2021#ifdef CONFIG_PM_SLEEP
2022static int omap_sham_suspend(struct device *dev)
2023{
2024        pm_runtime_put_sync(dev);
2025        return 0;
2026}
2027
2028static int omap_sham_resume(struct device *dev)
2029{
2030        int err = pm_runtime_get_sync(dev);
2031        if (err < 0) {
2032                dev_err(dev, "failed to get sync: %d\n", err);
2033                return err;
2034        }
2035        return 0;
2036}
2037#endif
2038
2039static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
2040
2041static struct platform_driver omap_sham_driver = {
2042        .probe  = omap_sham_probe,
2043        .remove = omap_sham_remove,
2044        .driver = {
2045                .name   = "omap-sham",
2046                .pm     = &omap_sham_pm_ops,
2047                .of_match_table = omap_sham_of_match,
2048        },
2049};
2050
2051module_platform_driver(omap_sham_driver);
2052
2053MODULE_DESCRIPTION("OMAP SHA1/MD5/SHA2 hw acceleration support.");
2054MODULE_LICENSE("GPL v2");
2055MODULE_AUTHOR("Dmitry Kasatkin");
2056MODULE_ALIAS("platform:omap-sham");
2057