linux/drivers/crypto/omap-sham.c
<<
>>
Prefs
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for OMAP SHA1/MD5 HW acceleration.
   5 *
   6 * Copyright (c) 2010 Nokia Corporation
   7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
   8 * Copyright (c) 2011 Texas Instruments Incorporated
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as published
  12 * by the Free Software Foundation.
  13 *
  14 * Some ideas are from old omap-sha1-md5.c driver.
  15 */
  16
  17#define pr_fmt(fmt) "%s: " fmt, __func__
  18
  19#include <linux/err.h>
  20#include <linux/device.h>
  21#include <linux/module.h>
  22#include <linux/init.h>
  23#include <linux/errno.h>
  24#include <linux/interrupt.h>
  25#include <linux/kernel.h>
  26#include <linux/irq.h>
  27#include <linux/io.h>
  28#include <linux/platform_device.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/dmaengine.h>
  32#include <linux/omap-dma.h>
  33#include <linux/pm_runtime.h>
  34#include <linux/of.h>
  35#include <linux/of_device.h>
  36#include <linux/of_address.h>
  37#include <linux/of_irq.h>
  38#include <linux/delay.h>
  39#include <linux/crypto.h>
  40#include <linux/cryptohash.h>
  41#include <crypto/scatterwalk.h>
  42#include <crypto/algapi.h>
  43#include <crypto/sha.h>
  44#include <crypto/hash.h>
  45#include <crypto/internal/hash.h>
  46
/* MD5 and SHA1 share a 64-byte input block size. */
   47#define SHA1_MD5_BLOCK_SIZE             SHA1_BLOCK_SIZE
   48#define MD5_DIGEST_SIZE                 16
   49
/* DMA burst size (words) and the smallest transfer worth using DMA for. */
   50#define DST_MAXBURST                    16
   51#define DMA_MIN                         (DST_MAXBURST * sizeof(u32))
   52
/*
 * Register offsets vary between OMAP generations, so the variable ones are
 * taken from the per-SoC pdata; the fixed offsets below are raw values.
 */
   53#define SHA_REG_IDIGEST(dd, x)          ((dd)->pdata->idigest_ofs + ((x)*0x04))
   54#define SHA_REG_DIN(dd, x)              ((dd)->pdata->din_ofs + ((x) * 0x04))
   55#define SHA_REG_DIGCNT(dd)              ((dd)->pdata->digcnt_ofs)
   56
   57#define SHA_REG_ODIGEST(x)              (0x00 + ((x) * 0x04))
   58
   59#define SHA_REG_CTRL                    0x18
   60#define SHA_REG_CTRL_LENGTH             (0xFFFFFFFF << 5)
   61#define SHA_REG_CTRL_CLOSE_HASH         (1 << 4)
   62#define SHA_REG_CTRL_ALGO_CONST         (1 << 3)
   63#define SHA_REG_CTRL_ALGO               (1 << 2)
   64#define SHA_REG_CTRL_INPUT_READY        (1 << 1)
   65#define SHA_REG_CTRL_OUTPUT_READY       (1 << 0)
   66
   67#define SHA_REG_REV(dd)                 ((dd)->pdata->rev_ofs)
   68
   69#define SHA_REG_MASK(dd)                ((dd)->pdata->mask_ofs)
   70#define SHA_REG_MASK_DMA_EN             (1 << 3)
   71#define SHA_REG_MASK_IT_EN              (1 << 2)
   72#define SHA_REG_MASK_SOFTRESET          (1 << 1)
   73#define SHA_REG_AUTOIDLE                (1 << 0)
   74
   75#define SHA_REG_SYSSTATUS(dd)           ((dd)->pdata->sysstatus_ofs)
   76#define SHA_REG_SYSSTATUS_RESETDONE     (1 << 0)
   77
/* MODE/IRQ registers below exist on the OMAP4-style IP only. */
   78#define SHA_REG_MODE                    0x44
   79#define SHA_REG_MODE_HMAC_OUTER_HASH    (1 << 7)
   80#define SHA_REG_MODE_HMAC_KEY_PROC      (1 << 5)
   81#define SHA_REG_MODE_CLOSE_HASH         (1 << 4)
   82#define SHA_REG_MODE_ALGO_CONSTANT      (1 << 3)
   83#define SHA_REG_MODE_ALGO_MASK          (3 << 1)
   84#define         SHA_REG_MODE_ALGO_MD5_128       (0 << 1)
   85#define         SHA_REG_MODE_ALGO_SHA1_160      (1 << 1)
   86#define         SHA_REG_MODE_ALGO_SHA2_224      (2 << 1)
   87#define         SHA_REG_MODE_ALGO_SHA2_256      (3 << 1)
   88
   89#define SHA_REG_LENGTH                  0x48
   90
   91#define SHA_REG_IRQSTATUS               0x118
   92#define SHA_REG_IRQSTATUS_CTX_RDY       (1 << 3)
   93#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
   94#define SHA_REG_IRQSTATUS_INPUT_RDY     (1 << 1)
   95#define SHA_REG_IRQSTATUS_OUTPUT_RDY    (1 << 0)
   96
   97#define SHA_REG_IRQENA                  0x11C
   98#define SHA_REG_IRQENA_CTX_RDY          (1 << 3)
   99#define SHA_REG_IRQENA_PARTHASH_RDY     (1 << 2)
  100#define SHA_REG_IRQENA_INPUT_RDY        (1 << 1)
  101#define SHA_REG_IRQENA_OUTPUT_RDY       (1 << 0)
  102
  103#define DEFAULT_TIMEOUT_INTERVAL        HZ
  104
/* Bit numbers for dd->flags / ctx->flags (used with set_bit/BIT()). */
  105/* mostly device flags */
  106#define FLAGS_BUSY              0
  107#define FLAGS_FINAL             1
  108#define FLAGS_DMA_ACTIVE        2
  109#define FLAGS_OUTPUT_READY      3
  110#define FLAGS_INIT              4
  111#define FLAGS_CPU               5
  112#define FLAGS_DMA_READY         6
  113#define FLAGS_AUTO_XOR          7
  114#define FLAGS_BE32_SHA1         8
  115/* context flags */
  116#define FLAGS_FINUP             16
  117#define FLAGS_SG                17
  118
/*
 * The algorithm field of ctx->flags holds the SHA_REG_MODE_ALGO_* encoding
 * shifted so it can be moved to the MODE register with one shift.
 */
  119#define FLAGS_MODE_SHIFT        18
  120#define FLAGS_MODE_MASK         (SHA_REG_MODE_ALGO_MASK                 \
  121                                        << (FLAGS_MODE_SHIFT - 1))
  122#define         FLAGS_MODE_MD5          (SHA_REG_MODE_ALGO_MD5_128      \
  123                                                << (FLAGS_MODE_SHIFT - 1))
  124#define         FLAGS_MODE_SHA1         (SHA_REG_MODE_ALGO_SHA1_160     \
  125                                                << (FLAGS_MODE_SHIFT - 1))
  126#define         FLAGS_MODE_SHA224       (SHA_REG_MODE_ALGO_SHA2_224     \
  127                                                << (FLAGS_MODE_SHIFT - 1))
  128#define         FLAGS_MODE_SHA256       (SHA_REG_MODE_ALGO_SHA2_256     \
  129                                                << (FLAGS_MODE_SHIFT - 1))
  130#define FLAGS_HMAC              20
  131#define FLAGS_ERROR             21
  132
  133#define OP_UPDATE               1
  134#define OP_FINAL                2
  135
/* DMA-friendly 32-bit alignment for buffers handed to the engine. */
  136#define OMAP_ALIGN_MASK         (sizeof(u32)-1)
  137#define OMAP_ALIGNED            __attribute__((aligned(sizeof(u32))))
  138
  139#define BUFLEN                  PAGE_SIZE
 141struct omap_sham_dev;
 142
/*
 * Per-request state, stored in the ahash request context.
 */
  143struct omap_sham_reqctx {
  144        struct omap_sham_dev    *dd;
  145        unsigned long           flags;
  146        unsigned long           op;     /* OP_UPDATE or OP_FINAL */
  147
  148        u8                      digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED;
  149        size_t                  digcnt; /* bytes hashed by hardware so far */
  150        size_t                  bufcnt; /* bytes currently staged in buffer[] */
  151        size_t                  buflen; /* capacity of buffer[] (BUFLEN) */
  152        dma_addr_t              dma_addr;
  153
  154        /* walk state */
  155        struct scatterlist      *sg;
  156        struct scatterlist      sgl;    /* local sg used to fix up DMA length */
  157        unsigned int            offset; /* offset in current sg */
  158        unsigned int            total;  /* total request */
  159
  160        u8                      buffer[0] OMAP_ALIGNED;
  161};
 162
/*
 * HMAC key material: XOR-padded inner/outer key blocks plus a software
 * shash fallback used when the hardware lacks HMAC support.
 */
  163struct omap_sham_hmac_ctx {
  164        struct crypto_shash     *shash;
  165        u8                      ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
  166        u8                      opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED;
  167};
 168
/*
 * Per-tfm context. For HMAC transforms, an omap_sham_hmac_ctx is
 * allocated directly behind this struct (flexible "base" member).
 */
  169struct omap_sham_ctx {
  170        struct omap_sham_dev    *dd;
  171
  172        unsigned long           flags;
  173
  174        /* fallback stuff */
  175        struct crypto_shash     *fallback;
  176
  177        struct omap_sham_hmac_ctx base[0];
  178};
 179
  180#define OMAP_SHAM_QUEUE_LENGTH  1
  181
/* One registered group of ahash algorithms for a given hardware revision. */
  182struct omap_sham_algs_info {
  183        struct ahash_alg        *algs_list;
  184        unsigned int            size;           /* entries in algs_list */
  185        unsigned int            registered;     /* how many registered so far */
  186};
 187
/*
 * Per-SoC description: register offsets, version-specific ops, and
 * the algorithms the hardware revision supports.
 */
  188struct omap_sham_pdata {
  189        struct omap_sham_algs_info      *algs_info;
  190        unsigned int    algs_info_size;
  191        unsigned long   flags;
  192        int             digest_size;
  193
  194        void            (*copy_hash)(struct ahash_request *req, int out);
  195        void            (*write_ctrl)(struct omap_sham_dev *dd, size_t length,
  196                                      int final, int dma);
  197        void            (*trigger)(struct omap_sham_dev *dd, size_t length);
  198        int             (*poll_irq)(struct omap_sham_dev *dd);
  199        irqreturn_t     (*intr_hdlr)(int irq, void *dev_id);
  200
  201        u32             odigest_ofs;
  202        u32             idigest_ofs;
  203        u32             din_ofs;
  204        u32             digcnt_ofs;
  205        u32             rev_ofs;
  206        u32             mask_ofs;
  207        u32             sysstatus_ofs;
  208
  209        u32             major_mask;
  210        u32             major_shift;
  211        u32             minor_mask;
  212        u32             minor_shift;
  213};
 214
/*
 * One SHAM hardware instance: MMIO mapping, DMA channel, request
 * queue and the request currently owning the engine.
 */
  215struct omap_sham_dev {
  216        struct list_head        list;
  217        unsigned long           phys_base;
  218        struct device           *dev;
  219        void __iomem            *io_base;
  220        int                     irq;
  221        spinlock_t              lock;
  222        int                     err;
  223        unsigned int            dma;
  224        struct dma_chan         *dma_lch;
  225        struct tasklet_struct   done_task;
  226
  227        unsigned long           flags;
  228        struct crypto_queue     queue;
  229        struct ahash_request    *req;   /* request currently being processed */
  230
  231        const struct omap_sham_pdata    *pdata;
  232};
 233
/* Driver-global state: list of probed devices, protected by lock. */
  234struct omap_sham_drv {
  235        struct list_head        dev_list;
  236        spinlock_t              lock;
  237        unsigned long           flags;
  238};
 239
/* Singleton driver state; devices add themselves to dev_list at probe. */
  240static struct omap_sham_drv sham = {
  241        .dev_list = LIST_HEAD_INIT(sham.dev_list),
  242        .lock = __SPIN_LOCK_UNLOCKED(sham.lock),
  243};
 244
 245static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
 246{
 247        return __raw_readl(dd->io_base + offset);
 248}
 249
 250static inline void omap_sham_write(struct omap_sham_dev *dd,
 251                                        u32 offset, u32 value)
 252{
 253        __raw_writel(value, dd->io_base + offset);
 254}
 255
 256static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
 257                                        u32 value, u32 mask)
 258{
 259        u32 val;
 260
 261        val = omap_sham_read(dd, address);
 262        val &= ~mask;
 263        val |= value;
 264        omap_sham_write(dd, address, val);
 265}
 266
 267static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 268{
 269        unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
 270
 271        while (!(omap_sham_read(dd, offset) & bit)) {
 272                if (time_is_before_jiffies(timeout))
 273                        return -ETIMEDOUT;
 274        }
 275
 276        return 0;
 277}
 278
 279static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
 280{
 281        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 282        struct omap_sham_dev *dd = ctx->dd;
 283        u32 *hash = (u32 *)ctx->digest;
 284        int i;
 285
 286        for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 287                if (out)
 288                        hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
 289                else
 290                        omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
 291        }
 292}
 293
 294static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
 295{
 296        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 297        struct omap_sham_dev *dd = ctx->dd;
 298        int i;
 299
 300        if (ctx->flags & BIT(FLAGS_HMAC)) {
 301                struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 302                struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 303                struct omap_sham_hmac_ctx *bctx = tctx->base;
 304                u32 *opad = (u32 *)bctx->opad;
 305
 306                for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 307                        if (out)
 308                                opad[i] = omap_sham_read(dd,
 309                                                SHA_REG_ODIGEST(i));
 310                        else
 311                                omap_sham_write(dd, SHA_REG_ODIGEST(i),
 312                                                opad[i]);
 313                }
 314        }
 315
 316        omap_sham_copy_hash_omap2(req, out);
 317}
 318
 319static void omap_sham_copy_ready_hash(struct ahash_request *req)
 320{
 321        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 322        u32 *in = (u32 *)ctx->digest;
 323        u32 *hash = (u32 *)req->result;
 324        int i, d, big_endian = 0;
 325
 326        if (!hash)
 327                return;
 328
 329        switch (ctx->flags & FLAGS_MODE_MASK) {
 330        case FLAGS_MODE_MD5:
 331                d = MD5_DIGEST_SIZE / sizeof(u32);
 332                break;
 333        case FLAGS_MODE_SHA1:
 334                /* OMAP2 SHA1 is big endian */
 335                if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
 336                        big_endian = 1;
 337                d = SHA1_DIGEST_SIZE / sizeof(u32);
 338                break;
 339        case FLAGS_MODE_SHA224:
 340                d = SHA224_DIGEST_SIZE / sizeof(u32);
 341                break;
 342        case FLAGS_MODE_SHA256:
 343                d = SHA256_DIGEST_SIZE / sizeof(u32);
 344                break;
 345        default:
 346                d = 0;
 347        }
 348
 349        if (big_endian)
 350                for (i = 0; i < d; i++)
 351                        hash[i] = be32_to_cpu(in[i]);
 352        else
 353                for (i = 0; i < d; i++)
 354                        hash[i] = le32_to_cpu(in[i]);
 355}
 356
 357static int omap_sham_hw_init(struct omap_sham_dev *dd)
 358{
 359        pm_runtime_get_sync(dd->dev);
 360
 361        if (!test_bit(FLAGS_INIT, &dd->flags)) {
 362                set_bit(FLAGS_INIT, &dd->flags);
 363                dd->err = 0;
 364        }
 365
 366        return 0;
 367}
 368
/*
 * Program the OMAP2/3 control path for one transfer of @length bytes.
 * The byte count is encoded directly in the CTRL register (bits 5+),
 * the running digest count is restored into DIGCNT, and the IRQ/DMA
 * enables are set according to @dma.
 */
  369static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
  370                                 int final, int dma)
  371{
  372        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
  373        u32 val = length << 5, mask;
  374
  375        if (likely(ctx->digcnt))
  376                omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
  377
  378        omap_sham_write_mask(dd, SHA_REG_MASK(dd),
  379                SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
  380                SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
  381        /*
  382         * Setting ALGO_CONST only for the first iteration
  383         * and CLOSE_HASH only for the last one.
  384         */
/* ALGO bit selects SHA1 vs MD5 on this IP revision */
  385        if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
  386                val |= SHA_REG_CTRL_ALGO;
  387        if (!ctx->digcnt)
  388                val |= SHA_REG_CTRL_ALGO_CONST;
  389        if (final)
  390                val |= SHA_REG_CTRL_CLOSE_HASH;
  391
  392        mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
  393                        SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
  394
  395        omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
  396}
 397
/*
 * Intentional no-op: on OMAP2/3 the transfer length is already encoded
 * in the CTRL register by omap_sham_write_ctrl_omap2(), so there is no
 * separate LENGTH/trigger register to write.
 */
  398static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
  399{
  400}
 401
/*
 * Poll until the OMAP2 engine is ready to accept input data.
 * Returns 0 or -ETIMEDOUT (see omap_sham_wait()).
 */
  402static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
  403{
  404        return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
  405}
 406
 407static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
 408                                    u32 *value, int count)
 409{
 410        for (; count--; value++, offset += 4)
 411                omap_sham_write(dd, offset, *value);
 412}
 413
/*
 * Program the OMAP4/AM33xx MODE register, IRQ enables and sysconfig
 * for one transfer. The algorithm encoding is carried in ctx->flags
 * and shifted straight into the MODE register.
 */
  414static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
  415                                 int final, int dma)
  416{
  417        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
  418        u32 val, mask;
  419
  420        /*
  421         * Setting ALGO_CONST only for the first iteration and
  422         * CLOSE_HASH only for the last one. Note that flags mode bits
  423         * correspond to algorithm encoding in mode register.
  424         */
  425        val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1);
/* first chunk: seed the outer digest with the HMAC ipad and account for it */
  426        if (!ctx->digcnt) {
  427                struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
  428                struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
  429                struct omap_sham_hmac_ctx *bctx = tctx->base;
  430
  431                val |= SHA_REG_MODE_ALGO_CONSTANT;
  432
  433                if (ctx->flags & BIT(FLAGS_HMAC)) {
  434                        val |= SHA_REG_MODE_HMAC_KEY_PROC;
  435                        omap_sham_write_n(dd, SHA_REG_ODIGEST(0),
  436                                          (u32 *)bctx->ipad,
  437                                          SHA1_BLOCK_SIZE / sizeof(u32));
  438                        ctx->digcnt += SHA1_BLOCK_SIZE;
  439                }
  440        }
  441
  442        if (final) {
  443                val |= SHA_REG_MODE_CLOSE_HASH;
  444
/* hardware performs the outer HMAC hash itself on close */
  445                if (ctx->flags & BIT(FLAGS_HMAC))
  446                        val |= SHA_REG_MODE_HMAC_OUTER_HASH;
  447        }
  448
  449        mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
  450               SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
  451               SHA_REG_MODE_HMAC_KEY_PROC;
  452
  453        dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
  454        omap_sham_write_mask(dd, SHA_REG_MODE, val, mask);
  455        omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
  456        omap_sham_write_mask(dd, SHA_REG_MASK(dd),
  457                             SHA_REG_MASK_IT_EN |
  458                                     (dma ? SHA_REG_MASK_DMA_EN : 0),
  459                             SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
  460}
 461
/*
 * OMAP4: writing the byte count to the LENGTH register starts the
 * engine for the next data block.
 */
  462static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
  463{
  464        omap_sham_write(dd, SHA_REG_LENGTH, length);
  465}
 466
/*
 * Poll until the OMAP4 engine is ready to accept input data.
 * Returns 0 or -ETIMEDOUT (see omap_sham_wait()).
 */
  467static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
  468{
  469        return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
  470                              SHA_REG_IRQSTATUS_INPUT_RDY);
  471}
 472
 473static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 474                              size_t length, int final)
 475{
 476        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 477        int count, len32;
 478        const u32 *buffer = (const u32 *)buf;
 479
 480        dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 481                                                ctx->digcnt, length, final);
 482
 483        dd->pdata->write_ctrl(dd, length, final, 0);
 484        dd->pdata->trigger(dd, length);
 485
 486        /* should be non-zero before next lines to disable clocks later */
 487        ctx->digcnt += length;
 488
 489        if (dd->pdata->poll_irq(dd))
 490                return -ETIMEDOUT;
 491
 492        if (final)
 493                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 494
 495        set_bit(FLAGS_CPU, &dd->flags);
 496
 497        len32 = DIV_ROUND_UP(length, sizeof(u32));
 498
 499        for (count = 0; count < len32; count++)
 500                omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]);
 501
 502        return -EINPROGRESS;
 503}
 504
/*
 * dmaengine completion callback. Runs in callback (atomic) context,
 * so it only flags completion and defers the real work to done_task.
 */
  505static void omap_sham_dma_callback(void *param)
  506{
  507        struct omap_sham_dev *dd = param;
  508
  509        set_bit(FLAGS_DMA_READY, &dd->flags);
  510        tasklet_schedule(&dd->done_task);
  511}
 512
 513static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 514                              size_t length, int final, int is_sg)
 515{
 516        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 517        struct dma_async_tx_descriptor *tx;
 518        struct dma_slave_config cfg;
 519        int len32, ret;
 520
 521        dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 522                                                ctx->digcnt, length, final);
 523
 524        memset(&cfg, 0, sizeof(cfg));
 525
 526        cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
 527        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 528        cfg.dst_maxburst = DST_MAXBURST;
 529
 530        ret = dmaengine_slave_config(dd->dma_lch, &cfg);
 531        if (ret) {
 532                pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
 533                return ret;
 534        }
 535
 536        len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN;
 537
 538        if (is_sg) {
 539                /*
 540                 * The SG entry passed in may not have the 'length' member
 541                 * set correctly so use a local SG entry (sgl) with the
 542                 * proper value for 'length' instead.  If this is not done,
 543                 * the dmaengine may try to DMA the incorrect amount of data.
 544                 */
 545                sg_init_table(&ctx->sgl, 1);
 546                ctx->sgl.page_link = ctx->sg->page_link;
 547                ctx->sgl.offset = ctx->sg->offset;
 548                sg_dma_len(&ctx->sgl) = len32;
 549                sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
 550
 551                tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1,
 552                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 553        } else {
 554                tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32,
 555                        DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 556        }
 557
 558        if (!tx) {
 559                dev_err(dd->dev, "prep_slave_sg/single() failed\n");
 560                return -EINVAL;
 561        }
 562
 563        tx->callback = omap_sham_dma_callback;
 564        tx->callback_param = dd;
 565
 566        dd->pdata->write_ctrl(dd, length, final, 1);
 567
 568        ctx->digcnt += length;
 569
 570        if (final)
 571                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 572
 573        set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 574
 575        dmaengine_submit(tx);
 576        dma_async_issue_pending(dd->dma_lch);
 577
 578        dd->pdata->trigger(dd, length);
 579
 580        return -EINPROGRESS;
 581}
 582
 583static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
 584                                const u8 *data, size_t length)
 585{
 586        size_t count = min(length, ctx->buflen - ctx->bufcnt);
 587
 588        count = min(count, ctx->total);
 589        if (count <= 0)
 590                return 0;
 591        memcpy(ctx->buffer + ctx->bufcnt, data, count);
 592        ctx->bufcnt += count;
 593
 594        return count;
 595}
 596
/*
 * Drain the request scatterlist into the staging buffer, advancing
 * the walk state (sg/offset/total) as data is consumed. Stops when
 * the buffer is full or the scatterlist is exhausted. Always returns
 * 0; callers look at ctx->bufcnt / ctx->total for progress.
 */
  597static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
  598{
  599        size_t count;
  600
  601        while (ctx->sg) {
  602                count = omap_sham_append_buffer(ctx,
  603                                sg_virt(ctx->sg) + ctx->offset,
  604                                ctx->sg->length - ctx->offset);
  605                if (!count)
  606                        break;
  607                ctx->offset += count;
  608                ctx->total -= count;
  609                if (ctx->offset == ctx->sg->length) {
  610                        ctx->sg = sg_next(ctx->sg);
  611                        if (ctx->sg)
  612                                ctx->offset = 0;
  613                        else
  614                                ctx->total = 0;
  615                }
  616        }
  617
  618        return 0;
  619}
 620
 621static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 622                                        struct omap_sham_reqctx *ctx,
 623                                        size_t length, int final)
 624{
 625        int ret;
 626
 627        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
 628                                       DMA_TO_DEVICE);
 629        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 630                dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
 631                return -EINVAL;
 632        }
 633
 634        ctx->flags &= ~BIT(FLAGS_SG);
 635
 636        ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0);
 637        if (ret != -EINPROGRESS)
 638                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
 639                                 DMA_TO_DEVICE);
 640
 641        return ret;
 642}
 643
 644static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 645{
 646        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 647        unsigned int final;
 648        size_t count;
 649
 650        omap_sham_append_sg(ctx);
 651
 652        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 653
 654        dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
 655                                         ctx->bufcnt, ctx->digcnt, final);
 656
 657        if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 658                count = ctx->bufcnt;
 659                ctx->bufcnt = 0;
 660                return omap_sham_xmit_dma_map(dd, ctx, count, final);
 661        }
 662
 663        return 0;
 664}
 665
/* Start address alignment */
  667#define SG_AA(sg)       (IS_ALIGNED(sg->offset, sizeof(u32)))
/* SHA1 block size alignment */
  669#define SG_SA(sg)       (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
 670
 671static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 672{
 673        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 674        unsigned int length, final, tail;
 675        struct scatterlist *sg;
 676        int ret;
 677
 678        if (!ctx->total)
 679                return 0;
 680
 681        if (ctx->bufcnt || ctx->offset)
 682                return omap_sham_update_dma_slow(dd);
 683
 684        /*
 685         * Don't use the sg interface when the transfer size is less
 686         * than the number of elements in a DMA frame.  Otherwise,
 687         * the dmaengine infrastructure will calculate that it needs
 688         * to transfer 0 frames which ultimately fails.
 689         */
 690        if (ctx->total < (DST_MAXBURST * sizeof(u32)))
 691                return omap_sham_update_dma_slow(dd);
 692
 693        dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
 694                        ctx->digcnt, ctx->bufcnt, ctx->total);
 695
 696        sg = ctx->sg;
 697
 698        if (!SG_AA(sg))
 699                return omap_sham_update_dma_slow(dd);
 700
 701        if (!sg_is_last(sg) && !SG_SA(sg))
 702                /* size is not SHA1_BLOCK_SIZE aligned */
 703                return omap_sham_update_dma_slow(dd);
 704
 705        length = min(ctx->total, sg->length);
 706
 707        if (sg_is_last(sg)) {
 708                if (!(ctx->flags & BIT(FLAGS_FINUP))) {
 709                        /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
 710                        tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
 711                        /* without finup() we need one block to close hash */
 712                        if (!tail)
 713                                tail = SHA1_MD5_BLOCK_SIZE;
 714                        length -= tail;
 715                }
 716        }
 717
 718        if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 719                dev_err(dd->dev, "dma_map_sg  error\n");
 720                return -EINVAL;
 721        }
 722
 723        ctx->flags |= BIT(FLAGS_SG);
 724
 725        ctx->total -= length;
 726        ctx->offset = length; /* offset where to start slow */
 727
 728        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 729
 730        ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1);
 731        if (ret != -EINPROGRESS)
 732                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 733
 734        return ret;
 735}
 736
 737static int omap_sham_update_cpu(struct omap_sham_dev *dd)
 738{
 739        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 740        int bufcnt;
 741
 742        omap_sham_append_sg(ctx);
 743        bufcnt = ctx->bufcnt;
 744        ctx->bufcnt = 0;
 745
 746        return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
 747}
 748
/*
 * Tear down the current DMA transfer: stop the channel and undo
 * whichever mapping (scatterlist fast path vs. staging buffer) was
 * created when it was started, advancing the sg walk if the current
 * entry was fully consumed.
 */
  749static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
  750{
  751        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
  752
  753        dmaengine_terminate_all(dd->dma_lch);
  754
  755        if (ctx->flags & BIT(FLAGS_SG)) {
  756                dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
  757                if (ctx->sg->length == ctx->offset) {
  758                        ctx->sg = sg_next(ctx->sg);
  759                        if (ctx->sg)
  760                                ctx->offset = 0;
  761                }
  762        } else {
  763                dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
  764                                 DMA_TO_DEVICE);
  765        }
  766
  767        return 0;
  768}
 769
/*
 * ahash .init: bind the tfm to a device (first one on the list if not
 * already bound), reset the per-request context and derive the
 * algorithm mode from the digest size. For HMAC on hardware without
 * auto-XOR support, the ipad block is pre-staged into the buffer so
 * the key processing happens as ordinary input data.
 */
  770static int omap_sham_init(struct ahash_request *req)
  771{
  772        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  773        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
  774        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
  775        struct omap_sham_dev *dd = NULL, *tmp;
  776
  777        spin_lock_bh(&sham.lock);
/* pick the first available device and cache it in the tfm context */
  778        if (!tctx->dd) {
  779                list_for_each_entry(tmp, &sham.dev_list, list) {
  780                        dd = tmp;
  781                        break;
  782                }
  783                tctx->dd = dd;
  784        } else {
  785                dd = tctx->dd;
  786        }
  787        spin_unlock_bh(&sham.lock);
  788
  789        ctx->dd = dd;
  790
  791        ctx->flags = 0;
  792
  793        dev_dbg(dd->dev, "init: digest size: %d\n",
  794                crypto_ahash_digestsize(tfm));
  795
/* digest size uniquely identifies the algorithm among those supported */
  796        switch (crypto_ahash_digestsize(tfm)) {
  797        case MD5_DIGEST_SIZE:
  798                ctx->flags |= FLAGS_MODE_MD5;
  799                break;
  800        case SHA1_DIGEST_SIZE:
  801                ctx->flags |= FLAGS_MODE_SHA1;
  802                break;
  803        case SHA224_DIGEST_SIZE:
  804                ctx->flags |= FLAGS_MODE_SHA224;
  805                break;
  806        case SHA256_DIGEST_SIZE:
  807                ctx->flags |= FLAGS_MODE_SHA256;
  808                break;
  809        }
  810
  811        ctx->bufcnt = 0;
  812        ctx->digcnt = 0;
  813        ctx->buflen = BUFLEN;
  814
  815        if (tctx->flags & BIT(FLAGS_HMAC)) {
  816                if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
  817                        struct omap_sham_hmac_ctx *bctx = tctx->base;
  818
  819                        memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
  820                        ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
  821                }
  822
  823                ctx->flags |= BIT(FLAGS_HMAC);
  824        }
  825
  826        return 0;
  827
  828}
 829
 830static int omap_sham_update_req(struct omap_sham_dev *dd)
 831{
 832        struct ahash_request *req = dd->req;
 833        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 834        int err;
 835
 836        dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
 837                 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
 838
 839        if (ctx->flags & BIT(FLAGS_CPU))
 840                err = omap_sham_update_cpu(dd);
 841        else
 842                err = omap_sham_update_dma_start(dd);
 843
 844        /* wait for dma completion before can take more data */
 845        dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
 846
 847        return err;
 848}
 849
/*
 * Push the final buffered block through the engine and close the
 * hash. Short tails go via PIO (cheaper than setting up DMA).
 * Returns -EINPROGRESS on success or a negative error.
 */
  850static int omap_sham_final_req(struct omap_sham_dev *dd)
  851{
  852        struct ahash_request *req = dd->req;
  853        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
  854        int err = 0, use_dma = 1;
  855
  856        if (ctx->bufcnt <= DMA_MIN)
  857                /* faster to handle last block with cpu */
  858                use_dma = 0;
  859
  860        if (use_dma)
  861                err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
  862        else
  863                err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
  864
  865        ctx->bufcnt = 0;
  866
  867        dev_dbg(dd->dev, "final_req: err: %d\n", err);
  868
  869        return err;
  870}
 871
/*
 * Finalize an HMAC in software: result = H(opad || inner_digest).
 * Only used on hardware without automatic key XOR support (see the
 * FLAGS_AUTO_XOR check in omap_sham_finish()).
 */
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	/* on-stack shash descriptor, sized for this particular transform */
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	/* finup() reads the inner digest from req->result and overwrites it */
	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}
 890
 891static int omap_sham_finish(struct ahash_request *req)
 892{
 893        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 894        struct omap_sham_dev *dd = ctx->dd;
 895        int err = 0;
 896
 897        if (ctx->digcnt) {
 898                omap_sham_copy_ready_hash(req);
 899                if ((ctx->flags & BIT(FLAGS_HMAC)) &&
 900                                !test_bit(FLAGS_AUTO_XOR, &dd->flags))
 901                        err = omap_sham_finish_hmac(req);
 902        }
 903
 904        dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 905
 906        return err;
 907}
 908
/*
 * Complete the current request: on success latch the hardware digest
 * (and finish it if this was the final operation), on failure mark the
 * request context.  Then release the device, notify the caller, and
 * kick the queue via the done tasklet.
 */
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		/* copy the intermediate/final hash out of the hardware */
		dd->pdata->copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_put(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}
 934
/*
 * Enqueue a request (req may be NULL to just pump the queue) and, if
 * the device is idle, dequeue and start the next one.  Called both
 * from the crypto API entry points and from the done tasklet.
 * Returns the enqueue status for req (e.g. -EINPROGRESS/-EBUSY), not
 * the status of the request actually started.
 */
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		/* a request is in flight; finish_req will re-kick the queue */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		/* claim the device while still holding the lock */
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->digcnt)
		/* request has changed - restore hash */
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
 994
 995static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
 996{
 997        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 998        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 999        struct omap_sham_dev *dd = tctx->dd;
1000
1001        ctx->op = op;
1002
1003        return omap_sham_handle_queue(dd, req);
1004}
1005
/*
 * ahash ->update(): either buffer the data internally (returning 0)
 * or queue an OP_UPDATE so the hardware processes it.
 */
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & BIT(FLAGS_FINUP)) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			* OMAP HW accel works only with buffers >= 9
			* will switch to bypass in final()
			* final has the same request and data
			*/
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
			/*
			* faster to use CPU for short transfers
			*/
			ctx->flags |= BIT(FLAGS_CPU);
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		/* still fits in the internal buffer: defer HW processing */
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}
1039
/*
 * One-shot software digest over a flat buffer.  Used for the HW-bypass
 * path (totals < 9 bytes) and for hashing over-long HMAC keys.
 */
static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	/* on-stack shash descriptor, sized for this particular transform */
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	/* only MAY_SLEEP is propagated from the caller's request flags */
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}
1053
/* Hash the buffered data entirely in software via the fallback tfm. */
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer, ctx->bufcnt, req->result);
}
1062
/*
 * ahash ->final(): finish the hash over whatever has been buffered.
 * Tiny totals bypass the hardware via the software fallback; buffered
 * leftovers are queued as OP_FINAL; otherwise the digest is already
 * complete and only needs copying out.
 */
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/* OMAP HW accel works only with buffers >= 9 */
	/* HMAC is always >= 9 because ipad == block size */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}
1082
/*
 * ahash ->finup(): hash the last chunk of data, then finalize.  The
 * FINUP flag lets update()/final() share the same request and data.
 */
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}
1101
/* ahash ->digest(): init + finup in one call. */
static int omap_sham_digest(struct ahash_request *req)
{
	int err;

	err = omap_sham_init(req);
	if (err)
		return err;

	return omap_sham_finup(req);
}
1106
1107static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1108                      unsigned int keylen)
1109{
1110        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1111        struct omap_sham_hmac_ctx *bctx = tctx->base;
1112        int bs = crypto_shash_blocksize(bctx->shash);
1113        int ds = crypto_shash_digestsize(bctx->shash);
1114        struct omap_sham_dev *dd = NULL, *tmp;
1115        int err, i;
1116
1117        spin_lock_bh(&sham.lock);
1118        if (!tctx->dd) {
1119                list_for_each_entry(tmp, &sham.dev_list, list) {
1120                        dd = tmp;
1121                        break;
1122                }
1123                tctx->dd = dd;
1124        } else {
1125                dd = tctx->dd;
1126        }
1127        spin_unlock_bh(&sham.lock);
1128
1129        err = crypto_shash_setkey(tctx->fallback, key, keylen);
1130        if (err)
1131                return err;
1132
1133        if (keylen > bs) {
1134                err = omap_sham_shash_digest(bctx->shash,
1135                                crypto_shash_get_flags(bctx->shash),
1136                                key, keylen, bctx->ipad);
1137                if (err)
1138                        return err;
1139                keylen = ds;
1140        } else {
1141                memcpy(bctx->ipad, key, keylen);
1142        }
1143
1144        memset(bctx->ipad + keylen, 0, bs - keylen);
1145
1146        if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
1147                memcpy(bctx->opad, bctx->ipad, bs);
1148
1149                for (i = 0; i < bs; i++) {
1150                        bctx->ipad[i] ^= 0x36;
1151                        bctx->opad[i] ^= 0x5c;
1152                }
1153        }
1154
1155        return err;
1156}
1157
/*
 * Common tfm constructor: allocate the software fallback and, for HMAC
 * algorithms (alg_base != NULL), the base shash used for key handling
 * and the outer hash.  Also sets the per-request context size.
 */
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			/* don't leak the fallback allocated above */
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}

	}

	return 0;
}
1191
/* tfm init for the plain (non-HMAC) algorithms: no base hash needed */
static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}
1196
/* tfm init for hmac(sha1): use "sha1" as the base shash */
static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}
1201
/* tfm init for hmac(sha224): use "sha224" as the base shash */
static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha224");
}
1206
/* tfm init for hmac(sha256): use "sha256" as the base shash */
static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha256");
}
1211
/* tfm init for hmac(md5): use "md5" as the base shash */
static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}
1216
/* tfm destructor: release the fallback and, for HMAC, the base shash */
static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}
1229
/*
 * SHA1/MD5 algorithms, registered on every supported IP revision.
 * Entries without .setkey are the plain hashes; the HMAC variants
 * carry an extra omap_sham_hmac_ctx after the tfm context.
 */
static struct ahash_alg algs_sha1_md5[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		/* NOTE(review): 0 here, but OMAP_ALIGN_MASK on the other
		 * entries below — confirm this asymmetry is intentional */
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		/* MD5 shares SHA1's 64-byte block size */
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};
1328
/* OMAP4 has some algs in addition to what OMAP2 has */
static struct ahash_alg algs_sha224_sha256[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha224",
		.cra_driver_name	= "omap-sha224",
		.cra_priority		= 100,
		/* NOTE(review): the sha1/md5 table also sets
		 * CRYPTO_ALG_KERN_DRIVER_ONLY — confirm whether its
		 * omission here (and in the entries below) is intended */
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "omap-sha256",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha224)",
		.cra_driver_name	= "omap-hmac-sha224",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha224_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha256)",
		.cra_driver_name	= "omap-hmac-sha256",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha256_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
1424
/*
 * Bottom half: advances the transfer state machine after an IRQ or a
 * DMA callback.  Restarts DMA for multi-pass transfers, completes the
 * request when the (semi-)digest is ready, and pumps the queue when
 * the device is idle.
 */
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		/* idle: start the next queued request, if any */
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			err = omap_sham_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}

	/* nothing ready yet: wait for the next IRQ/DMA completion */
	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}
1462
1463static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1464{
1465        if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1466                dev_warn(dd->dev, "Interrupt when no active requests.\n");
1467        } else {
1468                set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1469                tasklet_schedule(&dd->done_task);
1470        }
1471
1472        return IRQ_HANDLED;
1473}
1474
/*
 * OMAP2/3 interrupt handler: ack OUTPUT_READY in the CTRL register,
 * then hand off to the common tail.
 */
static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	/* read back, presumably to flush the posted write — TODO confirm */
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}
1489
/*
 * OMAP4/5 interrupt handler: clear the interrupt-enable bit so no
 * further IRQs fire until re-armed, then run the common tail.
 */
static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}
1498
/* OMAP2/3 register only the SHA1/MD5 family */
static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
};
1505
/* OMAP2/3 IP description: big-endian SHA1 digests, no HW key XOR */
static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.algs_info	= omap_sham_algs_info_omap2,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	/* register map offsets and revision field layout */
	.idigest_ofs	= 0x00,
	.din_ofs	= 0x1c,
	.digcnt_ofs	= 0x14,
	.rev_ofs	= 0x5c,
	.mask_ofs	= 0x60,
	.sysstatus_ofs	= 0x64,
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
1527
1528#ifdef CONFIG_OF
/* OMAP4+ register SHA1/MD5 plus the SHA224/SHA256 family */
static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};
1539
/* OMAP4/5 IP description: HW performs the HMAC ipad/opad key XOR */
static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.algs_info	= omap_sham_algs_info_omap4,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	/* register map offsets and revision field layout */
	.idigest_ofs	= 0x020,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
1561
/* DT match table: the per-compatible data selects register layout and algs */
static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1574
/*
 * Fill in the MMIO resource, IRQ and pdata from the device-tree node.
 * Returns 0 on success, -EINVAL on any translation failure.
 */
static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	int err = 0;

	match = of_match_device(of_match_ptr(omap_sham_of_match), dev);
	if (!match) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = of_irq_to_resource(node, 0, NULL);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

	dd->dma = -1; /* Dummy value that's unused */
	/* pdata comes from the matched compatible entry */
	dd->pdata = match->data;

err:
	return err;
}
1609#else
/* !CONFIG_OF: empty table keeps references compiling */
static const struct of_device_id omap_sham_of_match[] = {
	{},
};
1613
/* !CONFIG_OF stub: device-tree resources are never available */
static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
1619#endif
1620
/*
 * Fill in the MMIO resource, IRQ and DMA request line from legacy
 * platform resources (non-DT boot path, OMAP2/3 only).
 */
static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto err;
	}

	/* Get the DMA */
	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto err;
	}
	dd->dma = r->start;

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}
1660
1661static int omap_sham_probe(struct platform_device *pdev)
1662{
1663        struct omap_sham_dev *dd;
1664        struct device *dev = &pdev->dev;
1665        struct resource res;
1666        dma_cap_mask_t mask;
1667        int err, i, j;
1668        u32 rev;
1669
1670        dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
1671        if (dd == NULL) {
1672                dev_err(dev, "unable to alloc data struct.\n");
1673                err = -ENOMEM;
1674                goto data_err;
1675        }
1676        dd->dev = dev;
1677        platform_set_drvdata(pdev, dd);
1678
1679        INIT_LIST_HEAD(&dd->list);
1680        spin_lock_init(&dd->lock);
1681        tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
1682        crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
1683
1684        err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
1685                               omap_sham_get_res_pdev(dd, pdev, &res);
1686        if (err)
1687                goto res_err;
1688
1689        dd->io_base = devm_request_and_ioremap(dev, &res);
1690        if (!dd->io_base) {
1691                dev_err(dev, "can't ioremap\n");
1692                err = -ENOMEM;
1693                goto res_err;
1694        }
1695        dd->phys_base = res.start;
1696
1697        err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW,
1698                          dev_name(dev), dd);
1699        if (err) {
1700                dev_err(dev, "unable to request irq.\n");
1701                goto res_err;
1702        }
1703
1704        dma_cap_zero(mask);
1705        dma_cap_set(DMA_SLAVE, mask);
1706
1707        dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
1708                                                       &dd->dma, dev, "rx");
1709        if (!dd->dma_lch) {
1710                dev_err(dev, "unable to obtain RX DMA engine channel %u\n",
1711                        dd->dma);
1712                err = -ENXIO;
1713                goto dma_err;
1714        }
1715
1716        dd->flags |= dd->pdata->flags;
1717
1718        pm_runtime_enable(dev);
1719        pm_runtime_get_sync(dev);
1720        rev = omap_sham_read(dd, SHA_REG_REV(dd));
1721        pm_runtime_put_sync(&pdev->dev);
1722
1723        dev_info(dev, "hw accel on OMAP rev %u.%u\n",
1724                (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
1725                (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
1726
1727        spin_lock(&sham.lock);
1728        list_add_tail(&dd->list, &sham.dev_list);
1729        spin_unlock(&sham.lock);
1730
1731        for (i = 0; i < dd->pdata->algs_info_size; i++) {
1732                for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
1733                        err = crypto_register_ahash(
1734                                        &dd->pdata->algs_info[i].algs_list[j]);
1735                        if (err)
1736                                goto err_algs;
1737
1738                        dd->pdata->algs_info[i].registered++;
1739                }
1740        }
1741
1742        return 0;
1743
1744err_algs:
1745        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
1746                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
1747                        crypto_unregister_ahash(
1748                                        &dd->pdata->algs_info[i].algs_list[j]);
1749        pm_runtime_disable(dev);
1750        dma_release_channel(dd->dma_lch);
1751dma_err:
1752        free_irq(dd->irq, dd);
1753res_err:
1754        kfree(dd);
1755        dd = NULL;
1756data_err:
1757        dev_err(dev, "initialization failed.\n");
1758
1759        return err;
1760}
1761
1762static int omap_sham_remove(struct platform_device *pdev)
1763{
1764        static struct omap_sham_dev *dd;
1765        int i, j;
1766
1767        dd = platform_get_drvdata(pdev);
1768        if (!dd)
1769                return -ENODEV;
1770        spin_lock(&sham.lock);
1771        list_del(&dd->list);
1772        spin_unlock(&sham.lock);
1773        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
1774                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
1775                        crypto_unregister_ahash(
1776                                        &dd->pdata->algs_info[i].algs_list[j]);
1777        tasklet_kill(&dd->done_task);
1778        pm_runtime_disable(&pdev->dev);
1779        dma_release_channel(dd->dma_lch);
1780        free_irq(dd->irq, dd);
1781        kfree(dd);
1782        dd = NULL;
1783
1784        return 0;
1785}
1786
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: drop a runtime-PM reference on the device.
 * NOTE(review): probe balances its own get/put pair, so it is not obvious
 * from this file which outstanding reference this releases — verify
 * against the request-handling paths.
 */
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

/* System resume: re-take the runtime-PM reference dropped in suspend. */
static int omap_sham_resume(struct device *dev)
{
	pm_runtime_get_sync(dev);
	return 0;
}
#endif
1800
/*
 * Sleep PM callbacks; SET_SYSTEM_SLEEP_PM_OPS expands to nothing when
 * CONFIG_PM_SLEEP is off, matching the #ifdef guarding the handlers above.
 */
static const struct dev_pm_ops omap_sham_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume)
};
1804
/* Platform driver glue: binds via the DT match table or the "omap-sham"
 * platform-device name on legacy (non-DT) boards. */
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
		.pm	= &omap_sham_pm_ops,
		.of_match_table = omap_sham_of_match,
	},
};
1815
/* Generates module init/exit that register/unregister the driver. */
module_platform_driver(omap_sham_driver);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
1821