linux/drivers/crypto/omap-sham.c
<<
>>
Prefs
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for OMAP SHA1/MD5 HW acceleration.
   5 *
   6 * Copyright (c) 2010 Nokia Corporation
   7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as published
  11 * by the Free Software Foundation.
  12 *
  13 * Some ideas are from old omap-sha1-md5.c driver.
  14 */
  15
  16#define pr_fmt(fmt) "%s: " fmt, __func__
  17
  18#include <linux/err.h>
  19#include <linux/device.h>
  20#include <linux/module.h>
  21#include <linux/init.h>
  22#include <linux/errno.h>
  23#include <linux/interrupt.h>
  24#include <linux/kernel.h>
  25#include <linux/clk.h>
  26#include <linux/irq.h>
  27#include <linux/io.h>
  28#include <linux/platform_device.h>
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/delay.h>
  32#include <linux/crypto.h>
  33#include <linux/cryptohash.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/sha.h>
  37#include <crypto/hash.h>
  38#include <crypto/internal/hash.h>
  39
  40#include <linux/omap-dma.h>
  41#include <mach/irqs.h>
  42
  43#define SHA_REG_DIGEST(x)               (0x00 + ((x) * 0x04))
  44#define SHA_REG_DIN(x)                  (0x1C + ((x) * 0x04))
  45
  46#define SHA1_MD5_BLOCK_SIZE             SHA1_BLOCK_SIZE
  47#define MD5_DIGEST_SIZE                 16
  48
  49#define SHA_REG_DIGCNT                  0x14
  50
  51#define SHA_REG_CTRL                    0x18
  52#define SHA_REG_CTRL_LENGTH             (0xFFFFFFFF << 5)
  53#define SHA_REG_CTRL_CLOSE_HASH         (1 << 4)
  54#define SHA_REG_CTRL_ALGO_CONST         (1 << 3)
  55#define SHA_REG_CTRL_ALGO               (1 << 2)
  56#define SHA_REG_CTRL_INPUT_READY        (1 << 1)
  57#define SHA_REG_CTRL_OUTPUT_READY       (1 << 0)
  58
  59#define SHA_REG_REV                     0x5C
  60#define SHA_REG_REV_MAJOR               0xF0
  61#define SHA_REG_REV_MINOR               0x0F
  62
  63#define SHA_REG_MASK                    0x60
  64#define SHA_REG_MASK_DMA_EN             (1 << 3)
  65#define SHA_REG_MASK_IT_EN              (1 << 2)
  66#define SHA_REG_MASK_SOFTRESET          (1 << 1)
  67#define SHA_REG_AUTOIDLE                (1 << 0)
  68
  69#define SHA_REG_SYSSTATUS               0x64
  70#define SHA_REG_SYSSTATUS_RESETDONE     (1 << 0)
  71
  72#define DEFAULT_TIMEOUT_INTERVAL        HZ
  73
  74/* mostly device flags */
  75#define FLAGS_BUSY              0
  76#define FLAGS_FINAL             1
  77#define FLAGS_DMA_ACTIVE        2
  78#define FLAGS_OUTPUT_READY      3
  79#define FLAGS_INIT              4
  80#define FLAGS_CPU               5
  81#define FLAGS_DMA_READY         6
  82/* context flags */
  83#define FLAGS_FINUP             16
  84#define FLAGS_SG                17
  85#define FLAGS_SHA1              18
  86#define FLAGS_HMAC              19
  87#define FLAGS_ERROR             20
  88
  89#define OP_UPDATE       1
  90#define OP_FINAL        2
  91
  92#define OMAP_ALIGN_MASK         (sizeof(u32)-1)
  93#define OMAP_ALIGNED            __attribute__((aligned(sizeof(u32))))
  94
  95#define BUFLEN          PAGE_SIZE
  96
  97struct omap_sham_dev;
  98
/*
 * Per-request state, kept in the ahash request context.  BUFLEN bytes
 * of staging buffer follow this struct (see the reqsize set in
 * omap_sham_cra_init_alg()) and are reached via the trailing buffer[]
 * member.
 */
struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;	/* device handling this request */
	unsigned long		flags;	/* request-local FLAGS_* bits */
	unsigned long		op;	/* OP_UPDATE or OP_FINAL */

	/* running digest, word-aligned for 32-bit register I/O */
	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;	/* bytes fed to the HW so far */
	size_t			bufcnt;	/* valid bytes in buffer[] */
	size_t			buflen;	/* capacity of buffer[] (BUFLEN) */
	dma_addr_t		dma_addr; /* DMA mapping of buffer[] */

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};
 117
/*
 * HMAC state held in the tfm context: the software shash used for key
 * processing / the outer round, plus the precomputed padded keys.
 */
struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA1_MD5_BLOCK_SIZE]; /* key ^ 0x36 pad */
	u8			opad[SHA1_MD5_BLOCK_SIZE]; /* key ^ 0x5c pad */
};
 123
/*
 * Per-tfm context.  For HMAC transforms an omap_sham_hmac_ctx is
 * allocated directly after this struct and accessed through base[].
 */
struct omap_sham_ctx {
	struct omap_sham_dev	*dd;	/* device bound to this tfm */

	unsigned long		flags;	/* tfm-level FLAGS_* (FLAGS_HMAC) */

	/* fallback stuff */
	struct crypto_shash	*fallback; /* SW hash for sub-minimum input */

	struct omap_sham_hmac_ctx base[0]; /* trailing HMAC state, if any */
};
 134
 135#define OMAP_SHAM_QUEUE_LENGTH  1
 136
/* Per-instance state for one SHA1/MD5 accelerator. */
struct omap_sham_dev {
	struct list_head	list;		/* link in sham.dev_list */
	unsigned long		phys_base;	/* physical register base */
	struct device		*dev;
	void __iomem		*io_base;	/* ioremapped register base */
	int			irq;
	struct clk		*iclk;		/* interface clock */
	spinlock_t		lock;		/* protects queue and flags */
	int			err;		/* last device error */
	int			dma;		/* DMA request line */
	int			dma_lch;	/* DMA logical channel */
	struct tasklet_struct	done_task;	/* completion bottom half */

	unsigned long		flags;		/* device FLAGS_* bits */
	struct crypto_queue	queue;		/* pending ahash requests */
	struct ahash_request	*req;		/* request in flight */
};
 154
/* Driver-global state: the list of probed devices, guarded by .lock. */
struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
	unsigned long		flags;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
 165
 166static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
 167{
 168        return __raw_readl(dd->io_base + offset);
 169}
 170
 171static inline void omap_sham_write(struct omap_sham_dev *dd,
 172                                        u32 offset, u32 value)
 173{
 174        __raw_writel(value, dd->io_base + offset);
 175}
 176
 177static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
 178                                        u32 value, u32 mask)
 179{
 180        u32 val;
 181
 182        val = omap_sham_read(dd, address);
 183        val &= ~mask;
 184        val |= value;
 185        omap_sham_write(dd, address, val);
 186}
 187
 188static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 189{
 190        unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
 191
 192        while (!(omap_sham_read(dd, offset) & bit)) {
 193                if (time_is_before_jiffies(timeout))
 194                        return -ETIMEDOUT;
 195        }
 196
 197        return 0;
 198}
 199
 200static void omap_sham_copy_hash(struct ahash_request *req, int out)
 201{
 202        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 203        u32 *hash = (u32 *)ctx->digest;
 204        int i;
 205
 206        /* MD5 is almost unused. So copy sha1 size to reduce code */
 207        for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
 208                if (out)
 209                        hash[i] = omap_sham_read(ctx->dd,
 210                                                SHA_REG_DIGEST(i));
 211                else
 212                        omap_sham_write(ctx->dd,
 213                                        SHA_REG_DIGEST(i), hash[i]);
 214        }
 215}
 216
 217static void omap_sham_copy_ready_hash(struct ahash_request *req)
 218{
 219        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 220        u32 *in = (u32 *)ctx->digest;
 221        u32 *hash = (u32 *)req->result;
 222        int i;
 223
 224        if (!hash)
 225                return;
 226
 227        if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
 228                /* SHA1 results are in big endian */
 229                for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
 230                        hash[i] = be32_to_cpu(in[i]);
 231        } else {
 232                /* MD5 results are in little endian */
 233                for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
 234                        hash[i] = le32_to_cpu(in[i]);
 235        }
 236}
 237
/*
 * Enable the interface clock and, the first time through, soft-reset
 * the module.  FLAGS_INIT marks the reset as done so later requests
 * skip it.  Returns 0 on success or -ETIMEDOUT if the reset never
 * completes (clock left enabled; the caller's error path disables it).
 */
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	clk_enable(dd->iclk);

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		/* trigger soft reset, then poll SYSSTATUS for completion */
		omap_sham_write_mask(dd, SHA_REG_MASK,
			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);

		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
					SHA_REG_SYSSTATUS_RESETDONE))
			return -ETIMEDOUT;

		set_bit(FLAGS_INIT, &dd->flags);
		dd->err = 0;
	}

	return 0;
}
 256
/*
 * Program the control registers for the next transfer of @length
 * bytes: restore DIGCNT when continuing, enable irq (and optionally
 * the DMA request line), and set CTRL with the byte length plus ALGO
 * (SHA1 vs MD5), ALGO_CONST (load initial constants, first block
 * only) and CLOSE_HASH (apply padding, final block only).
 */
static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
				 int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	/* restore the running byte count when continuing a hash */
	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK,
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if (ctx->flags & BIT(FLAGS_SHA1))
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}
 285
 286static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 287                              size_t length, int final)
 288{
 289        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 290        int count, len32;
 291        const u32 *buffer = (const u32 *)buf;
 292
 293        dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 294                                                ctx->digcnt, length, final);
 295
 296        omap_sham_write_ctrl(dd, length, final, 0);
 297
 298        /* should be non-zero before next lines to disable clocks later */
 299        ctx->digcnt += length;
 300
 301        if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
 302                return -ETIMEDOUT;
 303
 304        if (final)
 305                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 306
 307        set_bit(FLAGS_CPU, &dd->flags);
 308
 309        len32 = DIV_ROUND_UP(length, sizeof(u32));
 310
 311        for (count = 0; count < len32; count++)
 312                omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
 313
 314        return -EINPROGRESS;
 315}
 316
 317static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 318                              size_t length, int final)
 319{
 320        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 321        int len32;
 322
 323        dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 324                                                ctx->digcnt, length, final);
 325
 326        len32 = DIV_ROUND_UP(length, sizeof(u32));
 327
 328        omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
 329                        1, OMAP_DMA_SYNC_PACKET, dd->dma,
 330                                OMAP_DMA_DST_SYNC_PREFETCH);
 331
 332        omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 333                                dma_addr, 0, 0);
 334
 335        omap_sham_write_ctrl(dd, length, final, 1);
 336
 337        ctx->digcnt += length;
 338
 339        if (final)
 340                set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 341
 342        set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 343
 344        omap_start_dma(dd->dma_lch);
 345
 346        return -EINPROGRESS;
 347}
 348
 349static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
 350                                const u8 *data, size_t length)
 351{
 352        size_t count = min(length, ctx->buflen - ctx->bufcnt);
 353
 354        count = min(count, ctx->total);
 355        if (count <= 0)
 356                return 0;
 357        memcpy(ctx->buffer + ctx->bufcnt, data, count);
 358        ctx->bufcnt += count;
 359
 360        return count;
 361}
 362
/*
 * Drain the request scatterlist into the staging buffer, advancing
 * ctx->sg/offset/total as data is consumed.  Stops when the buffer is
 * full (append_buffer returns 0) or the list is exhausted; when the
 * last entry ends, total is forced to 0.  Always returns 0.
 *
 * NOTE(review): uses sg_virt(), so entries are assumed to have kernel
 * virtual mappings -- confirm for callers passing highmem pages.
 */
static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
{
	size_t count;

	while (ctx->sg) {
		count = omap_sham_append_buffer(ctx,
				sg_virt(ctx->sg) + ctx->offset,
				ctx->sg->length - ctx->offset);
		if (!count)
			break;
		ctx->offset += count;
		ctx->total -= count;
		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}
 386
 387static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 388                                        struct omap_sham_reqctx *ctx,
 389                                        size_t length, int final)
 390{
 391        ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
 392                                       DMA_TO_DEVICE);
 393        if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 394                dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
 395                return -EINVAL;
 396        }
 397
 398        ctx->flags &= ~BIT(FLAGS_SG);
 399
 400        /* next call does not fail... so no unmap in the case of error */
 401        return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
 402}
 403
 404static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 405{
 406        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 407        unsigned int final;
 408        size_t count;
 409
 410        omap_sham_append_sg(ctx);
 411
 412        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 413
 414        dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
 415                                         ctx->bufcnt, ctx->digcnt, final);
 416
 417        if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 418                count = ctx->bufcnt;
 419                ctx->bufcnt = 0;
 420                return omap_sham_xmit_dma_map(dd, ctx, count, final);
 421        }
 422
 423        return 0;
 424}
 425
 426/* Start address alignment */
 427#define SG_AA(sg)       (IS_ALIGNED(sg->offset, sizeof(u32)))
 428/* SHA1 block size alignment */
 429#define SG_SA(sg)       (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
 430
 431static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 432{
 433        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 434        unsigned int length, final, tail;
 435        struct scatterlist *sg;
 436
 437        if (!ctx->total)
 438                return 0;
 439
 440        if (ctx->bufcnt || ctx->offset)
 441                return omap_sham_update_dma_slow(dd);
 442
 443        dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
 444                        ctx->digcnt, ctx->bufcnt, ctx->total);
 445
 446        sg = ctx->sg;
 447
 448        if (!SG_AA(sg))
 449                return omap_sham_update_dma_slow(dd);
 450
 451        if (!sg_is_last(sg) && !SG_SA(sg))
 452                /* size is not SHA1_BLOCK_SIZE aligned */
 453                return omap_sham_update_dma_slow(dd);
 454
 455        length = min(ctx->total, sg->length);
 456
 457        if (sg_is_last(sg)) {
 458                if (!(ctx->flags & BIT(FLAGS_FINUP))) {
 459                        /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
 460                        tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
 461                        /* without finup() we need one block to close hash */
 462                        if (!tail)
 463                                tail = SHA1_MD5_BLOCK_SIZE;
 464                        length -= tail;
 465                }
 466        }
 467
 468        if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 469                dev_err(dd->dev, "dma_map_sg  error\n");
 470                return -EINVAL;
 471        }
 472
 473        ctx->flags |= BIT(FLAGS_SG);
 474
 475        ctx->total -= length;
 476        ctx->offset = length; /* offset where to start slow */
 477
 478        final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 479
 480        /* next call does not fail... so no unmap in the case of error */
 481        return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 482}
 483
 484static int omap_sham_update_cpu(struct omap_sham_dev *dd)
 485{
 486        struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 487        int bufcnt;
 488
 489        omap_sham_append_sg(ctx);
 490        bufcnt = ctx->bufcnt;
 491        ctx->bufcnt = 0;
 492
 493        return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
 494}
 495
/*
 * Stop the DMA channel and undo whatever mapping the transfer used:
 * the scatterlist entry (FLAGS_SG set; advance the walk if the entry
 * was fully consumed) or the linear staging buffer.  Always returns 0.
 */
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	omap_stop_dma(dd->dma_lch);
	if (ctx->flags & BIT(FLAGS_SG)) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			/* entry fully sent: move the walk to the next one */
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
				 DMA_TO_DEVICE);
	}

	return 0;
}
 515
/*
 * ahash .init: bind the request to a device (the first registered one,
 * cached in the tfm context) and reset the request state.  For HMAC,
 * the precomputed ipad block is preloaded into the staging buffer so
 * it gets hashed ahead of the message.
 */
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		/* pick the first probed device and remember it */
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	/* algorithm selection: SHA1-sized digest means SHA1, else MD5 */
	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= BIT(FLAGS_SHA1);

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;

		/* start the inner hash with the padded key block */
		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;

}
 560
 561static int omap_sham_update_req(struct omap_sham_dev *dd)
 562{
 563        struct ahash_request *req = dd->req;
 564        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 565        int err;
 566
 567        dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
 568                 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
 569
 570        if (ctx->flags & BIT(FLAGS_CPU))
 571                err = omap_sham_update_cpu(dd);
 572        else
 573                err = omap_sham_update_dma_start(dd);
 574
 575        /* wait for dma completion before can take more data */
 576        dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
 577
 578        return err;
 579}
 580
/*
 * Dispatch an OP_FINAL: push the buffered remainder as the closing
 * block.  Small leftovers (<= 64 bytes, a heuristic threshold) go via
 * PIO, which is cheaper than setting up a DMA transfer.
 */
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if (ctx->bufcnt <= 64)
		/* faster to handle last block with cpu */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);

	ctx->bufcnt = 0;

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
 602
/*
 * Software outer round of HMAC: result = H(opad || inner_digest).
 * The inner digest is already in req->result; finup() overwrites it
 * with the final MAC in place.
 */
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	/* on-stack shash descriptor sized for bctx->shash */
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(bctx->shash)];
	} desc;

	desc.shash.tfm = bctx->shash;
	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	return crypto_shash_init(&desc.shash) ?:
	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
}
 621
 622static int omap_sham_finish(struct ahash_request *req)
 623{
 624        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 625        struct omap_sham_dev *dd = ctx->dd;
 626        int err = 0;
 627
 628        if (ctx->digcnt) {
 629                omap_sham_copy_ready_hash(req);
 630                if (ctx->flags & BIT(FLAGS_HMAC))
 631                        err = omap_sham_finish_hmac(req);
 632        }
 633
 634        dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 635
 636        return err;
 637}
 638
/*
 * Complete the current request: read the digest back from the HW,
 * finalize it if this was the last block, or record the error.  Then
 * clear the device busy/state flags, release the clock, call the
 * request's completion and kick the tasklet to start any queued work.
 */
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!err) {
		omap_sham_copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
	clk_disable(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}
 663
/*
 * Enqueue @req (may be NULL to just kick the queue) and, if the device
 * is idle, dequeue and start the next request: initialize the HW,
 * program the DMA destination (the fixed DIN FIFO window), restore an
 * in-progress digest if needed, then dispatch OP_UPDATE/OP_FINAL.
 * Requests that complete or fail synchronously are finished here,
 * since no interrupt will follow.  Returns the enqueue status for
 * @req (e.g. -EINPROGRESS/-EBUSY), not the processing result.
 */
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		/* a running request will drain the queue when it finishes */
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	/* DMA writes to the constant DIN FIFO address, 16-byte elements */
	omap_set_dma_dest_params(dd->dma_lch, 0,
			OMAP_DMA_AMODE_CONSTANT,
			dd->phys_base + SHA_REG_DIN(0), 0, 16);

	omap_set_dma_dest_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_16);

	omap_set_dma_src_burst_mode(dd->dma_lch,
			OMAP_DMA_DATA_BURST_4);

	if (ctx->digcnt)
		/* request has changed - restore hash */
		omap_sham_copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}
 733
 734static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
 735{
 736        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 737        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 738        struct omap_sham_dev *dd = tctx->dd;
 739
 740        ctx->op = op;
 741
 742        return omap_sham_handle_queue(dd, req);
 743}
 744
/*
 * ahash .update: record the new data and decide how to handle it.
 * Totals below the 9-byte HW minimum and data that still fits in the
 * staging buffer are only buffered; short finup() transfers are
 * marked for PIO; everything else is queued for the accelerator.
 */
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & BIT(FLAGS_FINUP)) {
		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
			/*
			* OMAP HW accel works only with buffers >= 9
			* will switch to bypass in final()
			* final has the same request and data
			*/
			omap_sham_append_sg(ctx);
			return 0;
		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
			/*
			* faster to use CPU for short transfers
			*/
			ctx->flags |= BIT(FLAGS_CPU);
		}
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		/* not finalizing and it fits: just accumulate */
		omap_sham_append_sg(ctx);
		return 0;
	}

	return omap_sham_enqueue(req, OP_UPDATE);
}
 778
/*
 * One-shot digest over a synchronous shash using an on-stack
 * descriptor.  Only CRYPTO_TFM_REQ_MAY_SLEEP is propagated from
 * @flags.
 */
static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	struct {
		struct shash_desc shash;
		char ctx[crypto_shash_descsize(shash)];
	} desc;

	desc.shash.tfm = shash;
	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(&desc.shash, data, len, out);
}
 792
 793static int omap_sham_final_shash(struct ahash_request *req)
 794{
 795        struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 796        struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 797
 798        return omap_sham_shash_digest(tctx->fallback, req->base.flags,
 799                                      ctx->buffer, ctx->bufcnt, req->result);
 800}
 801
/*
 * ahash .final: close the hash.  Inputs shorter than the 9-byte HW
 * minimum go to the software fallback; buffered data goes to the HW
 * via OP_FINAL; otherwise the digest is already done and only needs
 * copying out (plus the HMAC outer round).
 */
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/* OMAP HW accel works only with buffers >= 9 */
	/* HMAC is always >= 9 because ipad == block size */
	if ((ctx->digcnt + ctx->bufcnt) < 9)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}
 821
/*
 * ahash .finup: update with the trailing data, then finalize.  When
 * the update goes asynchronous (-EINPROGRESS/-EBUSY) the final step
 * is driven by the completion path instead of here.
 */
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}
 840
 841static int omap_sham_digest(struct ahash_request *req)
 842{
 843        return omap_sham_init(req) ?: omap_sham_finup(req);
 844}
 845
 846static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
 847                      unsigned int keylen)
 848{
 849        struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 850        struct omap_sham_hmac_ctx *bctx = tctx->base;
 851        int bs = crypto_shash_blocksize(bctx->shash);
 852        int ds = crypto_shash_digestsize(bctx->shash);
 853        int err, i;
 854        err = crypto_shash_setkey(tctx->fallback, key, keylen);
 855        if (err)
 856                return err;
 857
 858        if (keylen > bs) {
 859                err = omap_sham_shash_digest(bctx->shash,
 860                                crypto_shash_get_flags(bctx->shash),
 861                                key, keylen, bctx->ipad);
 862                if (err)
 863                        return err;
 864                keylen = ds;
 865        } else {
 866                memcpy(bctx->ipad, key, keylen);
 867        }
 868
 869        memset(bctx->ipad + keylen, 0, bs - keylen);
 870        memcpy(bctx->opad, bctx->ipad, bs);
 871
 872        for (i = 0; i < bs; i++) {
 873                bctx->ipad[i] ^= 0x36;
 874                bctx->opad[i] ^= 0x5c;
 875        }
 876
 877        return err;
 878}
 879
/*
 * Common tfm init: allocate the software fallback shash, size the
 * request context (state struct + BUFLEN staging buffer) and, when
 * @alg_base is given (HMAC variants), allocate the base hash used for
 * key processing and the outer round.
 */
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' "
				"could not be loaded.\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' "
					"could not be loaded.\n", alg_base);
			/* release the fallback allocated above */
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}

	}

	return 0;
}
 913
/* Transform init for the plain (non-HMAC) algorithms. */
static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}
 918
/* Transform init for hmac(sha1): base hash is "sha1". */
static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}
 923
/* Transform init for hmac(md5): base hash is "md5". */
static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}
 928
 929static void omap_sham_cra_exit(struct crypto_tfm *tfm)
 930{
 931        struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 932
 933        crypto_free_shash(tctx->fallback);
 934        tctx->fallback = NULL;
 935
 936        if (tctx->flags & BIT(FLAGS_HMAC)) {
 937                struct omap_sham_hmac_ctx *bctx = tctx->base;
 938                crypto_free_shash(bctx->shash);
 939        }
 940}
 941
/*
 * Algorithms exported to the crypto API: plain and HMAC flavours of
 * SHA1 and MD5.  All share the same init/update/final/finup/digest
 * entry points; only the HMAC entries provide setkey.
 */
static struct ahash_alg algs[] = {
{
	/* sha1 */
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		/* NOTE(review): this entry uses alignmask 0 while the other
		 * three use OMAP_ALIGN_MASK — confirm this is intentional. */
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	/* md5 — note MD5's block size equals SHA1's (see
	 * SHA1_MD5_BLOCK_SIZE above), hence SHA1_BLOCK_SIZE here. */
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	/* hmac(sha1) — extra ctx space for the ipad/opad state. */
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	/* hmac(md5) — extra ctx space for the ipad/opad state. */
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 100,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};
1040
/*
 * Bottom half, scheduled from the IRQ handler and the DMA callback.
 * Drives the request state machine based on which FLAGS_* bits the
 * interrupt context set: either finishes the current request, starts
 * the next DMA stage, or dequeues a new request when idle.
 */
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		/* Nothing in flight: try to start a queued request. */
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		/* PIO mode: output ready means this update completed. */
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			/* dd->err is set by the DMA error callback. */
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			/* -EINPROGRESS: another DMA stage was started and
			 * this tasklet will run again when it completes. */
			err = omap_sham_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}
1078
/*
 * Hard IRQ handler: acknowledges the hardware and defers all real work
 * to the done_task tasklet.
 */
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	/* Write the OUTPUT_READY bit back — presumably write-to-ack
	 * semantics; confirm against the OMAP TRM. */
	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	/* Read back CTRL, presumably to flush the posted write. */
	omap_sham_read(dd, SHA_REG_CTRL);

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	/* Tell the bottom half a result is available and defer to it. */
	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}
1101
1102static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
1103{
1104        struct omap_sham_dev *dd = data;
1105
1106        if (ch_status != OMAP_DMA_BLOCK_IRQ) {
1107                pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
1108                dd->err = -EIO;
1109                clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
1110        }
1111
1112        set_bit(FLAGS_DMA_READY, &dd->flags);
1113        tasklet_schedule(&dd->done_task);
1114}
1115
1116static int omap_sham_dma_init(struct omap_sham_dev *dd)
1117{
1118        int err;
1119
1120        dd->dma_lch = -1;
1121
1122        err = omap_request_dma(dd->dma, dev_name(dd->dev),
1123                        omap_sham_dma_callback, dd, &dd->dma_lch);
1124        if (err) {
1125                dev_err(dd->dev, "Unable to request DMA channel\n");
1126                return err;
1127        }
1128
1129        return 0;
1130}
1131
1132static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
1133{
1134        if (dd->dma_lch >= 0) {
1135                omap_free_dma(dd->dma_lch);
1136                dd->dma_lch = -1;
1137        }
1138}
1139
/*
 * Platform-device probe: allocates the per-device state, claims MEM,
 * DMA and IRQ resources, maps the registers, and registers all the
 * exported hash algorithms.  Errors unwind in reverse acquisition
 * order via the chained labels at the bottom.
 */
static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int err, i, j;

	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	/* Sentinel so the error path knows whether free_irq is needed. */
	dd->irq = -1;

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->phys_base = res->start;

	/* Get the DMA */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(dev, "no DMA resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	dd->dma = res->start;

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev,  0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto res_err;
	}

	err = request_irq(dd->irq, omap_sham_irq,
			IRQF_TRIGGER_LOW, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq.\n");
		goto res_err;
	}

	err = omap_sham_dma_init(dd);
	if (err)
		goto dma_err;

	/* Initializing the clock */
	dd->iclk = clk_get(dev, "ick");
	if (IS_ERR(dd->iclk)) {
		dev_err(dev, "clock intialization failed.\n");
		err = PTR_ERR(dd->iclk);
		goto clk_err;
	}

	dd->io_base = ioremap(dd->phys_base, SZ_4K);
	if (!dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto io_err;
	}

	/* Clock is only needed while touching the registers. */
	clk_enable(dd->iclk);
	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
	clk_disable(dd->iclk);

	/* Publish the device before registering algs so transforms can
	 * find it. */
	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_ahash(&algs[i]);
		if (err)
			goto err_algs;
	}

	return 0;

err_algs:
	/* Unregister only the algs that registered successfully. */
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&algs[j]);
	iounmap(dd->io_base);
io_err:
	clk_put(dd->iclk);
clk_err:
	omap_sham_dma_cleanup(dd);
dma_err:
	if (dd->irq >= 0)
		free_irq(dd->irq, dd);
res_err:
	kfree(dd);
	dd = NULL;
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
1252
1253static int omap_sham_remove(struct platform_device *pdev)
1254{
1255        static struct omap_sham_dev *dd;
1256        int i;
1257
1258        dd = platform_get_drvdata(pdev);
1259        if (!dd)
1260                return -ENODEV;
1261        spin_lock(&sham.lock);
1262        list_del(&dd->list);
1263        spin_unlock(&sham.lock);
1264        for (i = 0; i < ARRAY_SIZE(algs); i++)
1265                crypto_unregister_ahash(&algs[i]);
1266        tasklet_kill(&dd->done_task);
1267        iounmap(dd->io_base);
1268        clk_put(dd->iclk);
1269        omap_sham_dma_cleanup(dd);
1270        if (dd->irq >= 0)
1271                free_irq(dd->irq, dd);
1272        kfree(dd);
1273        dd = NULL;
1274
1275        return 0;
1276}
1277
/* Platform-bus glue: matched by name against the "omap-sham" device. */
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.owner	= THIS_MODULE,
	},
};
1286
/* Module entry point: register the platform driver. */
static int __init omap_sham_mod_init(void)
{
	pr_info("loading %s driver\n", "omap-sham");

	return platform_driver_register(&omap_sham_driver);
}
1293
/* Module exit point: unregister the platform driver. */
static void __exit omap_sham_mod_exit(void)
{
	platform_driver_unregister(&omap_sham_driver);
}
1298
module_init(omap_sham_mod_init);
module_exit(omap_sham_mod_exit);

/* Module metadata. */
MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
1305
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.