linux/drivers/crypto/atmel-sha.c
/*
 * Cryptographic API.
 *
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from the omap-sham.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include "atmel-sha-regs.h"

/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_SHA1			BIT(18)
#define SHA_FLAGS_SHA256		BIT(19)
#define SHA_FLAGS_ERROR			BIT(20)
#define SHA_FLAGS_PAD			BIT(21)

#define SHA_FLAGS_DUALBUFF		BIT(24)

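/*
 * Note: the low flag bits (BUSY, FINAL, DMA_ACTIVE, OUTPUT_READY, INIT,
 * CPU, DMA_READY, DUALBUFF) track the state of the single hardware
 * engine in atmel_sha_dev->flags, while the bits from 16 upwards (FINUP,
 * SG, SHA1, SHA256, ERROR, PAD) live in the per-request
 * atmel_sha_reqctx->flags. SHA_FLAGS_CPU appears in both, depending on
 * context.
 */
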
#define SHA_OP_UPDATE		1
#define SHA_OP_FINAL		2

#define SHA_BUFFER_LEN		PAGE_SIZE

#define ATMEL_SHA_DMA_THRESHOLD	56

struct atmel_sha_dev;

struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long	flags;
	unsigned long	op;

	u8	digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t	digcnt;
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int	offset;	/* offset in current sg */
	unsigned int	total;	/* total request */

	u8	buffer[0] __aligned(sizeof(u32));
};

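/*
 * The zero-length buffer[0] member above acts as a flexible array: the
 * real staging space (SHA_BUFFER_LEN + SHA256_BLOCK_SIZE bytes) is
 * allocated as part of the request context via crypto_ahash_set_reqsize()
 * in atmel_sha_cra_init_alg() below. The extra block leaves room for the
 * final padding appended by atmel_sha_fill_padding().
 */
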
struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;

	unsigned long		flags;

	/* fallback shash, allocated in atmel_sha_cra_init_alg() */
	struct crypto_shash	*fallback;
};

#define ATMEL_SHA_QUEUE_LENGTH	1

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

/*
 * Probe for dual-buffer support: write the DUALBUFF bit to the mode
 * register and see whether it sticks.
 */
static void atmel_sha_dualbuff_test(struct atmel_sha_dev *dd)
{
	atmel_sha_write(dd, SHA_MR, SHA_MR_DUALBUFF);

	if (atmel_sha_read(dd, SHA_MR) & SHA_MR_DUALBUFF)
		dd->flags |= SHA_FLAGS_DUALBUFF;
}

/*
 * Copy as much request data as currently fits from the scatterlist walk
 * into the linear staging buffer, advancing the walk state as it goes.
 */
static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count == 0)
			break;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (the SHA1/SHA256 block size). The byte 0x80 (a "1"
 * bit followed by seven "0" bits) is appended at the end of the message,
 * followed by "padlen - 1" zero bytes. Then a 64-bit block equal to the
 * message length in bits is appended.
 *
 * padlen is calculated as follows:
 *  - if message length < 56 bytes then padlen = 56 - message length
 *  - else padlen = 64 + 56 - message length
 */
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits;
	__be64 size;

	bits = (ctx->bufcnt + ctx->digcnt + length) << 3;
	size = cpu_to_be64(bits);

	index = ctx->bufcnt & 0x3f;
	padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
	*(ctx->buffer + ctx->bufcnt) = 0x80;
	memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1);
	memcpy(ctx->buffer + ctx->bufcnt + padlen, &size, 8);
	ctx->bufcnt += padlen + 8;
	ctx->flags |= SHA_FLAGS_PAD;
}

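/*
 * Worked example (illustrative): for a 3-byte message, index = 3 and
 * padlen = 56 - 3 = 53, so the buffer receives 0x80, 52 zero bytes and
 * the big-endian 64-bit length 24 (3 << 3), for a padded total of
 * 3 + 53 + 8 = 64 bytes, i.e. exactly one 512-bit block.
 */
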
static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&atmel_sha.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
		ctx->flags |= SHA_FLAGS_SHA1;
	else if (crypto_ahash_digestsize(tfm) == SHA256_DIGEST_SIZE)
		ctx->flags |= SHA_FLAGS_SHA256;

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}

static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;

	if (likely(dma)) {
		atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->flags & SHA_FLAGS_DUALBUFF)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	if (ctx->flags & SHA_FLAGS_SHA256)
		valmr |= SHA_MR_ALGO_SHA256;

	/* Setting CR_FIRST only for the first iteration */
	if (!ctx->digcnt)
		valcr = SHA_CR_FIRST;

	atmel_sha_write(dd, SHA_CR, valcr);
	atmel_sha_write(dd, SHA_MR, valmr);
}

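/*
 * As used above: in AUTO mode the engine starts a hash round on its own
 * once the last word of a block has been written to the DIN registers
 * (CPU/PIO path, completion signalled by DATRDY), while in PDC mode the
 * Peripheral DMA Controller feeds the blocks and completion is signalled
 * by TXBUFE.
 */
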
static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zu, length: %zu, final: %d\n",
						ctx->digcnt, length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: %zu, length: %zu, final: %d\n",
						ctx->digcnt, length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length1;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}

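/*
 * PDC programming model, as used above: TPR/TCR describe the current
 * transmit buffer (pointer and count in 32-bit words), TNPR/TNCR the
 * "next" buffer that the controller chains to automatically. Writing
 * TXTEN to PTCR starts the transfer; TXTDIS stops it while the registers
 * are reloaded. Passing a second buffer lets the driver push the
 * scatterlist data and the final padding in a single shot.
 */
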
static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);

	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
				SHA1_BLOCK_SIZE);
		return -EINVAL;
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_pdc(dd, ctx->dma_addr, length, 0, 0, final);
}

static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: %zu, final: %u\n",
					ctx->bufcnt, ctx->digcnt, final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: %zu, bufcnt: %zu, total: %u\n",
			ctx->digcnt, ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, SHA1_BLOCK_SIZE))
		/* size is not SHA1_BLOCK_SIZE aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be SHA1_BLOCK_SIZE aligned */
			tail = length & (SHA1_BLOCK_SIZE - 1);
			length -= tail;
			if (length == 0) {
				/* offset where to start slow */
				ctx->offset = length;
				return atmel_sha_update_dma_slow(dd);
			}
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		tail = length & (SHA1_BLOCK_SIZE - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %zu bytes error\n",
				ctx->buflen + SHA1_BLOCK_SIZE);
			return -EINVAL;
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_pdc(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
					DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return -EINVAL;
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_pdc(dd, sg_dma_address(ctx->sg), length, 0,
								0, final);
}

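/*
 * Summary of the fast-path requirements checked above: the direct DMA
 * path is only taken when there is no data pending in the staging
 * buffer, the scatterlist entry is 32-bit aligned, and every entry
 * except the last is a whole number of SHA blocks; anything else falls
 * back to atmel_sha_update_dma_slow(), which linearizes through
 * ctx->buffer.
 */
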
static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD)
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						SHA1_BLOCK_SIZE, DMA_TO_DEVICE);
	}

	return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zu, finup: %d\n",
		 ctx->total, ctx->digcnt, (ctx->flags & SHA_FLAGS_FINUP) != 0);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for DMA completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %zu\n",
			err, ctx->digcnt);

	return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	} else {
		/* faster to handle last block with cpu */
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

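/*
 * ATMEL_SHA_DMA_THRESHOLD (56 bytes) is the cut-off used above: below
 * it, the cost of mapping a buffer and programming the PDC is assumed
 * to outweigh simply feeding the words to the DIN registers from the
 * CPU, hence the CPU path for short final blocks.
 */
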
static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	if (likely(ctx->flags & SHA_FLAGS_SHA1))
		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	else
		for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
			hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	if (likely(ctx->flags & SHA_FLAGS_SHA1))
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
	else
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
}

static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt)
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: %zu, bufcnt: %zu\n", ctx->digcnt,
		ctx->bufcnt);

	return err;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);

	clk_disable_unprepare(dd->iclk);

	if (req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->done_task);
}

static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	clk_prepare_enable(dd->iclk);

	/* Reset the IP on first use only */
	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		atmel_sha_dualbuff_test(dd);
		dd->flags |= SHA_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) {
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
		}
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return ret;
}

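/*
 * Request lifecycle: atmel_sha_handle_queue() enqueues under dd->lock,
 * bails out if the engine is already SHA_FLAGS_BUSY, and otherwise
 * dequeues the next request and kicks it off. A return value of
 * -EINPROGRESS from the update/final paths means the interrupt handler
 * and done_task tasklet will complete the request asynchronously; any
 * other value completes it synchronously via atmel_sha_finish_req().
 */
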
static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}

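/*
 * Buffering strategy in atmel_sha_update(): as long as the running
 * total fits in the SHA_BUFFER_LEN staging buffer (and this is not a
 * finup), data is only copied and nothing is sent to the hardware, so
 * many small update() calls get coalesced into one hardware pass.
 */
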
static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;
	int err = 0;

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->bufcnt) {
		return atmel_sha_enqueue(req, SHA_OP_FINAL);
	} else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */
		err = atmel_sha_hw_init(dd);
		if (err)
			goto err1;

		dd->flags |= SHA_FLAGS_BUSY;
		err = atmel_sha_final_req(dd);
	} else {
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);
	}

err1:
	if (err != -EINPROGRESS)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	return err;
}

static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to clean up resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}

static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* alg_base is currently unused */

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
				alg_name);
		return PTR_ERR(tctx->fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	return atmel_sha_cra_init_alg(tfm, NULL);
}

static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}

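/*
 * Note on the fallback above: it is allocated and kept for the lifetime
 * of the tfm but is not invoked anywhere in this version of the driver.
 * Requesting it with the CRYPTO_ALG_NEED_FALLBACK mask ensures the
 * shash selected is itself self-contained (typically the generic
 * software implementation).
 */
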
static struct ahash_alg sha_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
			.cra_exit		= atmel_sha_cra_exit,
		}
	}
},
};

static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
	int err = 0;

	if (!(SHA_FLAGS_BUSY & dd->flags)) {
		atmel_sha_handle_queue(dd, NULL);
		return;
	}

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

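/*
 * Interrupt handling is split in the usual top/bottom-half fashion: the
 * hard IRQ handler above only acknowledges and masks the interrupt and
 * records OUTPUT_READY/DMA_READY in the device flags; the actual state
 * machine runs in the done_task tasklet.
 */
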
static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++)
		crypto_unregister_ahash(&sha_algs[i]);
}

static int atmel_sha_register_algs(struct atmel_sha_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(sha_algs); i++) {
		err = crypto_register_ahash(&sha_algs[i]);
		if (err)
			goto err_sha_algs;
	}

	return 0;

err_sha_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(&sha_algs[j]);

	return err;
}

static int atmel_sha_probe(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;
	struct device *dev = &pdev->dev;
	struct resource *sha_res;
	unsigned long sha_phys_size;
	int err;

	sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
	if (sha_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto sha_dd_err;
	}

	sha_dd->dev = dev;

	platform_set_drvdata(pdev, sha_dd);

	INIT_LIST_HEAD(&sha_dd->list);

	tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
					(unsigned long)sha_dd);

	crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);

	sha_dd->irq = -1;

	/* Get the base address */
	sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!sha_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	sha_dd->phys_base = sha_res->start;
	sha_phys_size = resource_size(sha_res);

	/* Get the IRQ */
	sha_dd->irq = platform_get_irq(pdev, 0);
	if (sha_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = sha_dd->irq;
		goto res_err;
	}

	err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
						sha_dd);
	if (err) {
		dev_err(dev, "unable to request sha irq.\n");
		goto res_err;
	}

	/* Initializing the clock */
	sha_dd->iclk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sha_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(sha_dd->iclk);
		goto clk_err;
	}

	sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
	if (!sha_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto sha_io_err;
	}

	spin_lock(&atmel_sha.lock);
	list_add_tail(&sha_dd->list, &atmel_sha.dev_list);
	spin_unlock(&atmel_sha.lock);

	err = atmel_sha_register_algs(sha_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel SHA1/SHA256\n");

	return 0;

err_algs:
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);
	iounmap(sha_dd->io_base);
sha_io_err:
	clk_put(sha_dd->iclk);
clk_err:
	free_irq(sha_dd->irq, sha_dd);
res_err:
	tasklet_kill(&sha_dd->done_task);
	kfree(sha_dd);
sha_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int atmel_sha_remove(struct platform_device *pdev)
{
	struct atmel_sha_dev *sha_dd;

	sha_dd = platform_get_drvdata(pdev);
	if (!sha_dd)
		return -ENODEV;
	spin_lock(&atmel_sha.lock);
	list_del(&sha_dd->list);
	spin_unlock(&atmel_sha.lock);

	atmel_sha_unregister_algs(sha_dd);

	tasklet_kill(&sha_dd->done_task);

	iounmap(sha_dd->io_base);

	clk_put(sha_dd->iclk);

	if (sha_dd->irq >= 0)
		free_irq(sha_dd->irq, sha_dd);

	kfree(sha_dd);

	return 0;
}

static struct platform_driver atmel_sha_driver = {
	.probe		= atmel_sha_probe,
	.remove		= atmel_sha_remove,
	.driver		= {
		.name	= "atmel_sha",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(atmel_sha_driver);

MODULE_DESCRIPTION("Atmel SHA1/SHA256 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");
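
/*
 * Illustrative userspace sketch (not part of the driver; error checking
 * omitted for brevity): once this module is loaded and its "sha1"
 * implementation is selected, the hash is reachable through the AF_ALG
 * socket interface, assuming a kernel built with
 * CONFIG_CRYPTO_USER_API_HASH:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "hash",
 *			.salg_name   = "sha1",
 *		};
 *		unsigned char digest[20];
 *		int tfmfd, opfd, i;
 *
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		opfd = accept(tfmfd, NULL, 0);
 *
 *		write(opfd, "abc", 3);			// feed the message
 *		read(opfd, digest, sizeof(digest));	// fetch the digest
 *
 *		for (i = 0; i < 20; i++)
 *			printf("%02x", digest[i]);	// a9993e36...
 *		printf("\n");
 *		close(opfd);
 *		close(tfmfd);
 *		return 0;
 *	}
 */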