linux/drivers/crypto/sahara.c
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define		SAHARA_CMD_RESET		(1 << 0)
#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define			SAHARA_STATE_IDLE	0
#define			SAHARA_STATE_BUSY	1
#define			SAHARA_STATE_ERR	2
#define			SAHARA_STATE_FAULT	3
#define			SAHARA_STATE_COMPLETE	4
#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
#define		SAHARA_STATUS_ERROR		(1 << 4)
#define		SAHARA_STATUS_SECURE		(1 << 5)
#define		SAHARA_STATUS_FAIL		(1 << 6)
#define		SAHARA_STATUS_INIT		(1 << 7)
#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define			SAHARA_ERRSOURCE_CHA	14
#define			SAHARA_ERRSOURCE_DMA	15
#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define		SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

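/*
 * Job descriptors and link-table entries, laid out as the hardware
 * expects them: the engine fetches a chain of descriptors starting at
 * the address written to SAHARA_REG_DAR, and each descriptor's p1/p2
 * pointers may refer to chains of sahara_hw_link entries describing
 * scattered data buffers. Both live in DMA-coherent memory allocated
 * in sahara_probe().
 */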
struct sahara_hw_desc {
	u32		hdr;
	u32		len1;
	dma_addr_t	p1;
	u32		len2;
	dma_addr_t	p2;
	dma_addr_t	next;
};

struct sahara_hw_link {
	u32		len;
	dma_addr_t	p;
	dma_addr_t	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_ablkcipher *fallback;

	/* SHA-specific context */
	struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mutex: protects this request's state while it is being enqueued
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @in_sg_chained: specifies if chained scatterlists are used or not
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	struct mutex		mutex;
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	bool			in_sg_chained;
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

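/*
 * Descriptor headers use odd parity in this driver's convention: the
 * base header sets SAHARA_HDR_PARITY_BIT and every additional mode bit
 * toggles it, so the total number of set bits stays odd (the SHA path
 * recomputes the same property with hweight_long() instead).
 */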
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

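/*
 * Count how many scatterlist entries are needed to cover 'total' bytes;
 * the result sizes the dma_map_sg() calls and the hw link chains below.
 */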
static int sahara_sg_length(struct scatterlist *sg,
			    unsigned int total)
{
	int sg_nb;
	unsigned int len;
	struct scatterlist *sg_list;

	sg_nb = 0;
	sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

static char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, "  - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "          * DMA read.\n");
		else
			dev_err(dev->device, "          * DMA write.\n");

		dev_err(dev->device, "          * %s.\n",
		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "          * %s.\n",
		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "          * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "          * %s.\n",
		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, "  - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "          * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, "          * %s.\n",
	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, "  - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, "  - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, "  - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, "  - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, "  - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, "  - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, "  - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, "  - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "  - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "  - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "  - Debug Mode.\n");

	dev_dbg(dev->device, "  - Internal state = 0x%02x\n",
	       SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
			i, dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
			i, dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

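/*
 * Build the two-descriptor AES job: hw_desc[0] loads the key (and, in
 * CBC mode, the IV) into the SKHA unit, while hw_desc[1] points at the
 * input and output link chains for the actual data transfer.
 */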
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[0]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[0]->len1 = 0;
			dev->hw_desc[0]->p1 = 0;
		}
		dev->hw_desc[0]->len2 = ctx->keylen;
		dev->hw_desc[0]->p2 = dev->key_phys_base;
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
	}
	dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
	dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[1]->len1 = dev->total;
	dev->hw_desc[1]->len2 = dev->total;
	dev->hw_desc[1]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}

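/*
 * Called from the sahara_queue_manage() kthread with exclusive access
 * to the device: program the descriptors, kick the engine through the
 * DAR register and sleep until the IRQ handler signals dma_completion.
 */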
static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!ret) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);
	ctx->fallback = NULL;
}

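/*
 * Compose the MDHA descriptor header from the per-request mode bits:
 * the first descriptor of a transfer initializes the hash state, later
 * ones reload a previously saved context, and the parity bit is set
 * whenever the header would otherwise have an even number of set bits.
 */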
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			      struct sahara_sha_reqctx *rctx)
{
	u32 hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				       struct sahara_sha_reqctx *rctx,
				       int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	if (rctx->in_sg_chained) {
		i = start;
		sg = dev->in_sg;
		while (sg) {
			ret = dma_map_sg(dev->device, sg, 1,
					 DMA_TO_DEVICE);
			if (!ret)
				return -EFAULT;

			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
			i += 1;
		}
		dev->hw_link[i - 1]->next = 0;
	} else {
		sg = dev->in_sg;
		ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
				 DMA_TO_DEVICE);
		if (!ret)
			return -EFAULT;

		for (i = start; i < dev->nb_in_sg + start; i++) {
			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			if (i == (dev->nb_in_sg + start - 1)) {
				dev->hw_link[i]->next = 0;
			} else {
				dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
				sg = sg_next(sg);
			}
		}
	}

	return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned int result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = scatterwalk_sg_next(sg);
	}

	return nbytes;
}

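/*
 * Stage the request so the hardware only ever sees multiples of the
 * block size: short updates are buffered in rctx->buf, a leftover tail
 * is saved for the next call, and buffered plus new data are stitched
 * together with a chained scatterlist when both are present.
 */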
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save it for the next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		rctx->in_sg_chained = true;
		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
		rctx->in_sg_chained = false;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
		rctx->in_sg_chained = false;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

static void sahara_sha_unmap_sg(struct sahara_dev *dev,
				struct sahara_sha_reqctx *rctx)
{
	struct scatterlist *sg;

	if (rctx->in_sg_chained) {
		sg = dev->in_sg;
		while (sg) {
			dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
			sg = sg_next(sg);
		}
	} else {
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			DMA_TO_DEVICE);
	}
}

static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	ret = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!ret) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		sahara_sha_unmap_sg(dev, rctx);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

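/*
 * Single worker kthread: requests are queued from process context in
 * sahara_aes_crypt()/sahara_sha_enqueue() and dequeued here one at a
 * time, matching SAHARA_QUEUE_LENGTH and the one-job-at-a-time
 * hardware.
 */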
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	mutex_lock(&rctx->mutex);
	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_ctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_ctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
					CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->shash_fallback);
	ctx->shash_fallback = NULL;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};
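
/*
 * The algorithms above and below are reached through the regular kernel
 * crypto API. A minimal sketch of a caller (hypothetical, error handling
 * omitted), using this era's ablkcipher interface:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 */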

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

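/*
 * IRQ handler: acknowledge the interrupt, decode the status and signal
 * dma_completion so the waiting process function can return. A BUSY
 * state means the interrupt was not for a finished descriptor.
 */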
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

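/*
 * Probe: map the registers, grab the IRQ and clocks, then carve out the
 * DMA-coherent regions (descriptors, key+IV, MDHA context, link table)
 * that the descriptor-building code above writes into.
 */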
1454static int sahara_probe(struct platform_device *pdev)
1455{
1456        struct sahara_dev *dev;
1457        struct resource *res;
1458        u32 version;
1459        int irq;
1460        int err;
1461        int i;
1462
1463        dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
1464        if (dev == NULL) {
1465                dev_err(&pdev->dev, "unable to alloc data struct.\n");
1466                return -ENOMEM;
1467        }
1468
1469        dev->device = &pdev->dev;
1470        platform_set_drvdata(pdev, dev);
1471
1472        /* Get the base address */
1473        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1474        dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
1475        if (IS_ERR(dev->regs_base))
1476                return PTR_ERR(dev->regs_base);
1477
1478        /* Get the IRQ */
1479        irq = platform_get_irq(pdev,  0);
1480        if (irq < 0) {
1481                dev_err(&pdev->dev, "failed to get irq resource\n");
1482                return irq;
1483        }
1484
1485        err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1486                               0, dev_name(&pdev->dev), dev);
1487        if (err) {
1488                dev_err(&pdev->dev, "failed to request irq\n");
1489                return err;
1490        }
1491
1492        /* clocks */
1493        dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1494        if (IS_ERR(dev->clk_ipg)) {
1495                dev_err(&pdev->dev, "Could not get ipg clock\n");
1496                return PTR_ERR(dev->clk_ipg);
1497        }
1498
1499        dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1500        if (IS_ERR(dev->clk_ahb)) {
1501                dev_err(&pdev->dev, "Could not get ahb clock\n");
1502                return PTR_ERR(dev->clk_ahb);
1503        }
1504
1505        /* Allocate HW descriptors */
1506        dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
1507                        SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1508                        &dev->hw_phys_desc[0], GFP_KERNEL);
1509        if (!dev->hw_desc[0]) {
1510                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1511                return -ENOMEM;
1512        }
1513        dev->hw_desc[1] = dev->hw_desc[0] + 1;
1514        dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1515                                sizeof(struct sahara_hw_desc);
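        /*
         * Both hardware descriptors live in the single coherent
         * allocation above: hw_desc[1] sits one struct past hw_desc[0],
         * and the physical address is advanced by the same sizeof() so
         * the CPU and DMA views of the pair stay in step.
         */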
1516
1517        /* Allocate space for iv and key */
1518        dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1519                                &dev->key_phys_base, GFP_KERNEL);
1520        if (!dev->key_base) {
1521                dev_err(&pdev->dev, "Could not allocate memory for key\n");
1522                err = -ENOMEM;
1523                goto err_key;
1524        }
1525        dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1526        dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
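        /*
         * The IV shares the key buffer: an AES-128 key followed by a
         * 16-byte IV, which is why 2 * AES_KEYSIZE_128 bytes are
         * allocated above.
         */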
1527
1528        /* Allocate space for context: largest digest + message length field */
1529        dev->context_base = dma_alloc_coherent(&pdev->dev,
1530                                        SHA256_DIGEST_SIZE + 4,
1531                                        &dev->context_phys_base, GFP_KERNEL);
1532        if (!dev->context_base) {
1533                dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1534                err = -ENOMEM;
                goto err_context;
1536        }
1537
1538        /* Allocate space for HW links */
1539        dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
1540                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1541                        &dev->hw_phys_link[0], GFP_KERNEL);
1542        if (!dev->hw_link[0]) {
1543                dev_err(&pdev->dev, "Could not allocate hw links\n");
1544                err = -ENOMEM;
1545                goto err_link;
1546        }
1547        for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1548                dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1549                                        sizeof(struct sahara_hw_link);
1550                dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1551        }
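        /*
         * Like the descriptors, all SAHARA_MAX_HW_LINK link entries come
         * from one coherent block; this loop precomputes the virtual and
         * physical address of each entry so later code can index them
         * directly.
         */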
1552
1553        crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1554
1555        spin_lock_init(&dev->lock);
1556        mutex_init(&dev->queue_mutex);
1557
1558        dev_ptr = dev;
1559
1560        dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1561        if (IS_ERR(dev->kthread)) {
1562                err = PTR_ERR(dev->kthread);
                goto err_kthread;
1564        }
1565
1566        init_completion(&dev->dma_completion);
1567
        err = clk_prepare_enable(dev->clk_ipg);
        if (err)
                goto err_clk_ipg;

        err = clk_prepare_enable(dev->clk_ahb);
        if (err)
                goto err_clk_ahb;
1570
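        /*
         * The version register layout depends on the hardware revision:
         * SAHARA v3 (i.MX27) reports its version in the low byte, while
         * v4 (i.MX53) reports it in bits 15:8, hence the shift in the
         * i.MX53 branch below.
         */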
1571        version = sahara_read(dev, SAHARA_REG_VERSION);
1572        if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1573                if (version != SAHARA_VERSION_3)
1574                        err = -ENODEV;
1575        } else if (of_device_is_compatible(pdev->dev.of_node,
1576                        "fsl,imx53-sahara")) {
1577                if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1578                        err = -ENODEV;
1579                version = (version >> 8) & 0xff;
1580        }
1581        if (err == -ENODEV) {
1582                dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1583                                version);
1584                goto err_algs;
1585        }
1586
1587        dev->version = version;
1588
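        /*
         * Bring the engine to a known state: reset into batch mode,
         * then program a zero throttle value, a DMA burst of 8,
         * automatic RNG reseeding, and the interrupt enable bit that
         * sahara_irq_handler() relies on.
         */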
1589        sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1590                     SAHARA_REG_CMD);
1591        sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1592                        SAHARA_CONTROL_SET_MAXBURST(8) |
1593                        SAHARA_CONTROL_RNG_AUTORSD |
1594                        SAHARA_CONTROL_ENABLE_INT,
1595                        SAHARA_REG_CONTROL);
1596
1597        err = sahara_register_algs(dev);
1598        if (err)
1599                goto err_algs;
1600
1601        dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1602
1603        return 0;
1604
        /*
         * Unwind strictly in reverse order of setup; each label below
         * releases only what was live at the matching failure point.
         */
err_algs:
        clk_disable_unprepare(dev->clk_ahb);
err_clk_ahb:
        clk_disable_unprepare(dev->clk_ipg);
err_clk_ipg:
        kthread_stop(dev->kthread);
        dev_ptr = NULL;
err_kthread:
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                          dev->hw_link[0], dev->hw_phys_link[0]);
err_link:
        dma_free_coherent(&pdev->dev,
                          SHA256_DIGEST_SIZE + 4,
                          dev->context_base, dev->context_phys_base);
err_context:
        dma_free_coherent(&pdev->dev,
                          2 * AES_KEYSIZE_128,
                          dev->key_base, dev->key_phys_base);
err_key:
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                          dev->hw_desc[0], dev->hw_phys_desc[0]);

        return err;
1626}
1627
static int sahara_remove(struct platform_device *pdev)
{
        struct sahara_dev *dev = platform_get_drvdata(pdev);

        /* Quiesce all users before tearing down the memory they use. */
        sahara_unregister_algs(dev);

        kthread_stop(dev->kthread);

        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                          dev->hw_link[0], dev->hw_phys_link[0]);
        dma_free_coherent(&pdev->dev,
                          SHA256_DIGEST_SIZE + 4,
                          dev->context_base, dev->context_phys_base);
        dma_free_coherent(&pdev->dev,
                          2 * AES_KEYSIZE_128,
                          dev->key_base, dev->key_phys_base);
        dma_free_coherent(&pdev->dev,
                          SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                          dev->hw_desc[0], dev->hw_phys_desc[0]);

        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);

        dev_ptr = NULL;

        return 0;
}
1653
1654static struct platform_driver sahara_driver = {
1655        .probe          = sahara_probe,
1656        .remove         = sahara_remove,
1657        .driver         = {
1658                .name   = SAHARA_NAME,
1659                .of_match_table = sahara_dt_ids,
1660        },
1661        .id_table = sahara_platform_ids,
1662};
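/*
 * module_platform_driver() expands to the boilerplate module_init()/
 * module_exit() pair that simply registers and unregisters
 * sahara_driver, so no hand-written init or exit functions are needed.
 */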
1663
1664module_platform_driver(sahara_driver);
1665
1666MODULE_LICENSE("GPL");
1667MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1668MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1669MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
1670