linux/drivers/crypto/ixp4xx_crypto.c
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID    29
#define RECV_QID    30

#define CTL_FLAG_UNUSED         0x0000
#define CTL_FLAG_USED           0x1000
#define CTL_FLAG_PERFORM_ABLK   0x0001
#define CTL_FLAG_GEN_ICV        0x0002
#define CTL_FLAG_GEN_REVAES     0x0004
#define CTL_FLAG_PERFORM_AEAD   0x0008
#define CTL_FLAG_MASK           0x000f

#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

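/*
 * DMA buffer descriptor in the layout the NPE expects.  The two 16-bit
 * length fields are defined in hardware byte order, hence the
 * endianness-dependent ordering.  The trailing "next" pointer and
 * "dir" are host-side bookkeeping only and are never read by the NPE.
 */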
struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

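/*
 * One crypto operation descriptor, exchanged with the NPE through the
 * queue manager.  It must be exactly 64 bytes; this is enforced by the
 * BUILD_BUG_ON() in setup_crypt_desc().
 */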
struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_*  operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_*  operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *buffer;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static void dev_release(struct device *dev)
{
	return;
}

#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
	.name = DRIVER_NAME,
	.id   = 0,
	.num_resources = 0,
	.dev  = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
		.release = dev_release,
	}
};

static struct device *dev = &pseudo_dev.dev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* Allocate the whole ring, including the emergency descriptors
	 * handed out by get_crypt_desc_emerg(); the free in
	 * release_ixp_crypto() uses the same size. */
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}

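/*
 * Descriptors 0..NPE_QLEN-1 serve regular requests; descriptors
 * NPE_QLEN..NPE_QLEN_TOTAL-1 are reserved for get_crypt_desc_emerg(),
 * so configuration operations (HMAC precompute, reverse AES key) can
 * still make progress when the first NPE_QLEN entries are busy.
 */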
static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/* unmap the data buffer, then free the descriptor itself */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

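/*
 * Per-descriptor completion handling.  A set bit 0 in the returned
 * queue entry marks a failed operation (reported as -EBADMSG); the low
 * bits are masked off before converting the entry back to a
 * descriptor pointer.
 */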
static void one_packet(dma_addr_t phys)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

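/*
 * Tasklet: drain up to four completed entries from RECV_QID per run,
 * then reschedule itself so other softirqs are not starved while the
 * queue stays busy.
 */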
static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(void)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto err;	/* also releases the NPE */
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
				npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
			npe_name(npe_c));
		ret = -ENODEV;
		goto err;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(void)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
	return init_tfm(tfm);
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

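/*
 * Have the NPE hash one HMAC pad block (the key XORed with
 * ipad/opad) and store the resulting intermediate digest at "target"
 * inside the NPE context.  Completion is signalled asynchronously via
 * the CTL_FLAG_GEN_ICV case in one_packet().
 */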
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

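/*
 * AES decryption on the NPE uses the reverse ("equivalent inverse")
 * key schedule.  NPE_OP_ENC_GEN_KEY asks the engine to derive that
 * key from the cipher key in the decrypt context; icv_rev_aes points
 * at the slot right after the config word where it is to be stored.
 */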
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		/* MOD_AESxxx already includes the matching KEYLEN_xxx bits */
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
		{
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

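/*
 * Walk a scatterlist, DMA-map each segment and append a buffer
 * descriptor for it to the chain anchored at "buf".  Returns the last
 * descriptor written, or NULL if a pool allocation failed (the chain
 * built so far is terminated and left for the caller to free).  Note
 * that "buf" itself is only a hook: the head of the new chain is
 * buf->next / buf->phys_next.
 */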
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* terminate what we have and report the failure */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
		nbytes -= len;
		ptr = page_address(sg_page(sg)) + sg->offset;
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

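/*
 * Queue one ablkcipher request to the NPE.  Returns -EINPROGRESS on
 * success (completion is delivered from one_packet()), or -EAGAIN when
 * the send queue is full or the tfm is still being (re)configured.
 */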
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		BUG_ON(req->dst->length < nbytes);
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

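/*
 * Check whether the "nbytes" of authentication data starting at
 * "start" span more than one scatterlist element.  If they do, the NPE
 * cannot read/write the tag in place and a bounce buffer is used
 * instead (see aead_perform() below).
 */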
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
		unsigned int nbytes)
{
	int offset = 0;

	if (!nbytes)
		return 0;

	for (;;) {
		if (start < offset + sg->length)
			break;

		offset += sg->length;
		sg = scatterwalk_sg_next(sg);
	}
	return (start + nbytes > offset + sg->length);
}

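/*
 * Queue one AEAD request.  The NPE authenticates assoc data + IV +
 * payload as one contiguous region, so all three are chained into a
 * single buffer list.  "eff_cryptlen" is the length handed to the
 * cipher, which may include the IV for givencrypt requests.
 */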
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + ivsize + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	if (req->src != req->dst) {
		BUG(); /* -ENOTSUP because of my laziness */
	}

	/* ASSOC data */
	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
		flags, DMA_TO_DEVICE);
	req_ctx->buffer = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto out;
	/* IV */
	sg_init_table(&req_ctx->ivlist, 1);
	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_chain;
	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_chain;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}
	/* Crypt */
	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_hmac_virt;
	if (!req_ctx->hmac_virt) {
		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;
free_hmac_virt:
	if (req_ctx->hmac_virt) {
		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
				crypt->icv_rev_aes);
	}
free_chain:
	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
out:
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

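/*
 * authenc() keys arrive as an rtattr-encoded blob: a
 * crypto_authenc_key_param (carrying enckeylen) followed by the
 * authentication key and then the encryption key.
 */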
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enckey_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enckey_len)
		goto badkey;

	ctx->authkey_len = keylen - ctx->enckey_len;
	/* reject keys that would overflow the fixed-size key buffers */
	if (ctx->enckey_len > MAX_KEYLEN || ctx->authkey_len > MAX_KEYLEN)
		goto badkey;

	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
	memcpy(ctx->authkey, key, ctx->authkey_len);

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	ctx->enckey_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 1, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 0, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned len, ivsize = crypto_aead_ivsize(tfm);
	__be64 seq;

	/* copied from eseqiv.c */
	if (!ctx->salted) {
		get_random_bytes(ctx->salt, ivsize);
		ctx->salted = 1;
	}
	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);
	return aead_perform(&req->areq, 1, req->areq.assoclen,
			req->areq.cryptlen + ivsize, req->giv);
}

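/*
 * Table of algorithms the NPE supports.  Entries without a .hash are
 * registered as ablkciphers, entries with one as authenc() AEADs;
 * cfg_enc/cfg_dec are the NPE config words for each direction.
 */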
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto = {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto = {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto = {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	/* CTR decryption is the same operation as encryption */
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(md5),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(sha1),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(md5),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto = {
		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"
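/*
 * Register all table entries with the crypto API.  The driver name is
 * derived from cra_name plus the postfix above; entries whose derived
 * name would not fit in CRYPTO_MAX_ALG_NAME, and AES entries on
 * firmware without AES support, are skipped.
 */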
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	if (platform_device_register(&pseudo_dev))
		return -ENODEV;

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto();
	if (err) {
		platform_device_unregister(&pseudo_dev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}
		if (!ixp4xx_algos[i].hash) {
			/* block ciphers */
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					 CRYPTO_ALG_KERN_DRIVER_ONLY |
					 CRYPTO_ALG_ASYNC;
			if (!cra->cra_ablkcipher.setkey)
				cra->cra_ablkcipher.setkey = ablk_setkey;
			if (!cra->cra_ablkcipher.encrypt)
				cra->cra_ablkcipher.encrypt = ablk_encrypt;
			if (!cra->cra_ablkcipher.decrypt)
				cra->cra_ablkcipher.decrypt = ablk_decrypt;
			cra->cra_init = init_tfm_ablk;
		} else {
			/* authenc */
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					 CRYPTO_ALG_KERN_DRIVER_ONLY |
					 CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = aead_setkey;
			cra->cra_aead.setauthsize = aead_setauthsize;
			cra->cra_aead.encrypt = aead_encrypt;
			cra->cra_aead.decrypt = aead_decrypt;
			cra->cra_aead.givencrypt = aead_givencrypt;
			cra->cra_init = init_tfm_aead;
		}
		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto();
	platform_device_unregister(&pseudo_dev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");