linux/drivers/crypto/padlock-aes.c
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include "padlock.h"

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
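
/*
 * Field reference for the control word, as programmed by aes_set_key()
 * below (the field semantics follow VIA's PadLock programming guide;
 * treat this as an informal summary, not normative documentation):
 *
 *	key_len	rounds	ksize	keygen
 *	16	10	0	0  (hardware expands the key itself)
 *	24	12	1	1  (software supplies the expanded key)
 *	32	14	2	1
 *
 * encdec selects decryption when set; algo and interm stay zero here.
 */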

/* Whenever making any changes to the following
 * structure *make sure* E, d_data and cword stay
 * aligned on 16-byte boundaries and that the hardware
 * can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
		 as it's possible that the capability will be
		 added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

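/*
 * The crypto layer only guarantees the tfm context the alignment
 * reported by crypto_tfm_ctx_alignment().  If that is already at
 * least PADLOCK_ALIGNMENT, aes_ctx_common() uses the pointer as-is;
 * otherwise it rounds up to the next 16-byte boundary so the xcrypt
 * instructions always see aligned key material.
 */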
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		return 0;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
	return 0;
}
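
/*
 * Note on sizing: AES_MAX_KEYLENGTH is 15 * 16 bytes, so the memcpy()
 * calls above fill the full 240 bytes of E and D regardless of key
 * size.  This is why the aes_ctx comment insists the hardware must be
 * able to read 16 * 16 bytes: the engine's key fetch overshoots the
 * 15 * 16 bytes that actually matter.
 */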

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(void)
{
	asm volatile ("pushfl; popfl");
}
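
/*
 * The xcrypt unit caches the most recently used key material.  Per
 * VIA's programming notes, a write to EFLAGS (here pushfl/popfl)
 * makes the engine reload the key schedule from memory on the next
 * xcrypt, so stale cached key state cannot shadow a freshly set key.
 */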

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context.
 */

static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
				  void *control_word)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(1));
}
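
/*
 * Register contract of the asm above: ESI = source, EDI = destination,
 * EBX = key schedule, EDX = control word, ECX = block count (1 here).
 * The instruction is emitted as raw bytes (0xf3 0x0f 0xa7 0xc8, i.e.
 * "rep xcryptecb") because older assemblers do not know the PadLock
 * mnemonics.
 */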

static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
{
	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, AES_BLOCK_SIZE);
	padlock_xcrypt(tmp, out, key, cword);
}

static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword)
{
	/*
	 * padlock_xcrypt requires at least two blocks of data: the
	 * engine prefetches past the block it is told to process, so
	 * if the single input block is the last block of a page the
	 * prefetch may touch the following, possibly unmapped, page.
	 * The XOR/mask test below is true exactly when the block
	 * starts AES_BLOCK_SIZE bytes before a page boundary; in that
	 * case bounce the block through an aligned stack buffer.
	 */
	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
		       (PAGE_SIZE - 1)))) {
		aes_crypt_copy(in, out, key, cword);
		return;
	}

	padlock_xcrypt(in, out, key, cword);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	if (count == 1) {
		aes_crypt(input, output, key, control_word);
		return;
	}

	asm volatile ("test $1, %%cl;"
		      "je 1f;"
		      "lea -1(%%ecx), %%eax;"
		      "mov $1, %%ecx;"
		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
		      "mov %%eax, %%ecx;"
		      "1:"
		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count)
		      : "ax");
}
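
/*
 * The asm above splits an odd block count: one block is processed
 * first (ECX temporarily set to 1), then the remaining even count is
 * handed to a second rep xcryptecb.  Single-block requests take the
 * aes_crypt() path instead, so they also get the page-boundary
 * workaround.  (The odd/even split itself appears to work around an
 * engine quirk with odd counts.)
 */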

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	/* rep xcryptcbc */
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
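
/*
 * For CBC the IV pointer travels in EAX ("+a"(iv)); on return the
 * hardware leaves EAX pointing at the last block of ciphertext, which
 * is exactly the IV needed to chain the next request.  That is why
 * cbc_aes_encrypt() below copies the returned pointer's contents back
 * into walk.iv.
 */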

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;
	padlock_reset_key();

	ts_state = irq_ts_save();
	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
	irq_ts_restore(ts_state);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;
	padlock_reset_key();

	ts_state = irq_ts_save();
	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
	irq_ts_restore(ts_state);
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};
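
/*
 * Minimal usage sketch (hypothetical caller, not part of this driver):
 * other kernel code reaches this implementation through the generic
 * crypto API rather than calling it directly, e.g.
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, 16);
 *		crypto_cipher_encrypt_one(tfm, dst, src);
 *		crypto_free_cipher(tfm);
 *	}
 *
 * Thanks to PADLOCK_CRA_PRIORITY this driver outranks the generic
 * software AES, so "aes" resolves to "aes-padlock" on this hardware.
 */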

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	return err;
}
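
/*
 * The loop above is the standard blkcipher walk pattern: each
 * iteration yields a virtually contiguous chunk, the whole blocks in
 * it go to the engine in one call, and the sub-block remainder
 * (nbytes & (AES_BLOCK_SIZE - 1)) is handed back to
 * blkcipher_walk_done() to be carried over into the next iteration.
 */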

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);
	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	ecb_aes_encrypt,
			.decrypt		=	ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	return err;
}
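
/*
 * CBC encryption must propagate the IV by hand: the pointer returned
 * from padlock_xcrypt_cbc() addresses the last ciphertext block, and
 * copying it into walk.iv keeps the chain intact across walk chunks
 * and across consecutive requests.
 */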

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key();

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);
	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize		=	AES_MIN_KEY_SIZE,
			.max_keysize		=	AES_MAX_KEY_SIZE,
			.ivsize			=	AES_BLOCK_SIZE,
			.setkey			=	aes_set_key,
			.encrypt		=	cbc_aes_encrypt,
			.decrypt		=	cbc_aes_decrypt,
		}
	}
};

static int __init padlock_init(void)
{
	int ret;

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}
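
	/*
	 * cpu_has_xcrypt / cpu_has_xcrypt_enabled test the
	 * X86_FEATURE_XCRYPT and X86_FEATURE_XCRYPT_EN cpuid bits from
	 * VIA's extended (Centaur) cpuid leaf: the engine must both
	 * exist and have been enabled, typically by the BIOS.
	 */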

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");