/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include "padlock.h"

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));

/*
 * Whenever making any changes to the following structure *make sure*
 * you keep E, d_data and cword aligned on 16-byte boundaries, since
 * the hardware reads the whole key area, not only the part in use.
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

static DEFINE_PER_CPU(struct cword *, last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	         as it's possible that the capability will be
	         added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

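/*
 * The tfm context may come with weaker alignment than PADLOCK_ALIGNMENT;
 * round the pointer up so that E, d_data and the control words sit on
 * the boundaries the engine expects.
 */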
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}

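/*
 * Build both key schedules.  With a 128-bit key the hardware expands
 * the key itself and we pass the plain key; otherwise we expand it in
 * software and set keygen so the engine uses the precomputed schedule.
 */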
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	/*
	 * The key behind these control words has changed; invalidate
	 * any per-cpu cache of their addresses so the engine reloads
	 * the key on its next use.
	 */
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(last_cword, cpu))
			per_cpu(last_cword, cpu) = NULL;

	return 0;
}

/* ====== Encryption/decryption routines ====== */

/*
 * The PadLock engine caches the key schedule between xcrypt calls;
 * touching EFLAGS (pushfl/popfl) tells it to reload the key.  Only do
 * that when the control word differs from the last one used on this CPU.
 */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(last_cword, cpu))
		asm volatile ("pushfl; popfl");
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'.  These instructions
 * should be used only inside the irq_ts_save/restore() context.
 */

static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
				  struct cword *control_word)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(1));
}

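/*
 * The engine can read past the end of the input; when a block sits at
 * the very end of a page, bounce it through a stack buffer that has a
 * spare block behind it so the over-read stays on our own stack.
 */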
static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
{
	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, AES_BLOCK_SIZE);
	padlock_xcrypt(tmp, out, key, cword);
}

static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword)
{
	/* Input block ends exactly at a page boundary?  Use the copy path. */
	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
		       (PAGE_SIZE - 1)))) {
		aes_crypt_copy(in, out, key, cword);
		return;
	}

	padlock_xcrypt(in, out, key, cword);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	/* A single block goes through the page-boundary-safe helper. */
	if (count == 1) {
		aes_crypt(input, output, key, control_word);
		return;
	}

	/* If count is odd, crypt one block first, then the even remainder. */
	asm volatile ("test $1, %%cl;"
		      "je 1f;"
		      "lea -1(%%ecx), %%eax;"
		      "mov $1, %%ecx;"
		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
		      "mov %%eax, %%ecx;"
		      "1:"
		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count)
		      : "ax");
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	/* The IV is passed in and handed back in %eax. */
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	/*
	 * last_cword only guards the cached key, and both control words
	 * of this ctx describe the same key, so the encrypt cword doubles
	 * as the per-context token on the decrypt path as well.
	 */
	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}

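/* Plain "aes": one 16-byte block at a time through the ACE. */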
static struct crypto_alg aes_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-padlock",
	.cra_priority = PADLOCK_CRA_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aes_ctx),
	.cra_alignmask = PADLOCK_ALIGNMENT - 1,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = aes_encrypt,
			.cia_decrypt = aes_decrypt,
		}
	}
};

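/*
 * The blkcipher helpers below walk the src/dst scatterlists chunk by
 * chunk and feed whole blocks to the engine; the sub-block remainder
 * is returned to blkcipher_walk_done() to deal with.
 */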
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-padlock",
	.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aes_ctx),
	.cra_alignmask = PADLOCK_ALIGNMENT - 1,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = aes_set_key,
			.encrypt = ecb_aes_encrypt,
			.decrypt = ecb_aes_decrypt,
		}
	}
};

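/*
 * CBC: xcryptcbc returns a pointer to the last ciphertext block, which
 * is the IV to chain into the next chunk of the walk.
 */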
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-padlock",
	.cra_priority = PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct aes_ctx),
	.cra_alignmask = PADLOCK_ALIGNMENT - 1,
	.cra_type = &crypto_blkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_list = LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u = {
		.blkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = aes_set_key,
			.encrypt = cbc_aes_encrypt,
			.decrypt = cbc_aes_decrypt,
		}
	}
};

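/*
 * Probe the xcrypt feature bits, then register the three flavours;
 * unregister in reverse order if any registration fails.
 */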
static int __init padlock_init(void)
{
	int ret;

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("aes");