linux/crypto/cryptd.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd max queue depth");

static struct workqueue_struct *cryptd_wq;

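/*
 * Queuing is per-CPU: every possible CPU gets its own crypto_queue and
 * work_struct.  Requests are queued on the CPU that submits them and are
 * then drained, one request per work invocation, by cryptd_queue_worker()
 * running on the cryptd workqueue.
 */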
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
        refcount_t refcnt;
        struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        refcount_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        refcount_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

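/*
 * Each of cryptd_skcipher_ctx, cryptd_hash_ctx and cryptd_aead_ctx starts
 * with a refcount_t, so the tfm context can be treated as a bare refcount
 * here.  Only tfms handed out by the cryptd_alloc_*() helpers below arm the
 * count (set it non-zero); for everything else the refcount_read() check
 * keeps this a no-op, and the matching decrement happens on completion.
 */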
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;
        refcount_t *refcnt;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);

        if (err == -ENOSPC)
                goto out_put_cpu;

        queue_work_on(cpu, cryptd_wq, &cpu_queue->work);

        if (!refcount_read(refcnt))
                goto out_put_cpu;

        refcount_inc(refcnt);

out_put_cpu:
        put_cpu();

        return err;
}

/*
 * Called in workqueue context: perform one real encryption/decryption
 * operation (via req->complete) and reschedule itself if there is more
 * work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging the crypto
         * workqueue.  preempt_disable/enable is used to prevent being
         * preempted by cryptd_enqueue_request().  local_bh_disable/enable
         * is used to prevent cryptd_enqueue_request() from running in
         * softirq context on this CPU while the queue is manipulated.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
                                 u32 *type, u32 *mask)
{
        /*
         * cryptd is allowed to wrap internal algorithms, but in that case the
         * resulting cryptd instance will be marked as internal as well.
         */
        *type = algt->type & CRYPTO_ALG_INTERNAL;
        *mask = algt->mask & CRYPTO_ALG_INTERNAL;

        /* No point in cryptd wrapping an algorithm that's already async. */
        *mask |= CRYPTO_ALG_ASYNC;

        *mask |= crypto_algt_inherited_mask(algt);
}

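/*
 * The instance is registered at the wrapped algorithm's priority plus 50,
 * so that lookups by cra_name prefer the cryptd (async) version once it
 * exists.
 */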
static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_sync_skcipher *child = ctx->child;

        crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(child,
                                       crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        return crypto_sync_skcipher_setkey(child, key, keylen);
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}

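/*
 * The encrypt/decrypt handlers below run as the request's completion
 * callback in workqueue context: err == 0 means "do the real work now",
 * synchronously, on the child tfm, while -EINPROGRESS is just the backlog
 * notification from cryptd_queue_worker() being passed through to the
 * original completion function.
 */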
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *child = ctx->child;
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_sync_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *child = ctx->child;
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_sync_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = (struct crypto_sync_skcipher *)cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
        kfree(inst);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct crypto_attr_type *algt,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
                                   crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_skcipher_free(inst);
        }
        return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}

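/*
 * Per-request hash state lives in the shash_desc embedded in the request
 * context (cryptd_hash_init_tfm() sizes the request for it), which is also
 * why cryptd_hash_export()/cryptd_hash_import() below can simply operate
 * on the child descriptor.
 */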
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;

        return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
        struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

        crypto_drop_shash(&ctx->spawn);
        kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct crypto_attr_type *algt,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
                                crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;
        alg = crypto_spawn_shash_alg(&ctx->spawn);

        err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
                                        CRYPTO_ALG_OPTIONAL_KEY));
        inst->alg.halg.digestsize = alg->digestsize;
        inst->alg.halg.statesize = alg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init   = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final  = cryptd_hash_final_enqueue;
        inst->alg.finup  = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        if (crypto_shash_alg_has_setkey(alg))
                inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        inst->free = cryptd_hash_free;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_hash_free(inst);
        }
        return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}

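/*
 * Unlike the skcipher path there is no on-stack subrequest here: the
 * original aead_request is retargeted at the child tfm and the child
 * algorithm's encrypt/decrypt hook is called on it directly.  This is why
 * cryptd_aead_init_tfm() sizes the request context to at least the child's
 * reqsize.
 */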
static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;
        struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        compl = rctx->complete;

        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
                         crypto_aead_reqsize(cipher)));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
        struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

        crypto_drop_aead(&ctx->aead_spawn);
        kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct crypto_attr_type *algt,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
                               crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        inst->free = cryptd_aead_free;

        err = aead_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_aead_free(inst);
        }
        return err;
}

static struct cryptd_queue queue;

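/*
 * All cryptd instances share the single global queue above; template
 * instantiation below merely dispatches on the requested algorithm type.
 */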
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_SKCIPHER:
                return cryptd_create_skcipher(tmpl, tb, algt, &queue);
        case CRYPTO_ALG_TYPE_HASH:
                return cryptd_create_hash(tmpl, tb, algt, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, algt, &queue);
        }

        return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .module = THIS_MODULE,
};

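/*
 * The cryptd_alloc_*() helpers below are for users that want a handle on
 * both the async wrapper and its synchronous child, such as SIMD drivers
 * that call the child directly whenever the FPU is usable.  A minimal
 * usage sketch (the "__driver-cbc-aes-xyz" child algorithm name is
 * hypothetical):
 *
 *      struct cryptd_skcipher *ctfm;
 *
 *      ctfm = cryptd_alloc_skcipher("__driver-cbc-aes-xyz",
 *                                   CRYPTO_ALG_INTERNAL,
 *                                   CRYPTO_ALG_INTERNAL);
 *      if (IS_ERR(ctfm))
 *              return PTR_ERR(ctfm);
 *      child = cryptd_skcipher_child(ctfm);
 *      ...
 *      cryptd_free_skcipher(ctfm);
 *
 * The refcount armed here keeps the tfm alive until the user has freed it
 * *and* all queued requests have completed; cryptd_*_queued() reports
 * whether requests are still outstanding (refcount > 1).
 */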
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);

        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);

        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

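/*
 * Module init/exit.  The workqueue is allocated with WQ_MEM_RECLAIM,
 * presumably because cryptd can sit on I/O paths that must make progress
 * under memory pressure, and WQ_CPU_INTENSIVE since the work items run the
 * actual (potentially long) crypto operations.
 */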
static int __init cryptd_init(void)
{
        int err;

        cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
                                    1);
        if (!cryptd_wq)
                return -ENOMEM;

        err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
        if (err)
                goto err_destroy_wq;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                goto err_fini_queue;

        return 0;

err_fini_queue:
        cryptd_fini_queue(&queue);
err_destroy_wq:
        destroy_workqueue(cryptd_wq);
        return err;
}

static void __exit cryptd_exit(void)
{
        destroy_workqueue(cryptd_wq);
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");