/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

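/*
 * Each CPU gets its own request queue and work item.  A request is
 * queued on the CPU that submitted it and is later processed from the
 * crypto workqueue (kcrypto_wq) on that same CPU.
 */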
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        struct crypto_hash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

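/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item on kcrypto_wq.  get_cpu()/put_cpu() keep the submitter on one
 * CPU while the queue is touched and the work is scheduled.
 */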
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
        err = crypto_enqueue_request(&cpu_queue->queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}

/* Called in workqueue context: do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /* Only handle one request at a time to avoid hogging the crypto
         * workqueue.  preempt_disable/enable prevents this section from
         * being preempted by cryptd_enqueue_request(). */
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

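/*
 * The ablkcipher half: the instance exposes an asynchronous ablkcipher
 * whose operations are backed by the synchronous blkcipher "child"
 * transform stored in the context.  setkey forwards the key to the
 * child, copying request flags in and result flags back out.
 */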
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

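/*
 * Execute one queued request against the synchronous child cipher.
 * This runs in workqueue (process) context via the completion hook
 * installed at enqueue time, so the child may sleep
 * (CRYPTO_TFM_REQ_MAY_SLEEP).  A call with err == -EINPROGRESS is only
 * the backlog notification and is passed straight through.  The
 * caller's original completion handler, saved in the request context,
 * is invoked with bottom halves disabled.
 */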
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct blkcipher_desc desc;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t complete)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = complete;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}

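/*
 * Allocate the part of a cryptd instance common to both algorithm
 * types: the driver name becomes "cryptd(<child driver name>)", the
 * child algorithm is held via a spawn, and the priority is set 50
 * above the underlying algorithm's.
 */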
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
                                                     struct cryptd_queue *queue)
{
        struct crypto_instance *inst;
        struct cryptd_instance_ctx *ctx;
        int err;

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst) {
                inst = ERR_PTR(-ENOMEM);
                goto out;
        }

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        ctx = crypto_instance_ctx(inst);
        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        ctx->queue = queue;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return inst;

out_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}

static struct crypto_instance *cryptd_alloc_blkcipher(
        struct rtattr **tb, struct cryptd_queue *queue)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
                                  CRYPTO_ALG_TYPE_MASK);
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = cryptd_alloc_instance(alg, queue);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

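/*
 * The hash half mirrors the ablkcipher half: an asynchronous ahash
 * front end whose init/update/final/digest operations are queued and
 * then carried out by the synchronous crypto_hash child.
 */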
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_hash *cipher;

        cipher = crypto_spawn_hash(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ahash.reqsize =
                sizeof(struct cryptd_hash_request_ctx);
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_hash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
        struct crypto_hash     *child = ctx->child;
        int err;

        crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_hash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                                crypto_completion_t complete)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;

        return cryptd_enqueue_request(queue, &req->base);
}

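/*
 * The four workers below share one pattern: build a hash_desc for the
 * synchronous child, perform the corresponding operation, restore the
 * caller's completion handler and invoke it with bottom halves
 * disabled.  An err of -EINPROGRESS is only the backlog notification,
 * so the real work is skipped in that case.
 */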
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx   = crypto_tfm_ctx(req_async->tfm);
        struct crypto_hash     *child = ctx->child;
        struct ahash_request    *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;
        struct hash_desc desc;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_crt(child)->init(&desc);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx   = crypto_tfm_ctx(req_async->tfm);
        struct crypto_hash     *child = ctx->child;
        struct ahash_request    *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;
        struct hash_desc desc;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_crt(child)->update(&desc,
                                                req->src,
                                                req->nbytes);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx   = crypto_tfm_ctx(req_async->tfm);
        struct crypto_hash     *child = ctx->child;
        struct ahash_request    *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;
        struct hash_desc desc;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_crt(child)->final(&desc, req->result);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx   = crypto_tfm_ctx(req_async->tfm);
        struct crypto_hash     *child = ctx->child;
        struct ahash_request    *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;
        struct hash_desc desc;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_hash_crt(child)->digest(&desc,
                                                req->src,
                                                req->nbytes,
                                                req->result);

        req->base.complete = rctx->complete;

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static struct crypto_instance *cryptd_alloc_hash(
        struct rtattr **tb, struct cryptd_queue *queue)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;

        alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
                                  CRYPTO_ALG_TYPE_HASH_MASK);
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = cryptd_alloc_instance(alg, queue);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_ahash_type;

        inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
        inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.cra_init = cryptd_hash_init_tfm;
        inst->alg.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.cra_ahash.init   = cryptd_hash_init_enqueue;
        inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
        inst->alg.cra_ahash.final  = cryptd_hash_final_enqueue;
        inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
        inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

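/*
 * All cryptd instances created through the template share this single
 * module-wide queue; cryptd_alloc() dispatches on the requested
 * algorithm type when an instance is created.
 */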
static struct cryptd_queue queue;

static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_alloc_blkcipher(tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_alloc_hash(tb, &queue);
        }

        return ERR_PTR(-EINVAL);
}

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

        crypto_drop_spawn(&ctx->spawn);
        kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .alloc = cryptd_alloc,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

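/*
 * cryptd_alloc_ablkcipher - explicitly allocate a cryptd-wrapped cipher
 *
 * For kernel code that wants the cryptd wrapper without going through
 * algorithm lookup by template name.  A minimal, purely illustrative
 * sketch of a caller (the algorithm name, type and mask used here are
 * hypothetical):
 *
 *      struct cryptd_ablkcipher *ctfm;
 *
 *      ctfm = cryptd_alloc_ablkcipher("cbc(aes)", 0, 0);
 *      if (IS_ERR(ctfm))
 *              return PTR_ERR(ctfm);
 *      ... submit ablkcipher requests against &ctfm->base ...
 *      cryptd_free_ablkcipher(ctfm);
 *
 * The allocated transform is rejected unless it was actually created by
 * this module, so callers always get a real cryptd instance back.
 */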
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ablkcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ablkcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __cryptd_ablkcipher_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

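/*
 * Module init: the per-CPU queues must exist before the template is
 * registered, since instances created from the template queue requests
 * onto them.
 */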
static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");