linux/crypto/pcrypt.c
/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

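/*
 * Per-direction pcrypt state: the padata instance that parallelizes
 * requests, the workqueue it runs on, the callback cpumask described
 * below, and the notifier block used to track serial cpumask changes.
 */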
struct padata_pcrypt {
        struct padata_instance *pinst;
        struct workqueue_struct *wq;

        /*
         * Cpumask for callback CPUs. It should be
         * equal to the serial cpumask of the corresponding padata instance,
         * so it is updated when padata notifies us about serial
         * cpumask changes.
         *
         * cb_cpumask is protected by RCU. This fact prevents us from
         * using cpumask_var_t directly because the actual type of
         * cpumask_var_t depends on the kernel configuration (particularly
         * on CONFIG_CPUMASK_OFFSTACK). Depending on the configuration,
         * cpumask_var_t may be either a pointer to a struct cpumask
         * or a variable allocated on the stack. Thus we cannot safely use
         * cpumask_var_t with RCU operations such as rcu_assign_pointer or
         * rcu_dereference. So cpumask_var_t is wrapped in struct
         * pcrypt_cpumask, which makes it possible to use it with RCU.
         */
        struct pcrypt_cpumask {
                cpumask_var_t mask;
        } *cb_cpumask;
        struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset           *pcrypt_kset;

struct pcrypt_instance_ctx {
        struct crypto_spawn spawn;
        unsigned int tfm_count;
};

struct pcrypt_aead_ctx {
        struct crypto_aead *child;
        unsigned int cb_cpu;
};

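/*
 * Pick the callback CPU for a request and hand the request to padata.
 * If the caller's preferred CPU is not in the current callback cpumask,
 * map it onto the mask by taking cpu % weight and walking to that entry,
 * and remember the choice in *cb_cpu for subsequent requests.
 */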
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
                              struct padata_pcrypt *pcrypt)
{
        unsigned int cpu_index, cpu, i;
        struct pcrypt_cpumask *cpumask;

        cpu = *cb_cpu;

        rcu_read_lock_bh();
        cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
        if (cpumask_test_cpu(cpu, cpumask->mask))
                goto out;

        if (!cpumask_weight(cpumask->mask))
                goto out;

        cpu_index = cpu % cpumask_weight(cpumask->mask);

        cpu = cpumask_first(cpumask->mask);
        for (i = 0; i < cpu_index; i++)
                cpu = cpumask_next(cpu, cpumask->mask);

        *cb_cpu = cpu;

out:
        rcu_read_unlock_bh();
        return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

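/*
 * setkey() and setauthsize() are simply forwarded to the underlying
 * (child) aead transform; pcrypt adds no key handling of its own.
 */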
static int pcrypt_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

        return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

        return crypto_aead_setauthsize(ctx->child, authsize);
}

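/*
 * Serialization callbacks. padata invokes these in the original
 * submission order; they complete the user's request with the result
 * stashed in padata->info.
 */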
static void pcrypt_aead_serial(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_giv_serial(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

        aead_request_complete(req->areq.base.data, padata->info);
}

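/*
 * Completion callback for the child transform when it finishes
 * asynchronously: record the result and hand the request back to padata
 * for in-order serialization. MAY_SLEEP is cleared on the original
 * request since its completion may run from atomic context.
 */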
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
        struct aead_request *req = areq->data;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct padata_priv *padata = pcrypt_request_padata(preq);

        padata->info = err;
        req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        padata_do_serial(padata);
}

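/*
 * Parallel worker for encryption: runs the child encrypt on the padata
 * worker CPU. -EINPROGRESS means the child completes asynchronously via
 * pcrypt_aead_done; any other result is serialized immediately.
 */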
static void pcrypt_aead_enc(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_encrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

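/*
 * Encryption entry point: mirror the user's request into the child
 * request embedded in the pcrypt request context and dispatch it to the
 * pencrypt padata instance. On success the caller sees -EINPROGRESS and
 * the request is completed later through the serial callback.
 */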
static int pcrypt_aead_encrypt(struct aead_request *req)
{
        int err;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct aead_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(req);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_enc;
        padata->serial = pcrypt_aead_serial;

        aead_request_set_tfm(creq, ctx->child);
        aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                  pcrypt_aead_done, req);
        aead_request_set_crypt(creq, req->src, req->dst,
                               req->cryptlen, req->iv);
        aead_request_set_assoc(creq, req->assoc, req->assoclen);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

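/*
 * Decryption mirrors the encryption path, but dispatches to the separate
 * pdecrypt padata instance, so decryption load is spread independently
 * of encryption.
 */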
static void pcrypt_aead_dec(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_decrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
        int err;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct aead_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(req);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_dec;
        padata->serial = pcrypt_aead_serial;

        aead_request_set_tfm(creq, ctx->child);
        aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                  pcrypt_aead_done, req);
        aead_request_set_crypt(creq, req->src, req->dst,
                               req->cryptlen, req->iv);
        aead_request_set_assoc(creq, req->assoc, req->assoclen);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

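/*
 * The givencrypt path is the same as encryption, but for IV-generating
 * requests (aead_givcrypt_request): the IV destination and sequence
 * number are propagated to the child request as well.
 */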
static void pcrypt_aead_givenc(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_givencrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
        int err;
        struct aead_request *areq = &req->areq;
        struct pcrypt_request *preq = aead_request_ctx(areq);
        struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(areq);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_givenc;
        padata->serial = pcrypt_aead_giv_serial;

        aead_givcrypt_set_tfm(creq, ctx->child);
        aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                   pcrypt_aead_done, areq);
        aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
                                areq->cryptlen, areq->iv);
        aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
        aead_givcrypt_set_giv(creq, req->giv, req->seq);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
        if (!err)
                return -EINPROGRESS;

        return err;
}

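/*
 * Per-tfm initialization: assign each new tfm a callback CPU in
 * round-robin fashion over the online CPUs, instantiate the child aead,
 * and size the request context to hold the pcrypt bookkeeping plus the
 * child's own request.
 */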
static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
        int cpu, cpu_index;
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aead *cipher;

        ictx->tfm_count++;

        cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);

        ctx->cb_cpu = cpumask_first(cpu_online_mask);
        for (cpu = 0; cpu < cpu_index; cpu++)
                ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

        cipher = crypto_spawn_aead(crypto_instance_ctx(inst));

        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
                + sizeof(struct aead_givcrypt_request)
                + crypto_aead_reqsize(cipher);

        return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
        struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

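/*
 * Allocate a template instance wrapping the given algorithm. The driver
 * name becomes "pcrypt(<child driver>)" while cra_name stays that of the
 * child, and the priority is raised by 100 so the wrapped version is
 * preferred in algorithm lookups.
 */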
static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
{
        struct crypto_instance *inst;
        struct pcrypt_instance_ctx *ctx;
        int err;

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst) {
                inst = ERR_PTR(-ENOMEM);
                goto out;
        }

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        ctx = crypto_instance_ctx(inst);
        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto out_free_inst;

        inst->alg.cra_priority = alg->cra_priority + 100;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return inst;

out_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}

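/*
 * Build the AEAD instance: mark it async (completion happens from the
 * padata serial path), inherit the geometry of the wrapped algorithm,
 * and wire up the pcrypt operations.
 */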
static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
                                                 u32 type, u32 mask)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;

        alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = pcrypt_alloc_instance(alg);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_aead_type;

        inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
        inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
        inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

        inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

        inst->alg.cra_init = pcrypt_aead_init_tfm;
        inst->alg.cra_exit = pcrypt_aead_exit_tfm;

        inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
        inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
        inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
        inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
        inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

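/*
 * Template entry point: only AEAD algorithms can be wrapped, e.g.
 * "pcrypt(authenc(hmac(sha1),cbc(aes)))".
 */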
static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                return pcrypt_alloc_aead(tb, algt->type, algt->mask);
        }

        return ERR_PTR(-EINVAL);
}

static void pcrypt_free(struct crypto_instance *inst)
{
        struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

        crypto_drop_spawn(&ctx->spawn);
        kfree(inst);
}

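/*
 * padata serial-cpumask change notification: publish a fresh copy of the
 * mask under RCU and free the old one once readers are done. This is
 * what keeps cb_cpumask in sync with the padata instance.
 */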
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
                                        unsigned long val, void *data)
{
        struct padata_pcrypt *pcrypt;
        struct pcrypt_cpumask *new_mask, *old_mask;
        struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

        if (!(val & PADATA_CPU_SERIAL))
                return 0;

        pcrypt = container_of(self, struct padata_pcrypt, nblock);
        new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
        if (!new_mask)
                return -ENOMEM;
        if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
                kfree(new_mask);
                return -ENOMEM;
        }

        old_mask = pcrypt->cb_cpumask;

        cpumask_copy(new_mask->mask, cpumask->cbcpu);
        rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
        synchronize_rcu_bh();

        free_cpumask_var(old_mask->mask);
        kfree(old_mask);
        return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
        int ret;

        pinst->kobj.kset = pcrypt_kset;
        ret = kobject_add(&pinst->kobj, NULL, name);
        if (!ret)
                kobject_uevent(&pinst->kobj, KOBJ_ADD);

        return ret;
}

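/*
 * Set up one padata instance: a dedicated workqueue, the instance
 * itself, an initial callback cpumask (the currently online CPUs), the
 * cpumask notifier and the sysfs entry. Runs under get_online_cpus() so
 * the online mask cannot change underneath us.
 */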
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
                              const char *name)
{
        int ret = -ENOMEM;
        struct pcrypt_cpumask *mask;

        get_online_cpus();

        pcrypt->wq = alloc_workqueue(name,
                                     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
        if (!pcrypt->wq)
                goto err;

        pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
        if (!pcrypt->pinst)
                goto err_destroy_workqueue;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (!mask)
                goto err_free_padata;
        if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
                kfree(mask);
                goto err_free_padata;
        }

        cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
        rcu_assign_pointer(pcrypt->cb_cpumask, mask);

        pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
        ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
        if (ret)
                goto err_free_cpumask;

        ret = pcrypt_sysfs_add(pcrypt->pinst, name);
        if (ret)
                goto err_unregister_notifier;

        put_online_cpus();

        return ret;

err_unregister_notifier:
        padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
        free_cpumask_var(mask->mask);
        kfree(mask);
err_free_padata:
        padata_free(pcrypt->pinst);
err_destroy_workqueue:
        destroy_workqueue(pcrypt->wq);
err:
        put_online_cpus();

        return ret;
}

static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
        free_cpumask_var(pcrypt->cb_cpumask->mask);
        kfree(pcrypt->cb_cpumask);

        padata_stop(pcrypt->pinst);
        padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
        destroy_workqueue(pcrypt->wq);
        padata_free(pcrypt->pinst);
}

static struct crypto_template pcrypt_tmpl = {
        .name = "pcrypt",
        .alloc = pcrypt_alloc,
        .free = pcrypt_free,
        .module = THIS_MODULE,
};

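/*
 * Module init: create the pcrypt kset under /sys/kernel, bring up
 * separate padata instances for encryption and decryption, start them,
 * and register the "pcrypt" template.
 */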
static int __init pcrypt_init(void)
{
        int err = -ENOMEM;

        pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
        if (!pcrypt_kset)
                goto err;

        err = pcrypt_init_padata(&pencrypt, "pencrypt");
        if (err)
                goto err_unreg_kset;

        err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
        if (err)
                goto err_deinit_pencrypt;

        padata_start(pencrypt.pinst);
        padata_start(pdecrypt.pinst);

        err = crypto_register_template(&pcrypt_tmpl);
        if (err)
                goto err_deinit_pdecrypt;

        return 0;

err_deinit_pdecrypt:
        pcrypt_fini_padata(&pdecrypt);
err_deinit_pencrypt:
        pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
        kset_unregister(pcrypt_kset);
err:
        return err;
}


static void __exit pcrypt_exit(void)
{
        crypto_unregister_template(&pcrypt_tmpl);

        pcrypt_fini_padata(&pencrypt);
        pcrypt_fini_padata(&pdecrypt);

        kset_unregister(pcrypt_kset);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");