linux/crypto/seqiv.c
/*
 * seqiv: Sequence Number IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt.  This algorithm is mainly useful for CTR and similar modes.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
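/*
 * Illustrative sketch only (added for exposition, never compiled): one
 * plausible way a caller could drive a "seqiv(ctr(aes))" instance through
 * the givcrypt helpers of this kernel generation.  The function below and
 * its parameters are invented for the example; the crypto_* and
 * skcipher_givcrypt_* calls are the standard caller-side API assumed to be
 * available here, not something this file defines.  Asynchronous completion
 * is deliberately simplified: a real user would install a completion
 * callback and cope with -EINPROGRESS/-EBUSY.
 */
#if 0
static int seqiv_usage_sketch(struct scatterlist *src, struct scatterlist *dst,
                              unsigned int nbytes, const u8 *key,
                              unsigned int keylen, u8 *giv, u64 seq)
{
        struct crypto_ablkcipher *tfm;
        struct skcipher_givcrypt_request *req;
        u8 iv[16];              /* ctr(aes) uses a 16-byte IV */
        int err;

        tfm = crypto_alloc_ablkcipher("seqiv(ctr(aes))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_ablkcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_givcrypt_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        /* iv is scratch space for the IV handed to the inner cipher */
        skcipher_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                       NULL, NULL);
        skcipher_givcrypt_set_crypt(req, src, dst, nbytes, iv);
        /* giv receives the generated IV, seq is the per-message counter */
        skcipher_givcrypt_set_giv(req, giv, seq);

        err = crypto_skcipher_givencrypt(req);

        skcipher_givcrypt_free(req);
out_free_tfm:
        crypto_free_ablkcipher(tfm);
        return err;
}
#endif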

#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

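/*
 * Per-transform state: a lock serializing the one-time salt initialisation
 * and the salt itself.  The salt is ivsize bytes long and is stored in the
 * flexible array at the end of the context (see the cra_ctxsize adjustments
 * in the alloc functions below).
 */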
struct seqiv_ctx {
        spinlock_t lock;
        u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

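/*
 * Completion helper for the unaligned-IV fallback path: once the inner
 * cipher request has finished, copy the generated IV out of the temporary
 * buffer into the caller's IV field and free the buffer.
 */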
static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
{
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
        struct crypto_ablkcipher *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = skcipher_givcrypt_reqtfm(req);
        memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));

out:
        kfree(subreq->info);
}

static void seqiv_complete(struct crypto_async_request *base, int err)
{
        struct skcipher_givcrypt_request *req = base->data;

        seqiv_complete2(req, err);
        skcipher_givcrypt_complete(req, err);
}

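/*
 * AEAD counterparts of the two completion helpers above: propagate the IV
 * generated into the temporary buffer back to the caller's request and
 * release the buffer once the inner AEAD request has completed.
 */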
static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
{
        struct aead_request *subreq = aead_givcrypt_reqctx(req);
        struct crypto_aead *geniv;

        if (err == -EINPROGRESS)
                return;

        if (err)
                goto out;

        geniv = aead_givcrypt_reqtfm(req);
        memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
        kfree(subreq->iv);
}

static void seqiv_aead_complete(struct crypto_async_request *base, int err)
{
        struct aead_givcrypt_request *req = base->data;

        seqiv_aead_complete2(req, err);
        aead_givcrypt_complete(req, err);
}

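/*
 * Construct the IV for one request: write the sequence number as a
 * big-endian 64-bit value into the low-order end of the IV, zero any
 * remaining high-order bytes, then XOR the whole thing with the salt.
 * For example, with a 16-byte IV, bytes 0-7 end up as salt[0..7] and
 * bytes 8-15 as salt[8..15] ^ be64(seq).
 */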
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
                        unsigned int ivsize)
{
        unsigned int len = ivsize;

        if (ivsize > sizeof(u64)) {
                memset(info, 0, ivsize - sizeof(u64));
                len = sizeof(u64);
        }
        seq = cpu_to_be64(seq);
        memcpy(info + ivsize - len, &seq, len);
        crypto_xor(info, ctx->salt, ivsize);
}

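/*
 * Encrypt with IV generation for ablkcipher users: set up the subrequest
 * (stored in the givcrypt request context) for the underlying cipher,
 * generate the IV in the caller's IV buffer, copy it to req->giv and run
 * the encryption.  If the caller's IV buffer does not satisfy the
 * algorithm's alignment mask, a temporary buffer is used instead and
 * copied back on completion.
 */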
static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
        crypto_completion_t complete;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

        complete = req->creq.base.complete;
        data = req->creq.base.data;
        info = req->creq.info;

        ivsize = crypto_ablkcipher_ivsize(geniv);

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_ablkcipher_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, req->creq.base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                complete = seqiv_complete;
                data = req;
        }

        ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
                                        data);
        ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
                                     req->creq.nbytes, info);

        seqiv_geniv(ctx, info, req->seq, ivsize);
        memcpy(req->giv, info, ivsize);

        err = crypto_ablkcipher_encrypt(subreq);
        if (unlikely(info != req->creq.info))
                seqiv_complete2(req, err);
        return err;
}

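/*
 * AEAD flavour of the generator: identical in structure to
 * seqiv_givencrypt(), but it also carries the associated data through to
 * the inner AEAD request.
 */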
static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
{
        struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *areq = &req->areq;
        struct aead_request *subreq = aead_givcrypt_reqctx(req);
        crypto_completion_t complete;
        void *data;
        u8 *info;
        unsigned int ivsize;
        int err;

        aead_request_set_tfm(subreq, aead_geniv_base(geniv));

        complete = areq->base.complete;
        data = areq->base.data;
        info = areq->iv;

        ivsize = crypto_aead_ivsize(geniv);

        if (unlikely(!IS_ALIGNED((unsigned long)info,
                                 crypto_aead_alignmask(geniv) + 1))) {
                info = kmalloc(ivsize, areq->base.flags &
                                       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
                                                                  GFP_ATOMIC);
                if (!info)
                        return -ENOMEM;

                complete = seqiv_aead_complete;
                data = req;
        }

        aead_request_set_callback(subreq, areq->base.flags, complete, data);
        aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
                               info);
        aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);

        seqiv_geniv(ctx, info, req->seq, ivsize);
        memcpy(req->giv, info, ivsize);

        err = crypto_aead_encrypt(subreq);
        if (unlikely(info != areq->iv))
                seqiv_aead_complete2(req, err);
        return err;
}

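/*
 * First-use entry point: under the context lock, fill the salt from the
 * default RNG and switch the instance's givencrypt hook over to
 * seqiv_givencrypt() so that later requests skip this step, then handle
 * the current request.
 */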
static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
{
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        int err = 0;

        spin_lock_bh(&ctx->lock);
        if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
                goto unlock;

        crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_ablkcipher_ivsize(geniv));

unlock:
        spin_unlock_bh(&ctx->lock);

        if (err)
                return err;

        return seqiv_givencrypt(req);
}

static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
{
        struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
        int err = 0;

        spin_lock_bh(&ctx->lock);
        if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
                goto unlock;

        crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
        err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
                                   crypto_aead_ivsize(geniv));

unlock:
        spin_unlock_bh(&ctx->lock);

        if (err)
                return err;

        return seqiv_aead_givencrypt(req);
}

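/*
 * Transform initialisation: set up the salt lock and reserve room in the
 * request context for the subrequest handed to the underlying algorithm.
 * The AEAD variant below does the same for an aead_request.
 */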
static int seqiv_init(struct crypto_tfm *tfm)
{
        struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);

        spin_lock_init(&ctx->lock);

        tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

        return skcipher_geniv_init(tfm);
}

static int seqiv_aead_init(struct crypto_tfm *tfm)
{
        struct crypto_aead *geniv = __crypto_aead_cast(tfm);
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);

        spin_lock_init(&ctx->lock);

        tfm->crt_aead.reqsize = sizeof(struct aead_request);

        return aead_geniv_init(tfm);
}

static struct crypto_template seqiv_tmpl;

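/*
 * Instance constructors for the two supported algorithm types: each wires
 * up the first-use givencrypt hook and the init/exit handlers, and accounts
 * for the ivsize bytes of salt in the context size (struct seqiv_ctx itself
 * is added in seqiv_alloc() below).
 */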
static struct crypto_instance *seqiv_ablkcipher_alloc(struct rtattr **tb)
{
        struct crypto_instance *inst;

        inst = skcipher_geniv_alloc(&seqiv_tmpl, tb, 0, 0);

        if (IS_ERR(inst))
                goto out;

        inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;

        inst->alg.cra_init = seqiv_init;
        inst->alg.cra_exit = skcipher_geniv_exit;

        inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;

out:
        return inst;
}

static struct crypto_instance *seqiv_aead_alloc(struct rtattr **tb)
{
        struct crypto_instance *inst;

        inst = aead_geniv_alloc(&seqiv_tmpl, tb, 0, 0);

        if (IS_ERR(inst))
                goto out;

        inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;

        inst->alg.cra_init = seqiv_aead_init;
        inst->alg.cra_exit = aead_geniv_exit;

        inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;

out:
        return inst;
}

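/*
 * Template entry point: take a reference on the default RNG (later used to
 * seed the salt), dispatch to the ablkcipher or AEAD constructor depending
 * on the requested algorithm type, then add the context size and u32
 * alignment shared by both flavours.
 */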
static struct crypto_instance *seqiv_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;
        struct crypto_instance *inst;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        err = crypto_get_default_rng();
        if (err)
                return ERR_PTR(err);

        if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
                inst = seqiv_ablkcipher_alloc(tb);
        else
                inst = seqiv_aead_alloc(tb);

        if (IS_ERR(inst))
                goto put_rng;

        inst->alg.cra_alignmask |= __alignof__(u32) - 1;
        inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

out:
        return inst;

put_rng:
        crypto_put_default_rng();
        goto out;
}

static void seqiv_free(struct crypto_instance *inst)
{
        if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
                skcipher_geniv_free(inst);
        else
                aead_geniv_free(inst);
        crypto_put_default_rng();
}

static struct crypto_template seqiv_tmpl = {
        .name = "seqiv",
        .alloc = seqiv_alloc,
        .free = seqiv_free,
        .module = THIS_MODULE,
};

static int __init seqiv_module_init(void)
{
        return crypto_register_template(&seqiv_tmpl);
}

static void __exit seqiv_module_exit(void)
{
        crypto_unregister_template(&seqiv_tmpl);
}

module_init(seqiv_module_init);
module_exit(seqiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sequence Number IV Generator");