linux/crypto/blkcipher.c
/*
 * Block chaining cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
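
/*
 * Typical use by a cipher mode (a minimal sketch, not part of this file;
 * crypto/cbc.c follows the same pattern).  process_blocks() is a
 * hypothetical stand-in for the mode's per-chunk routine and returns the
 * number of bytes it left unprocessed:
 *
 *        struct blkcipher_walk walk;
 *        int err;
 *
 *        blkcipher_walk_init(&walk, dst, src, nbytes);
 *        err = blkcipher_walk_virt(desc, &walk);
 *
 *        while (walk.nbytes) {
 *                nbytes = process_blocks(desc, &walk);
 *                err = blkcipher_walk_done(desc, &walk, nbytes);
 *        }
 *
 *        return err;
 */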
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>

#include "internal.h"
#include "scatterwalk.h"

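/*
 * Walk state flags.  PHYS: the caller asked for physical page/offset
 * pairs rather than mapped virtual addresses.  SLOW: the current chunk
 * was gathered into an aligned bounce buffer because less than one block
 * of contiguous data was available.  COPY: the chunk is copied into a
 * borrowed page to satisfy the algorithm's alignment mask.  DIFF: source
 * and destination differ and the destination needs its own mapping.
 */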
enum {
        BLKCIPHER_WALK_PHYS = 1 << 0,
        BLKCIPHER_WALK_SLOW = 1 << 1,
        BLKCIPHER_WALK_COPY = 1 << 2,
        BLKCIPHER_WALK_DIFF = 1 << 3,
};

static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk);
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk);

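/* Map and unmap the page currently under the source/destination walk. */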
static inline void blkcipher_map_src(struct blkcipher_walk *walk)
{
        walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
}

static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
{
        walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
}

static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->src.virt.addr, 0);
}

static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
{
        scatterwalk_unmap(walk->dst.virt.addr, 1);
}

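/*
 * Return a position for a len-byte region at or after @start that does
 * not straddle a page boundary: if [start, start + len) would cross into
 * the next page, the region is moved up to the start of that page.
 */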
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
        if (offset_in_page(start + len) < len)
                return (u8 *)((unsigned long)(start + len) & PAGE_MASK);
        return start;
}

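/*
 * Slow-path completion: copy the block that was processed in the aligned
 * bounce buffer back out to the destination scatterlist.
 */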
static inline unsigned int blkcipher_done_slow(struct crypto_blkcipher *tfm,
                                               struct blkcipher_walk *walk,
                                               unsigned int bsize)
{
        u8 *addr;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        addr = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        addr = blkcipher_get_spot(addr, bsize);
        scatterwalk_copychunks(addr, &walk->out, bsize, 1);
        return bsize;
}

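/*
 * Fast/copy-path completion: @n is the number of bytes left unprocessed
 * by the caller.  Flush the bounce page to the destination (COPY path),
 * or drop the kernel mappings (fast path), then advance both scatterlist
 * walks past the bytes that were processed.
 */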
static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
                                               unsigned int n)
{
        n = walk->nbytes - n;

        if (walk->flags & BLKCIPHER_WALK_COPY) {
                blkcipher_map_dst(walk);
                memcpy(walk->dst.virt.addr, walk->page, n);
                blkcipher_unmap_dst(walk);
        } else if (!(walk->flags & BLKCIPHER_WALK_PHYS)) {
                blkcipher_unmap_src(walk);
                if (walk->flags & BLKCIPHER_WALK_DIFF)
                        blkcipher_unmap_dst(walk);
        }

        scatterwalk_advance(&walk->in, n);
        scatterwalk_advance(&walk->out, n);

        return n;
}

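/*
 * Called by the mode implementation after each chunk.  @err is the
 * number of bytes it left unprocessed (>= 0) or a negative error code.
 * Accounts for the consumed bytes; if data remains, yields to the
 * scheduler when allowed and sets up the next chunk, otherwise copies a
 * bounced IV back to the caller's buffer and frees any temporary memory.
 */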
int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int nbytes = 0;

        if (likely(err >= 0)) {
                unsigned int bsize = crypto_blkcipher_blocksize(tfm);
                unsigned int n;

                if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
                        n = blkcipher_done_fast(walk, err);
                else
                        n = blkcipher_done_slow(tfm, walk, bsize);

                nbytes = walk->total - n;
                err = 0;
        }

        scatterwalk_done(&walk->in, 0, nbytes);
        scatterwalk_done(&walk->out, 1, nbytes);

        walk->total = nbytes;
        walk->nbytes = nbytes;

        if (nbytes) {
                crypto_yield(desc->flags);
                return blkcipher_walk_next(desc, walk);
        }

        if (walk->iv != desc->info)
                memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
        if (walk->buffer != walk->page)
                kfree(walk->buffer);
        if (walk->page)
                free_page((unsigned long)walk->page);

        return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);

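/*
 * Slow path: less than a full block of contiguous data is available, so
 * gather one block into an aligned bounce buffer (allocated on first
 * use) and point the virtual src/dst addresses into that buffer.
 */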
static inline int blkcipher_next_slow(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk,
                                      unsigned int bsize,
                                      unsigned int alignmask)
{
        unsigned int n;

        if (walk->buffer)
                goto ok;

        walk->buffer = walk->page;
        if (walk->buffer)
                goto ok;

        n = bsize * 2 + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
        walk->buffer = kmalloc(n, GFP_ATOMIC);
        if (!walk->buffer)
                return blkcipher_walk_done(desc, walk, -ENOMEM);

ok:
        walk->dst.virt.addr = (u8 *)ALIGN((unsigned long)walk->buffer,
                                          alignmask + 1);
        walk->dst.virt.addr = blkcipher_get_spot(walk->dst.virt.addr, bsize);
        walk->src.virt.addr = blkcipher_get_spot(walk->dst.virt.addr + bsize,
                                                 bsize);

        scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

        walk->nbytes = bsize;
        walk->flags |= BLKCIPHER_WALK_SLOW;

        return 0;
}

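/*
 * Copy path: the scatterlist data does not satisfy the alignment mask,
 * so copy the chunk into the preallocated page and process it in place
 * there.
 */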
static inline int blkcipher_next_copy(struct blkcipher_walk *walk)
{
        u8 *tmp = walk->page;

        blkcipher_map_src(walk);
        memcpy(tmp, walk->src.virt.addr, walk->nbytes);
        blkcipher_unmap_src(walk);

        walk->src.virt.addr = tmp;
        walk->dst.virt.addr = tmp;

        return 0;
}

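/*
 * Fast path: operate directly on the caller's pages.  For virtual walks
 * the source is mapped and the destination only gets a mapping of its
 * own when it differs from the source (BLKCIPHER_WALK_DIFF).
 */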
static inline int blkcipher_next_fast(struct blkcipher_desc *desc,
                                      struct blkcipher_walk *walk)
{
        unsigned long diff;

        walk->src.phys.page = scatterwalk_page(&walk->in);
        walk->src.phys.offset = offset_in_page(walk->in.offset);
        walk->dst.phys.page = scatterwalk_page(&walk->out);
        walk->dst.phys.offset = offset_in_page(walk->out.offset);

        if (walk->flags & BLKCIPHER_WALK_PHYS)
                return 0;

        diff = walk->src.phys.offset - walk->dst.phys.offset;
        diff |= walk->src.virt.page - walk->dst.virt.page;

        blkcipher_map_src(walk);
        walk->dst.virt.addr = walk->src.virt.addr;

        if (diff) {
                walk->flags |= BLKCIPHER_WALK_DIFF;
                blkcipher_map_dst(walk);
        }

        return 0;
}

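/*
 * Set up the next chunk of the walk: reject inputs shorter than one
 * block, then pick the slow, copy or fast path depending on how much
 * contiguous data is available and whether it meets the alignment mask.
 * For physical walks the bounce buffers are translated back into
 * page/offset pairs.
 */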
static int blkcipher_walk_next(struct blkcipher_desc *desc,
                               struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
        unsigned int bsize = crypto_blkcipher_blocksize(tfm);
        unsigned int n;
        int err;

        n = walk->total;
        if (unlikely(n < bsize)) {
                desc->flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
                return blkcipher_walk_done(desc, walk, -EINVAL);
        }

        walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
                         BLKCIPHER_WALK_DIFF);
        if (!scatterwalk_aligned(&walk->in, alignmask) ||
            !scatterwalk_aligned(&walk->out, alignmask)) {
                walk->flags |= BLKCIPHER_WALK_COPY;
                if (!walk->page) {
                        walk->page = (void *)__get_free_page(GFP_ATOMIC);
                        if (!walk->page)
                                n = 0;
                }
        }

        n = scatterwalk_clamp(&walk->in, n);
        n = scatterwalk_clamp(&walk->out, n);

        if (unlikely(n < bsize)) {
                err = blkcipher_next_slow(desc, walk, bsize, alignmask);
                goto set_phys_lowmem;
        }

        walk->nbytes = n;
        if (walk->flags & BLKCIPHER_WALK_COPY) {
                err = blkcipher_next_copy(walk);
                goto set_phys_lowmem;
        }

        return blkcipher_next_fast(desc, walk);

set_phys_lowmem:
        if (walk->flags & BLKCIPHER_WALK_PHYS) {
                walk->src.phys.page = virt_to_page(walk->src.virt.addr);
                walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
                walk->src.phys.offset &= PAGE_SIZE - 1;
                walk->dst.phys.offset &= PAGE_SIZE - 1;
        }
        return err;
}

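/*
 * The caller's IV does not meet the algorithm's alignment mask, so
 * duplicate it into a freshly allocated, suitably aligned buffer that
 * does not straddle a page boundary.
 */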
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
                                    struct crypto_blkcipher *tfm,
                                    unsigned int alignmask)
{
        unsigned bs = crypto_blkcipher_blocksize(tfm);
        unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
        unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
        u8 *iv;

        size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
        walk->buffer = kmalloc(size, GFP_ATOMIC);
        if (!walk->buffer)
                return -ENOMEM;

        iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
        iv = blkcipher_get_spot(iv, bs) + bs;
        iv = blkcipher_get_spot(iv, bs) + bs;
        iv = blkcipher_get_spot(iv, ivsize);

        walk->iv = memcpy(iv, walk->iv, ivsize);
        return 0;
}

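/*
 * Entry points for starting a walk: the virt variant yields mapped
 * kernel addresses in walk->src/dst.virt, the phys variant yields
 * page/offset pairs in walk->src/dst.phys.
 */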
int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags &= ~BLKCIPHER_WALK_PHYS;
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_virt);

int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk)
{
        walk->flags |= BLKCIPHER_WALK_PHYS;
        return blkcipher_walk_first(desc, walk);
}
EXPORT_SYMBOL_GPL(blkcipher_walk_phys);

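/*
 * Common start-of-walk setup: bounce the IV into an aligned buffer when
 * needed, prime both scatterlist walks and hand over to
 * blkcipher_walk_next() for the first chunk.
 */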
static int blkcipher_walk_first(struct blkcipher_desc *desc,
                                struct blkcipher_walk *walk)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        unsigned int alignmask = crypto_blkcipher_alignmask(tfm);

        walk->nbytes = walk->total;
        if (unlikely(!walk->total))
                return 0;

        walk->buffer = NULL;
        walk->iv = desc->info;
        if (unlikely(((unsigned long)walk->iv & alignmask))) {
                int err = blkcipher_copy_iv(walk, tfm, alignmask);
                if (err)
                        return err;
        }

        scatterwalk_start(&walk->in, walk->in.sg);
        scatterwalk_start(&walk->out, walk->out.sg);
        walk->page = NULL;

        return blkcipher_walk_next(desc, walk);
}

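/*
 * Generic setkey wrapper: enforce the algorithm's advertised key size
 * range before calling the cipher's own setkey routine.
 */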
static int setkey(struct crypto_tfm *tfm, const u8 *key,
                  unsigned int keylen)
{
        struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;

        if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        return cipher->setkey(tfm, key, keylen);
}

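/*
 * Context size for a blkcipher transform: the algorithm's own context
 * plus room for an IV placed at the next alignment boundary.
 */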
static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
{
        struct blkcipher_alg *cipher = &alg->cra_blkcipher;
        unsigned int len = alg->cra_ctxsize;

        if (cipher->ivsize) {
                len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
                len += cipher->ivsize;
        }

        return len;
}

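/*
 * Transform initialisation: sanity-check the IV size, populate the
 * crt_blkcipher operations and point crt->iv at the aligned area
 * reserved after the algorithm context by crypto_blkcipher_ctxsize().
 */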
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
{
        struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
        struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
        unsigned long align = crypto_tfm_alg_alignmask(tfm) + 1;
        unsigned long addr;

        if (alg->ivsize > PAGE_SIZE / 8)
                return -EINVAL;

        crt->setkey = setkey;
        crt->encrypt = alg->encrypt;
        crt->decrypt = alg->decrypt;

        addr = (unsigned long)crypto_tfm_ctx(tfm);
        addr = ALIGN(addr, align);
        addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
        crt->iv = (void *)addr;

        return 0;
}

static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute_used__;
static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : blkcipher\n");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "min keysize  : %u\n", alg->cra_blkcipher.min_keysize);
        seq_printf(m, "max keysize  : %u\n", alg->cra_blkcipher.max_keysize);
        seq_printf(m, "ivsize       : %u\n", alg->cra_blkcipher.ivsize);
}
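
/*
 * For a cbc(aes) instance the lines above would show up in /proc/crypto
 * roughly as follows (illustrative values; the name/driver/module lines
 * are printed by the generic /proc/crypto code, not here):
 *
 *        type         : blkcipher
 *        blocksize    : 16
 *        min keysize  : 16
 *        max keysize  : 32
 *        ivsize       : 16
 */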

const struct crypto_type crypto_blkcipher_type = {
        .ctxsize = crypto_blkcipher_ctxsize,
        .init = crypto_init_blkcipher_ops,
#ifdef CONFIG_PROC_FS
        .show = crypto_blkcipher_show,
#endif
};
EXPORT_SYMBOL_GPL(crypto_blkcipher_type);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic block chaining cipher type");