linux/drivers/crypto/ccp/ccp-crypto-aes-xts.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

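/* Algorithm name and driver name pair used for registration */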
struct ccp_aes_xts_def {
        const char *name;
        const char *drv_name;
};

static const struct ccp_aes_xts_def aes_xts_algs[] = {
        {
                .name           = "xts(aes)",
                .drv_name       = "xts-aes-ccp",
        },
};

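/* Map a request length to one of the XTS unit sizes the CCP supports.
 * Only requests whose length exactly matches a supported unit size can
 * be serviced by the hardware; anything else is sent to the fallback.
 */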
struct ccp_unit_size_map {
        unsigned int size;
        u32 value;
};

static struct ccp_unit_size_map xts_unit_sizes[] = {
        {
                .size   = 16,
                .value  = CCP_XTS_AES_UNIT_SIZE_16,
        },
        {
                .size   = 512,
                .value  = CCP_XTS_AES_UNIT_SIZE_512,
        },
        {
                .size   = 1024,
                .value  = CCP_XTS_AES_UNIT_SIZE_1024,
        },
        {
                .size   = 2048,
                .value  = CCP_XTS_AES_UNIT_SIZE_2048,
        },
        {
                .size   = 4096,
                .value  = CCP_XTS_AES_UNIT_SIZE_4096,
        },
};

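/* Completion callback: on success, copy the IV, as updated by the
 * operation, back into the request so chained operations see the
 * new tweak value.
 */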
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);

        if (ret)
                return ret;

        memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE);

        return 0;
}

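/* Cache the XTS key pair for the CCP and always program the software
 * fallback, since requests the device cannot service are redirected
 * to it at request time.
 */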
static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int key_len)
{
        struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
        unsigned int ccpversion = ccp_version();
        int ret;

        ret = xts_verify_key(tfm, key, key_len);
        if (ret)
                return ret;

        /* Version 3 devices support 128-bit keys; version 5 devices can
         * accommodate 128- and 256-bit keys.
         */
        switch (key_len) {
        case AES_KEYSIZE_128 * 2:
                memcpy(ctx->u.aes.key, key, key_len);
                break;
        case AES_KEYSIZE_256 * 2:
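                /* A version 3 device cannot use a 256-bit key, so the
                 * key is deliberately not cached here; the request will
                 * be routed to the fallback at crypt time.
                 */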
                if (ccpversion > CCP_VERSION(3, 0))
                        memcpy(ctx->u.aes.key, key, key_len);
                break;
        }
        ctx->u.aes.key_len = key_len / 2;
        sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

        return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}

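/* Common encrypt/decrypt path: decide whether the CCP can service the
 * request directly and, if not, hand it off to the software fallback.
 */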
static int ccp_aes_xts_crypt(struct skcipher_request *req,
                             unsigned int encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct ccp_aes_req_ctx *rctx = skcipher_request_ctx(req);
        unsigned int ccpversion = ccp_version();
        unsigned int fallback = 0;
        unsigned int unit;
        u32 unit_size;
        int ret;

        if (!ctx->u.aes.key_len)
                return -EINVAL;

        if (!req->iv)
                return -EINVAL;

        /* Check conditions under which the CCP can fulfill a request. The
         * device can handle input plaintext of a length that is a multiple
         * of the unit_size, but the crypto implementation only supports
         * the unit_size being equal to the input length. This limits the
         * number of scenarios we can handle.
         */
        unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
        for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) {
                if (req->cryptlen == xts_unit_sizes[unit].size) {
                        unit_size = xts_unit_sizes[unit].value;
                        break;
                }
        }
        /* The CCP has restrictions on block sizes. Also, a version 3 device
         * only supports AES-128 operations; version 5 CCPs support both
         * AES-128 and -256 operations.
         */
        if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST)
                fallback = 1;
        if ((ccpversion < CCP_VERSION(5, 0)) &&
            (ctx->u.aes.key_len != AES_KEYSIZE_128))
                fallback = 1;
        if ((ctx->u.aes.key_len != AES_KEYSIZE_128) &&
            (ctx->u.aes.key_len != AES_KEYSIZE_256))
                fallback = 1;
        if (fallback) {
                /* Use the fallback to process the request for any
                 * unsupported unit sizes or key sizes.
                 */
                skcipher_request_set_tfm(&rctx->fallback_req,
                                         ctx->u.aes.tfm_skcipher);
                skcipher_request_set_callback(&rctx->fallback_req,
                                              req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(&rctx->fallback_req, req->src,
                                           req->dst, req->cryptlen, req->iv);
                ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
                                crypto_skcipher_decrypt(&rctx->fallback_req);
                return ret;
        }

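        /* The hardware path: snapshot the IV and build the CCP command
         * describing the XTS operation.
         */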
        memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE);
        sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

        memset(&rctx->cmd, 0, sizeof(rctx->cmd));
        INIT_LIST_HEAD(&rctx->cmd.entry);
        rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
        rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
        rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
                                           : CCP_AES_ACTION_DECRYPT;
        rctx->cmd.u.xts.unit_size = unit_size;
        rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
        rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
        rctx->cmd.u.xts.iv = &rctx->iv_sg;
        rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
        rctx->cmd.u.xts.src = req->src;
        rctx->cmd.u.xts.src_len = req->cryptlen;
        rctx->cmd.u.xts.dst = req->dst;

        ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

        return ret;
}

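/* Thin wrappers that select the encrypt or decrypt direction */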
static int ccp_aes_xts_encrypt(struct skcipher_request *req)
{
        return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct skcipher_request *req)
{
        return ccp_aes_xts_crypt(req, 0);
}

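/* Allocate the software fallback at tfm init time and size the request
 * context so it can also hold the fallback's request.
 */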
static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm)
{
        struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *fallback_tfm;

        ctx->complete = ccp_aes_xts_complete;
        ctx->u.aes.key_len = 0;

        fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
                                             CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                pr_warn("could not load fallback driver xts(aes)\n");
                return PTR_ERR(fallback_tfm);
        }
        ctx->u.aes.tfm_skcipher = fallback_tfm;

        crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_aes_req_ctx) +
                                         crypto_skcipher_reqsize(fallback_tfm));

        return 0;
}

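/* Release the fallback cipher allocated in ccp_aes_xts_init_tfm() */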
static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm)
{
        struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}

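/* Build and register a single skcipher algorithm from a
 * ccp_aes_xts_def entry, adding it to the caller's list on success.
 */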
static int ccp_register_aes_xts_alg(struct list_head *head,
                                    const struct ccp_aes_xts_def *def)
{
        struct ccp_crypto_skcipher_alg *ccp_alg;
        struct skcipher_alg *alg;
        int ret;

        ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
        if (!ccp_alg)
                return -ENOMEM;

        INIT_LIST_HEAD(&ccp_alg->entry);

        alg = &ccp_alg->alg;

        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->drv_name);
        alg->base.cra_flags     = CRYPTO_ALG_ASYNC |
                                  CRYPTO_ALG_ALLOCATES_MEMORY |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_NEED_FALLBACK;
        alg->base.cra_blocksize = AES_BLOCK_SIZE;
        alg->base.cra_ctxsize   = sizeof(struct ccp_ctx);
        alg->base.cra_priority  = CCP_CRA_PRIORITY;
        alg->base.cra_module    = THIS_MODULE;

        alg->setkey             = ccp_aes_xts_setkey;
        alg->encrypt            = ccp_aes_xts_encrypt;
        alg->decrypt            = ccp_aes_xts_decrypt;
        alg->min_keysize        = AES_MIN_KEY_SIZE * 2;
        alg->max_keysize        = AES_MAX_KEY_SIZE * 2;
        alg->ivsize             = AES_BLOCK_SIZE;
        alg->init               = ccp_aes_xts_init_tfm;
        alg->exit               = ccp_aes_xts_exit_tfm;

        ret = crypto_register_skcipher(alg);
        if (ret) {
                pr_err("%s skcipher algorithm registration error (%d)\n",
                       alg->base.cra_name, ret);
                kfree(ccp_alg);
                return ret;
        }

        list_add(&ccp_alg->entry, head);

        return 0;
}

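/* Register all XTS-AES algorithm variants provided by this driver */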
int ccp_register_aes_xts_algs(struct list_head *head)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
                ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}