/* linux/drivers/crypto/caam/caamalg_qi2.c */
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/*
   3 * Copyright 2015-2016 Freescale Semiconductor Inc.
   4 * Copyright 2017-2019 NXP
   5 */
   6
   7#include "compat.h"
   8#include "regs.h"
   9#include "caamalg_qi2.h"
  10#include "dpseci_cmd.h"
  11#include "desc_constr.h"
  12#include "error.h"
  13#include "sg_sw_sec4.h"
  14#include "sg_sw_qm2.h"
  15#include "key_gen.h"
  16#include "caamalg_desc.h"
  17#include "caamhash_desc.h"
  18#include "dpseci-debugfs.h"
  19#include <linux/fsl/mc.h>
  20#include <soc/fsl/dpaa2-io.h>
  21#include <soc/fsl/dpaa2-fd.h>
  22#include <crypto/xts.h>
  23#include <asm/unaligned.h>
  24
#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 */
static struct kmem_cache *qi_cache;
  41
/*
 * CAAM-specific parameters attached to each algorithm template; shared by
 * all tfms instantiated from that template.
 */
struct caam_alg_entry {
	struct device *dev;	/* device servicing this algorithm */
	int class1_alg_type;	/* cipher (class 1) OP_ALG_* selector */
	int class2_alg_type;	/* auth (class 2) OP_ALG_* selector */
	bool rfc3686;		/* CTR mode with RFC3686 nonce prepended to key */
	bool geniv;		/* IV generated by the shared descriptor (givencrypt) */
	bool nodkp;		/* presumably: skip Derived Key Protocol - not used in this chunk, verify */
};
  50
/* AEAD algorithm instance: crypto API alg plus CAAM template parameters */
struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;	/* tracks whether the alg was registered with the crypto API */
};
  56
/* skcipher algorithm instance: crypto API alg plus CAAM template parameters */
struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;	/* tracks whether the alg was registered with the crypto API */
};
  62
/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array, indexed by operation type (ENCRYPT / DECRYPT)
 * @key: [authentication key], encryption key - for AEAD the (padded) split
 *       authentication key is stored first, immediately followed by the
 *       encryption key (see aead_setkey())
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *                    to unsupported xts key lengths
 * @fallback: xts fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};
  91
  92static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
  93                                     dma_addr_t iova_addr)
  94{
  95        phys_addr_t phys_addr;
  96
  97        phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
  98                                   iova_addr;
  99
 100        return phys_to_virt(phys_addr);
 101}
 102
/*
 * qi_cache_zalloc - Allocate zeroed buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kzalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}
 119
/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
 132
 133static struct caam_request *to_caam_req(struct crypto_async_request *areq)
 134{
 135        switch (crypto_tfm_alg_type(areq->tfm)) {
 136        case CRYPTO_ALG_TYPE_SKCIPHER:
 137                return skcipher_request_ctx(skcipher_request_cast(areq));
 138        case CRYPTO_ALG_TYPE_AEAD:
 139                return aead_request_ctx(container_of(areq, struct aead_request,
 140                                                     base));
 141        case CRYPTO_ALG_TYPE_AHASH:
 142                return ahash_request_ctx(ahash_request_cast(areq));
 143        default:
 144                return ERR_PTR(-EINVAL);
 145        }
 146}
 147
 148static void caam_unmap(struct device *dev, struct scatterlist *src,
 149                       struct scatterlist *dst, int src_nents,
 150                       int dst_nents, dma_addr_t iv_dma, int ivsize,
 151                       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
 152                       int qm_sg_bytes)
 153{
 154        if (dst != src) {
 155                if (src_nents)
 156                        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 157                if (dst_nents)
 158                        dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 159        } else {
 160                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 161        }
 162
 163        if (iv_dma)
 164                dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
 165
 166        if (qm_sg_bytes)
 167                dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
 168}
 169
/*
 * aead_set_sh_desc - (re)build the AEAD encrypt/decrypt shared descriptors
 *
 * Called whenever the key or the authsize changes. Returns 0 (and defers
 * descriptor construction) while either is still unset; returns -EINVAL if
 * desc_inline_query() reports the descriptor data cannot be laid out.
 */
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	/* Wait until both key and ICV size are configured */
	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		/* Nonce is appended right after the encryption key */
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	/* Encryption key follows the padded split authentication key */
	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
						 DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	/* Bit i of inl_mask tells whether data_len[i] can be inlined */
	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);

	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	/* Push the updated flow context out to the device */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
 276
 277static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 278{
 279        struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 280
 281        ctx->authsize = authsize;
 282        aead_set_sh_desc(authenc);
 283
 284        return 0;
 285}
 286
/*
 * aead_setkey - split an authenc() key blob and install both halves
 *
 * The keying material is parsed into authentication + encryption keys, copied
 * into ctx->key (split auth key first, padded, then the encryption key),
 * synced to the device, and the shared descriptors are rebuilt.
 * Returns -EINVAL on a malformed or oversized key.
 */
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	/* Padded length of the split key derived from the auth algorithm */
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	/* Make the key material visible to the device */
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	/* Scrub the stack copy of the key pointers/lengths */
	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
 326
 327static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 328                            unsigned int keylen)
 329{
 330        struct crypto_authenc_keys keys;
 331        int err;
 332
 333        err = crypto_authenc_extractkeys(&keys, key, keylen);
 334        if (unlikely(err))
 335                goto out;
 336
 337        err = -EINVAL;
 338        if (keys.enckeylen != DES3_EDE_KEY_SIZE)
 339                goto out;
 340
 341        err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
 342              aead_setkey(aead, key, keylen);
 343
 344out:
 345        memzero_explicit(&keys, sizeof(keys));
 346        return err;
 347}
 348
/*
 * aead_edesc_alloc - allocate and DMA-map an extended descriptor for one
 * AEAD request
 *
 * Maps req->src/req->dst (together when in-place), stages the IV and the
 * assoclen in DMAable memory, builds the QMan S/G table and fills in the
 * request's frame-list entries. On success the returned edesc owns all the
 * mappings (released later via caam_unmap()); on failure everything mapped
 * so far is undone and an ERR_PTR is returned.
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_request *req_ctx = aead_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *dev = ctx->dev;
	/* Only sleep-capable requests may use GFP_KERNEL on this hotpath */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct dpaa2_sg_entry *sg_table;

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (unlikely(req->dst != req->src)) {
		/* Out-of-place: dst grows (encrypt) or shrinks (decrypt) by the ICV */
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
						      DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(dev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(dev, "unable to map destination\n");
				dma_unmap_sg(dev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	} else {
		/* In-place: a single bidirectional mapping covers src == dst */
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* geniv decrypt gets the IV from the descriptor, not from the request */
	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	/* S/G table + IV must fit in the fixed-size qi_cache buffer */
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_nents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iv_dma)) {
			dev_err(dev, "unable to map IV\n");
			caam_unmap(dev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;

	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->assoclen_dma)) {
		dev_err(dev, "unable to map assoclen\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	/* Fill the S/G table: assoclen word, [IV,] src [, dst] */
	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	/* Input always covers the 4-byte assoclen word, IV, AD and payload */
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, qm_sg_dma);
	dpaa2_fl_set_len(in_fle, in_len);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			/* Reuse the input S/G table, skipping assoclen [+ IV] */
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}
 587
 588static int chachapoly_set_sh_desc(struct crypto_aead *aead)
 589{
 590        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 591        unsigned int ivsize = crypto_aead_ivsize(aead);
 592        struct device *dev = ctx->dev;
 593        struct caam_flc *flc;
 594        u32 *desc;
 595
 596        if (!ctx->cdata.keylen || !ctx->authsize)
 597                return 0;
 598
 599        flc = &ctx->flc[ENCRYPT];
 600        desc = flc->sh_desc;
 601        cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
 602                               ctx->authsize, true, true);
 603        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 604        dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
 605                                   sizeof(flc->flc) + desc_bytes(desc),
 606                                   ctx->dir);
 607
 608        flc = &ctx->flc[DECRYPT];
 609        desc = flc->sh_desc;
 610        cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
 611                               ctx->authsize, false, true);
 612        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
 613        dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
 614                                   sizeof(flc->flc) + desc_bytes(desc),
 615                                   ctx->dir);
 616
 617        return 0;
 618}
 619
 620static int chachapoly_setauthsize(struct crypto_aead *aead,
 621                                  unsigned int authsize)
 622{
 623        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 624
 625        if (authsize != POLY1305_DIGEST_SIZE)
 626                return -EINVAL;
 627
 628        ctx->authsize = authsize;
 629        return chachapoly_set_sh_desc(aead);
 630}
 631
 632static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
 633                             unsigned int keylen)
 634{
 635        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 636        unsigned int ivsize = crypto_aead_ivsize(aead);
 637        unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
 638
 639        if (keylen != CHACHA_KEY_SIZE + saltlen)
 640                return -EINVAL;
 641
 642        ctx->cdata.key_virt = key;
 643        ctx->cdata.keylen = keylen - saltlen;
 644
 645        return chachapoly_set_sh_desc(aead);
 646}
 647
/*
 * gcm_set_sh_desc - build the AES-GCM encrypt/decrypt shared descriptors
 *
 * The key is inlined into each descriptor when it fits in the remaining
 * 64-word descriptor buffer, otherwise it is referenced by DMA address.
 * Construction is deferred (returning 0) while key or authsize is unset.
 */
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	/* Descriptor-buffer bytes left once the key is accounted for */
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
 704
 705static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
 706{
 707        struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 708        int err;
 709
 710        err = crypto_gcm_check_authsize(authsize);
 711        if (err)
 712                return err;
 713
 714        ctx->authsize = authsize;
 715        gcm_set_sh_desc(authenc);
 716
 717        return 0;
 718}
 719
 720static int gcm_setkey(struct crypto_aead *aead,
 721                      const u8 *key, unsigned int keylen)
 722{
 723        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 724        struct device *dev = ctx->dev;
 725        int ret;
 726
 727        ret = aes_check_keylen(keylen);
 728        if (ret)
 729                return ret;
 730        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 731                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 732
 733        memcpy(ctx->key, key, keylen);
 734        dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
 735        ctx->cdata.keylen = keylen;
 736
 737        return gcm_set_sh_desc(aead);
 738}
 739
/*
 * rfc4106_set_sh_desc - build the RFC4106 (GCM for IPsec ESP) shared
 * descriptors
 *
 * As with plain GCM, the key is inlined when it fits in the remaining
 * descriptor buffer, otherwise referenced by DMA address. Construction is
 * deferred (returning 0) while key or authsize is unset.
 */
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	/* Descriptor-buffer bytes left once the key is accounted for */
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
 798
 799static int rfc4106_setauthsize(struct crypto_aead *authenc,
 800                               unsigned int authsize)
 801{
 802        struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 803        int err;
 804
 805        err = crypto_rfc4106_check_authsize(authsize);
 806        if (err)
 807                return err;
 808
 809        ctx->authsize = authsize;
 810        rfc4106_set_sh_desc(authenc);
 811
 812        return 0;
 813}
 814
 815static int rfc4106_setkey(struct crypto_aead *aead,
 816                          const u8 *key, unsigned int keylen)
 817{
 818        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 819        struct device *dev = ctx->dev;
 820        int ret;
 821
 822        ret = aes_check_keylen(keylen - 4);
 823        if (ret)
 824                return ret;
 825
 826        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 827                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 828
 829        memcpy(ctx->key, key, keylen);
 830        /*
 831         * The last four bytes of the key material are used as the salt value
 832         * in the nonce. Update the AES key length.
 833         */
 834        ctx->cdata.keylen = keylen - 4;
 835        dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
 836                                   ctx->dir);
 837
 838        return rfc4106_set_sh_desc(aead);
 839}
 840
/*
 * rfc4543_set_sh_desc - (re)build RFC4543 (GMAC-ESP) shared descriptors
 *
 * Mirrors rfc4106_set_sh_desc() but uses the GMAC encap/decap descriptor
 * constructors. Returns 0; bails out early until both key and authsize
 * have been configured.
 */
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct caam_flc *flc;
	u32 *desc;
	/* descriptor buffer bytes still available for inlining the key */
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	/* defer until both key and authsize are known */
	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		/* key does not fit inline - reference it by DMA address */
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	/* make the updated flow context visible to the accelerator */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
				  true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
 899
 900static int rfc4543_setauthsize(struct crypto_aead *authenc,
 901                               unsigned int authsize)
 902{
 903        struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 904
 905        if (authsize != 16)
 906                return -EINVAL;
 907
 908        ctx->authsize = authsize;
 909        rfc4543_set_sh_desc(authenc);
 910
 911        return 0;
 912}
 913
 914static int rfc4543_setkey(struct crypto_aead *aead,
 915                          const u8 *key, unsigned int keylen)
 916{
 917        struct caam_ctx *ctx = crypto_aead_ctx(aead);
 918        struct device *dev = ctx->dev;
 919        int ret;
 920
 921        ret = aes_check_keylen(keylen - 4);
 922        if (ret)
 923                return ret;
 924
 925        print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
 926                             DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 927
 928        memcpy(ctx->key, key, keylen);
 929        /*
 930         * The last four bytes of the key material are used as the salt value
 931         * in the nonce. Update the AES key length.
 932         */
 933        ctx->cdata.keylen = keylen - 4;
 934        dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
 935                                   ctx->dir);
 936
 937        return rfc4543_set_sh_desc(aead);
 938}
 939
/*
 * skcipher_setkey - common setkey for the plain skcipher algorithms
 * @skcipher: transform the key is installed on
 * @key: raw key material (already length-validated by the caller)
 * @keylen: key length in bytes
 * @ctx1_iv_off: offset of the IV within CONTEXT1 (nonzero for CTR modes)
 *
 * Always inlines the key into the shared descriptors, builds both the
 * encrypt and decrypt flow contexts and syncs them to the device.
 */
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher),
			     struct caam_skcipher_alg, skcipher);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	u32 *desc;
	/* RFC3686 (CTR with nonce) needs special descriptor handling */
	const bool is_rfc3686 = alg->caam.rfc3686;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	/* push the updated flow context out to the accelerator */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
				   ctx1_iv_off);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
 982
 983static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
 984                               const u8 *key, unsigned int keylen)
 985{
 986        int err;
 987
 988        err = aes_check_keylen(keylen);
 989        if (err)
 990                return err;
 991
 992        return skcipher_setkey(skcipher, key, keylen, 0);
 993}
 994
 995static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
 996                                   const u8 *key, unsigned int keylen)
 997{
 998        u32 ctx1_iv_off;
 999        int err;
1000
1001        /*
1002         * RFC3686 specific:
1003         *      | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1004         *      | *key = {KEY, NONCE}
1005         */
1006        ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1007        keylen -= CTR_RFC3686_NONCE_SIZE;
1008
1009        err = aes_check_keylen(keylen);
1010        if (err)
1011                return err;
1012
1013        return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1014}
1015
1016static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1017                               const u8 *key, unsigned int keylen)
1018{
1019        u32 ctx1_iv_off;
1020        int err;
1021
1022        /*
1023         * AES-CTR needs to load IV in CONTEXT1 reg
1024         * at an offset of 128bits (16bytes)
1025         * CONTEXT1[255:128] = IV
1026         */
1027        ctx1_iv_off = 16;
1028
1029        err = aes_check_keylen(keylen);
1030        if (err)
1031                return err;
1032
1033        return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1034}
1035
1036static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1037                                    const u8 *key, unsigned int keylen)
1038{
1039        if (keylen != CHACHA_KEY_SIZE)
1040                return -EINVAL;
1041
1042        return skcipher_setkey(skcipher, key, keylen, 0);
1043}
1044
1045static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1046                               const u8 *key, unsigned int keylen)
1047{
1048        return verify_skcipher_des_key(skcipher, key) ?:
1049               skcipher_setkey(skcipher, key, keylen, 0);
1050}
1051
1052static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1053                                const u8 *key, unsigned int keylen)
1054{
1055        return verify_skcipher_des3_key(skcipher, key) ?:
1056               skcipher_setkey(skcipher, key, keylen, 0);
1057}
1058
/*
 * xts_skcipher_setkey - install the double-length AES-XTS key
 *
 * Besides programming the hardware descriptors, this also keys the
 * software fallback tfm when it may be needed: for key sizes other than
 * 2x128/2x256 bits, or on older CAAM hardware (era <= 8 — presumably
 * lacking full XTS support there; see the era check in
 * skcipher_encrypt/decrypt as well).
 */
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	/* non-standard key sizes must always go through the fallback */
	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		/* key the fallback tfm so it is ready at request time */
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts_skcipher_encrypt shared descriptor */
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	/* push the updated flow context out to the accelerator */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	/* xts_skcipher_decrypt shared descriptor */
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
1108
/*
 * skcipher_edesc_alloc - build the extended descriptor for one request
 *
 * Maps src/dst scatterlists and the IV for DMA, lays out the hardware
 * S/G tables and the frame list entries, and returns the extended
 * descriptor. On any failure every mapping made so far is undone and an
 * ERR_PTR is returned. The caller owns the returned edesc and must
 * release it via skcipher_unmap() + qi_cache_free().
 */
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_request *req_ctx = skcipher_request_ctx(req);
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct device *dev = ctx->dev;
	/* only sleep for allocations when the caller allows it */
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct dpaa2_sg_entry *sg_table;

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->dst != req->src)) {
		/* out-of-place: map src read-only and dst write-only */
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(dev, "unable to map destination\n");
			dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* in-place: single bidirectional mapping */
		mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(dev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/* input table: one IV entry followed by the src entries */
	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
	/* S/G table plus trailing IV must fit in one memcache buffer */
	if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (unlikely(!edesc)) {
		dev_err(dev, "could not allocate extended descriptor\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = (u8 *)(sg_table + qm_sg_ents);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, iv_dma)) {
		dev_err(dev, "unable to map IV\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	/* record everything skcipher_unmap() will need to undo */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	/* fill input table: [IV, src...] */
	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	/* trailing output IV entry (same buffer as the input IV entry) */
	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	/* build the frame list: both sides carry cryptlen + IV bytes */
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
	dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);

	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);

	dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);

	/* in-place reuses the input table past its IV entry */
	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}
1257
/*
 * aead_unmap - undo all DMA mappings associated with an AEAD request's
 * extended descriptor (src/dst, IV, S/G table and the 4-byte assoclen
 * buffer mapped separately).
 */
static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
1269
/*
 * skcipher_unmap - undo all DMA mappings associated with a skcipher
 * request's extended descriptor. The IV was mapped bidirectionally so
 * the updated IV written back by the hardware is visible to the CPU.
 */
static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}
1280
/*
 * aead_encrypt_done - completion callback for AEAD encryption jobs.
 * Translates the hardware status, tears down DMA mappings, frees the
 * extended descriptor and completes the crypto request.
 */
static void aead_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	/* nonzero status = hardware reported an error for this job */
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
1301
/*
 * aead_decrypt_done - completion callback for AEAD decryption jobs.
 * Same flow as aead_encrypt_done(): translate status, unmap, free the
 * extended descriptor and complete the request.
 */
static void aead_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct aead_request *req = container_of(areq, struct aead_request,
						base);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct aead_edesc *edesc = req_ctx->edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	/* nonzero status = hardware reported an error for this job */
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	aead_unmap(ctx->dev, edesc, req);
	qi_cache_free(edesc);
	aead_request_complete(req, ecode);
}
1322
/*
 * aead_encrypt - enqueue an AEAD encryption request to the accelerator.
 *
 * Returns -EINPROGRESS on successful enqueue (completion arrives via
 * aead_encrypt_done()), -EBUSY when the request was backlogged (only if
 * the caller allowed it), or a negative error after cleaning up.
 */
static int aead_encrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = aead_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	/* on hard failure (not in-progress/backlogged) clean up ourselves */
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
1350
/*
 * aead_decrypt - enqueue an AEAD decryption request to the accelerator.
 * Mirrors aead_encrypt() with the DECRYPT flow context and callback.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx(aead);
	struct caam_request *caam_req = aead_request_ctx(req);
	int ret;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = aead_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	/* on hard failure (not in-progress/backlogged) clean up ourselves */
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		aead_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
1378
1379static int ipsec_gcm_encrypt(struct aead_request *req)
1380{
1381        return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1382}
1383
1384static int ipsec_gcm_decrypt(struct aead_request *req)
1385{
1386        return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1387}
1388
/*
 * skcipher_encrypt_done - completion callback for skcipher encryption.
 * Translates hardware status, unmaps DMA, copies the updated IV back
 * to the request, frees the extended descriptor and completes it.
 */
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	/* nonzero status = hardware reported an error for this job */
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	/* unmap first so the CPU sees the IV the hardware wrote back */
	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
1426
/*
 * skcipher_decrypt_done - completion callback for skcipher decryption.
 * Same flow as skcipher_encrypt_done(): translate status, unmap,
 * propagate the updated IV, free the descriptor and complete.
 */
static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct caam_request *req_ctx = to_caam_req(areq);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct skcipher_edesc *edesc = req_ctx->edesc;
	int ecode = 0;
	int ivsize = crypto_skcipher_ivsize(skcipher);

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);

	/* nonzero status = hardware reported an error for this job */
	if (unlikely(status))
		ecode = caam_qi2_strstatus(ctx->dev, status);

	print_hex_dump_debug("dstiv  @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst    @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	/* unmap first so the CPU sees the IV the hardware wrote back */
	skcipher_unmap(ctx->dev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
1464
1465static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1466{
1467        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1468        unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1469
1470        return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1471}
1472
/*
 * skcipher_encrypt - enqueue a skcipher encryption request.
 *
 * For XTS transforms, requests that the hardware cannot service (era <=
 * 8 with a nonzero upper IV half, or a non-standard key size) are
 * redirected to the software fallback tfm. Otherwise the request is
 * mapped and enqueued; completion arrives via skcipher_encrypt_done().
 */
static int skcipher_encrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		/* hand the whole request over to the software fallback */
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[ENCRYPT];
	caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
	caam_req->cbk = skcipher_encrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	/* on hard failure (not in-progress/backlogged) clean up ourselves */
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
1522
/*
 * skcipher_decrypt - enqueue a skcipher decryption request.
 * Mirrors skcipher_encrypt(), using the DECRYPT flow context, callback
 * and the fallback's decrypt entry point.
 */
static int skcipher_decrypt(struct skcipher_request *req)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
	struct caam_request *caam_req = skcipher_request_ctx(req);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		/* hand the whole request over to the software fallback */
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	caam_req->flc = &ctx->flc[DECRYPT];
	caam_req->flc_dma = ctx->flc_dma[DECRYPT];
	caam_req->cbk = skcipher_decrypt_done;
	caam_req->ctx = &req->base;
	caam_req->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
	/* on hard failure (not in-progress/backlogged) clean up ourselves */
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		skcipher_unmap(ctx->dev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
1572
1573static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1574                         bool uses_dkp)
1575{
1576        dma_addr_t dma_addr;
1577        int i;
1578
1579        /* copy descriptor header template value */
1580        ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1581        ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1582
1583        ctx->dev = caam->dev;
1584        ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1585
1586        dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1587                                        offsetof(struct caam_ctx, flc_dma),
1588                                        ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1589        if (dma_mapping_error(ctx->dev, dma_addr)) {
1590                dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1591                return -ENOMEM;
1592        }
1593
1594        for (i = 0; i < NUM_OP; i++)
1595                ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1596        ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1597
1598        return 0;
1599}
1600
1601static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1602{
1603        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1604        struct caam_skcipher_alg *caam_alg =
1605                container_of(alg, typeof(*caam_alg), skcipher);
1606        struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1607        u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1608        int ret = 0;
1609
1610        if (alg_aai == OP_ALG_AAI_XTS) {
1611                const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1612                struct crypto_skcipher *fallback;
1613
1614                fallback = crypto_alloc_skcipher(tfm_name, 0,
1615                                                 CRYPTO_ALG_NEED_FALLBACK);
1616                if (IS_ERR(fallback)) {
1617                        dev_err(caam_alg->caam.dev,
1618                                "Failed to allocate %s fallback: %ld\n",
1619                                tfm_name, PTR_ERR(fallback));
1620                        return PTR_ERR(fallback);
1621                }
1622
1623                ctx->fallback = fallback;
1624                crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request) +
1625                                            crypto_skcipher_reqsize(fallback));
1626        } else {
1627                crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
1628        }
1629
1630        ret = caam_cra_init(ctx, &caam_alg->caam, false);
1631        if (ret && ctx->fallback)
1632                crypto_free_skcipher(ctx->fallback);
1633
1634        return ret;
1635}
1636
1637static int caam_cra_init_aead(struct crypto_aead *tfm)
1638{
1639        struct aead_alg *alg = crypto_aead_alg(tfm);
1640        struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1641                                                      aead);
1642
1643        crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
1644        return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
1645                             !caam_alg->caam.nodkp);
1646}
1647
/*
 * caam_exit_common - undo the DMA mapping established by caam_cra_init()
 *
 * flc_dma[0] is the base bus address of the single mapping that covers all
 * flow contexts and the key area; size and direction mirror the map call.
 */
static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}
1654
1655static void caam_cra_exit(struct crypto_skcipher *tfm)
1656{
1657        struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
1658
1659        if (ctx->fallback)
1660                crypto_free_skcipher(ctx->fallback);
1661        caam_exit_common(ctx);
1662}
1663
/* caam_cra_exit_aead - AEAD tfm destructor; only the common unmap is needed */
static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	struct caam_ctx *ctx = crypto_aead_ctx(tfm);

	caam_exit_common(ctx);
}
1668
/*
 * Registration table for the skcipher algorithms offloaded to DPAA2 CAAM.
 * Each entry pairs the crypto API descriptor with the CAAM class 1
 * algorithm/mode selector programmed into the shared descriptor.
 */
static struct caam_skcipher_alg driver_algs[] = {
	/* AES-CBC */
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi2",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	/* Triple-DES (EDE) CBC */
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi2",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	/* Single DES CBC */
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi2",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	/* AES-CTR: stream mode, so blocksize is 1 and chunksize carries the
	 * underlying cipher block size */
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	/* RFC 3686 AES-CTR: key material carries an extra 4-byte nonce */
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	/* AES-XTS: two concatenated keys, and a software fallback is
	 * required (see caam_cra_init_skcipher) */
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi2",
				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
	/* ChaCha20 stream cipher */
	{
		.skcipher = {
			.base = {
				.cra_name = "chacha20",
				.cra_driver_name = "chacha20-caam-qi2",
				.cra_blocksize = 1,
			},
			.setkey = chacha20_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = CHACHA_KEY_SIZE,
			.max_keysize = CHACHA_KEY_SIZE,
			.ivsize = CHACHA_IV_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
	},
};
1793
1794static struct caam_aead_alg driver_aeads[] = {
1795        {
1796                .aead = {
1797                        .base = {
1798                                .cra_name = "rfc4106(gcm(aes))",
1799                                .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1800                                .cra_blocksize = 1,
1801                        },
1802                        .setkey = rfc4106_setkey,
1803                        .setauthsize = rfc4106_setauthsize,
1804                        .encrypt = ipsec_gcm_encrypt,
1805                        .decrypt = ipsec_gcm_decrypt,
1806                        .ivsize = 8,
1807                        .maxauthsize = AES_BLOCK_SIZE,
1808                },
1809                .caam = {
1810                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1811                        .nodkp = true,
1812                },
1813        },
1814        {
1815                .aead = {
1816                        .base = {
1817                                .cra_name = "rfc4543(gcm(aes))",
1818                                .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1819                                .cra_blocksize = 1,
1820                        },
1821                        .setkey = rfc4543_setkey,
1822                        .setauthsize = rfc4543_setauthsize,
1823                        .encrypt = ipsec_gcm_encrypt,
1824                        .decrypt = ipsec_gcm_decrypt,
1825                        .ivsize = 8,
1826                        .maxauthsize = AES_BLOCK_SIZE,
1827                },
1828                .caam = {
1829                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1830                        .nodkp = true,
1831                },
1832        },
1833        /* Galois Counter Mode */
1834        {
1835                .aead = {
1836                        .base = {
1837                                .cra_name = "gcm(aes)",
1838                                .cra_driver_name = "gcm-aes-caam-qi2",
1839                                .cra_blocksize = 1,
1840                        },
1841                        .setkey = gcm_setkey,
1842                        .setauthsize = gcm_setauthsize,
1843                        .encrypt = aead_encrypt,
1844                        .decrypt = aead_decrypt,
1845                        .ivsize = 12,
1846                        .maxauthsize = AES_BLOCK_SIZE,
1847                },
1848                .caam = {
1849                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1850                        .nodkp = true,
1851                }
1852        },
1853        /* single-pass ipsec_esp descriptor */
1854        {
1855                .aead = {
1856                        .base = {
1857                                .cra_name = "authenc(hmac(md5),cbc(aes))",
1858                                .cra_driver_name = "authenc-hmac-md5-"
1859                                                   "cbc-aes-caam-qi2",
1860                                .cra_blocksize = AES_BLOCK_SIZE,
1861                        },
1862                        .setkey = aead_setkey,
1863                        .setauthsize = aead_setauthsize,
1864                        .encrypt = aead_encrypt,
1865                        .decrypt = aead_decrypt,
1866                        .ivsize = AES_BLOCK_SIZE,
1867                        .maxauthsize = MD5_DIGEST_SIZE,
1868                },
1869                .caam = {
1870                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1871                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1872                                           OP_ALG_AAI_HMAC_PRECOMP,
1873                }
1874        },
1875        {
1876                .aead = {
1877                        .base = {
1878                                .cra_name = "echainiv(authenc(hmac(md5),"
1879                                            "cbc(aes)))",
1880                                .cra_driver_name = "echainiv-authenc-hmac-md5-"
1881                                                   "cbc-aes-caam-qi2",
1882                                .cra_blocksize = AES_BLOCK_SIZE,
1883                        },
1884                        .setkey = aead_setkey,
1885                        .setauthsize = aead_setauthsize,
1886                        .encrypt = aead_encrypt,
1887                        .decrypt = aead_decrypt,
1888                        .ivsize = AES_BLOCK_SIZE,
1889                        .maxauthsize = MD5_DIGEST_SIZE,
1890                },
1891                .caam = {
1892                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1893                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1894                                           OP_ALG_AAI_HMAC_PRECOMP,
1895                        .geniv = true,
1896                }
1897        },
1898        {
1899                .aead = {
1900                        .base = {
1901                                .cra_name = "authenc(hmac(sha1),cbc(aes))",
1902                                .cra_driver_name = "authenc-hmac-sha1-"
1903                                                   "cbc-aes-caam-qi2",
1904                                .cra_blocksize = AES_BLOCK_SIZE,
1905                        },
1906                        .setkey = aead_setkey,
1907                        .setauthsize = aead_setauthsize,
1908                        .encrypt = aead_encrypt,
1909                        .decrypt = aead_decrypt,
1910                        .ivsize = AES_BLOCK_SIZE,
1911                        .maxauthsize = SHA1_DIGEST_SIZE,
1912                },
1913                .caam = {
1914                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1915                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1916                                           OP_ALG_AAI_HMAC_PRECOMP,
1917                }
1918        },
1919        {
1920                .aead = {
1921                        .base = {
1922                                .cra_name = "echainiv(authenc(hmac(sha1),"
1923                                            "cbc(aes)))",
1924                                .cra_driver_name = "echainiv-authenc-"
1925                                                   "hmac-sha1-cbc-aes-caam-qi2",
1926                                .cra_blocksize = AES_BLOCK_SIZE,
1927                        },
1928                        .setkey = aead_setkey,
1929                        .setauthsize = aead_setauthsize,
1930                        .encrypt = aead_encrypt,
1931                        .decrypt = aead_decrypt,
1932                        .ivsize = AES_BLOCK_SIZE,
1933                        .maxauthsize = SHA1_DIGEST_SIZE,
1934                },
1935                .caam = {
1936                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1937                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1938                                           OP_ALG_AAI_HMAC_PRECOMP,
1939                        .geniv = true,
1940                },
1941        },
1942        {
1943                .aead = {
1944                        .base = {
1945                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
1946                                .cra_driver_name = "authenc-hmac-sha224-"
1947                                                   "cbc-aes-caam-qi2",
1948                                .cra_blocksize = AES_BLOCK_SIZE,
1949                        },
1950                        .setkey = aead_setkey,
1951                        .setauthsize = aead_setauthsize,
1952                        .encrypt = aead_encrypt,
1953                        .decrypt = aead_decrypt,
1954                        .ivsize = AES_BLOCK_SIZE,
1955                        .maxauthsize = SHA224_DIGEST_SIZE,
1956                },
1957                .caam = {
1958                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1959                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1960                                           OP_ALG_AAI_HMAC_PRECOMP,
1961                }
1962        },
1963        {
1964                .aead = {
1965                        .base = {
1966                                .cra_name = "echainiv(authenc(hmac(sha224),"
1967                                            "cbc(aes)))",
1968                                .cra_driver_name = "echainiv-authenc-"
1969                                                   "hmac-sha224-cbc-aes-caam-qi2",
1970                                .cra_blocksize = AES_BLOCK_SIZE,
1971                        },
1972                        .setkey = aead_setkey,
1973                        .setauthsize = aead_setauthsize,
1974                        .encrypt = aead_encrypt,
1975                        .decrypt = aead_decrypt,
1976                        .ivsize = AES_BLOCK_SIZE,
1977                        .maxauthsize = SHA224_DIGEST_SIZE,
1978                },
1979                .caam = {
1980                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1981                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1982                                           OP_ALG_AAI_HMAC_PRECOMP,
1983                        .geniv = true,
1984                }
1985        },
1986        {
1987                .aead = {
1988                        .base = {
1989                                .cra_name = "authenc(hmac(sha256),cbc(aes))",
1990                                .cra_driver_name = "authenc-hmac-sha256-"
1991                                                   "cbc-aes-caam-qi2",
1992                                .cra_blocksize = AES_BLOCK_SIZE,
1993                        },
1994                        .setkey = aead_setkey,
1995                        .setauthsize = aead_setauthsize,
1996                        .encrypt = aead_encrypt,
1997                        .decrypt = aead_decrypt,
1998                        .ivsize = AES_BLOCK_SIZE,
1999                        .maxauthsize = SHA256_DIGEST_SIZE,
2000                },
2001                .caam = {
2002                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2003                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2004                                           OP_ALG_AAI_HMAC_PRECOMP,
2005                }
2006        },
2007        {
2008                .aead = {
2009                        .base = {
2010                                .cra_name = "echainiv(authenc(hmac(sha256),"
2011                                            "cbc(aes)))",
2012                                .cra_driver_name = "echainiv-authenc-"
2013                                                   "hmac-sha256-cbc-aes-"
2014                                                   "caam-qi2",
2015                                .cra_blocksize = AES_BLOCK_SIZE,
2016                        },
2017                        .setkey = aead_setkey,
2018                        .setauthsize = aead_setauthsize,
2019                        .encrypt = aead_encrypt,
2020                        .decrypt = aead_decrypt,
2021                        .ivsize = AES_BLOCK_SIZE,
2022                        .maxauthsize = SHA256_DIGEST_SIZE,
2023                },
2024                .caam = {
2025                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2026                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2027                                           OP_ALG_AAI_HMAC_PRECOMP,
2028                        .geniv = true,
2029                }
2030        },
2031        {
2032                .aead = {
2033                        .base = {
2034                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
2035                                .cra_driver_name = "authenc-hmac-sha384-"
2036                                                   "cbc-aes-caam-qi2",
2037                                .cra_blocksize = AES_BLOCK_SIZE,
2038                        },
2039                        .setkey = aead_setkey,
2040                        .setauthsize = aead_setauthsize,
2041                        .encrypt = aead_encrypt,
2042                        .decrypt = aead_decrypt,
2043                        .ivsize = AES_BLOCK_SIZE,
2044                        .maxauthsize = SHA384_DIGEST_SIZE,
2045                },
2046                .caam = {
2047                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2048                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2049                                           OP_ALG_AAI_HMAC_PRECOMP,
2050                }
2051        },
2052        {
2053                .aead = {
2054                        .base = {
2055                                .cra_name = "echainiv(authenc(hmac(sha384),"
2056                                            "cbc(aes)))",
2057                                .cra_driver_name = "echainiv-authenc-"
2058                                                   "hmac-sha384-cbc-aes-"
2059                                                   "caam-qi2",
2060                                .cra_blocksize = AES_BLOCK_SIZE,
2061                        },
2062                        .setkey = aead_setkey,
2063                        .setauthsize = aead_setauthsize,
2064                        .encrypt = aead_encrypt,
2065                        .decrypt = aead_decrypt,
2066                        .ivsize = AES_BLOCK_SIZE,
2067                        .maxauthsize = SHA384_DIGEST_SIZE,
2068                },
2069                .caam = {
2070                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2071                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2072                                           OP_ALG_AAI_HMAC_PRECOMP,
2073                        .geniv = true,
2074                }
2075        },
2076        {
2077                .aead = {
2078                        .base = {
2079                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
2080                                .cra_driver_name = "authenc-hmac-sha512-"
2081                                                   "cbc-aes-caam-qi2",
2082                                .cra_blocksize = AES_BLOCK_SIZE,
2083                        },
2084                        .setkey = aead_setkey,
2085                        .setauthsize = aead_setauthsize,
2086                        .encrypt = aead_encrypt,
2087                        .decrypt = aead_decrypt,
2088                        .ivsize = AES_BLOCK_SIZE,
2089                        .maxauthsize = SHA512_DIGEST_SIZE,
2090                },
2091                .caam = {
2092                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2093                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2094                                           OP_ALG_AAI_HMAC_PRECOMP,
2095                }
2096        },
2097        {
2098                .aead = {
2099                        .base = {
2100                                .cra_name = "echainiv(authenc(hmac(sha512),"
2101                                            "cbc(aes)))",
2102                                .cra_driver_name = "echainiv-authenc-"
2103                                                   "hmac-sha512-cbc-aes-"
2104                                                   "caam-qi2",
2105                                .cra_blocksize = AES_BLOCK_SIZE,
2106                        },
2107                        .setkey = aead_setkey,
2108                        .setauthsize = aead_setauthsize,
2109                        .encrypt = aead_encrypt,
2110                        .decrypt = aead_decrypt,
2111                        .ivsize = AES_BLOCK_SIZE,
2112                        .maxauthsize = SHA512_DIGEST_SIZE,
2113                },
2114                .caam = {
2115                        .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2116                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2117                                           OP_ALG_AAI_HMAC_PRECOMP,
2118                        .geniv = true,
2119                }
2120        },
2121        {
2122                .aead = {
2123                        .base = {
2124                                .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2125                                .cra_driver_name = "authenc-hmac-md5-"
2126                                                   "cbc-des3_ede-caam-qi2",
2127                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2128                        },
2129                        .setkey = des3_aead_setkey,
2130                        .setauthsize = aead_setauthsize,
2131                        .encrypt = aead_encrypt,
2132                        .decrypt = aead_decrypt,
2133                        .ivsize = DES3_EDE_BLOCK_SIZE,
2134                        .maxauthsize = MD5_DIGEST_SIZE,
2135                },
2136                .caam = {
2137                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2138                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2139                                           OP_ALG_AAI_HMAC_PRECOMP,
2140                }
2141        },
2142        {
2143                .aead = {
2144                        .base = {
2145                                .cra_name = "echainiv(authenc(hmac(md5),"
2146                                            "cbc(des3_ede)))",
2147                                .cra_driver_name = "echainiv-authenc-hmac-md5-"
2148                                                   "cbc-des3_ede-caam-qi2",
2149                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2150                        },
2151                        .setkey = des3_aead_setkey,
2152                        .setauthsize = aead_setauthsize,
2153                        .encrypt = aead_encrypt,
2154                        .decrypt = aead_decrypt,
2155                        .ivsize = DES3_EDE_BLOCK_SIZE,
2156                        .maxauthsize = MD5_DIGEST_SIZE,
2157                },
2158                .caam = {
2159                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2160                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2161                                           OP_ALG_AAI_HMAC_PRECOMP,
2162                        .geniv = true,
2163                }
2164        },
2165        {
2166                .aead = {
2167                        .base = {
2168                                .cra_name = "authenc(hmac(sha1),"
2169                                            "cbc(des3_ede))",
2170                                .cra_driver_name = "authenc-hmac-sha1-"
2171                                                   "cbc-des3_ede-caam-qi2",
2172                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2173                        },
2174                        .setkey = des3_aead_setkey,
2175                        .setauthsize = aead_setauthsize,
2176                        .encrypt = aead_encrypt,
2177                        .decrypt = aead_decrypt,
2178                        .ivsize = DES3_EDE_BLOCK_SIZE,
2179                        .maxauthsize = SHA1_DIGEST_SIZE,
2180                },
2181                .caam = {
2182                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2183                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2184                                           OP_ALG_AAI_HMAC_PRECOMP,
2185                },
2186        },
2187        {
2188                .aead = {
2189                        .base = {
2190                                .cra_name = "echainiv(authenc(hmac(sha1),"
2191                                            "cbc(des3_ede)))",
2192                                .cra_driver_name = "echainiv-authenc-"
2193                                                   "hmac-sha1-"
2194                                                   "cbc-des3_ede-caam-qi2",
2195                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2196                        },
2197                        .setkey = des3_aead_setkey,
2198                        .setauthsize = aead_setauthsize,
2199                        .encrypt = aead_encrypt,
2200                        .decrypt = aead_decrypt,
2201                        .ivsize = DES3_EDE_BLOCK_SIZE,
2202                        .maxauthsize = SHA1_DIGEST_SIZE,
2203                },
2204                .caam = {
2205                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2206                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2207                                           OP_ALG_AAI_HMAC_PRECOMP,
2208                        .geniv = true,
2209                }
2210        },
2211        {
2212                .aead = {
2213                        .base = {
2214                                .cra_name = "authenc(hmac(sha224),"
2215                                            "cbc(des3_ede))",
2216                                .cra_driver_name = "authenc-hmac-sha224-"
2217                                                   "cbc-des3_ede-caam-qi2",
2218                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2219                        },
2220                        .setkey = des3_aead_setkey,
2221                        .setauthsize = aead_setauthsize,
2222                        .encrypt = aead_encrypt,
2223                        .decrypt = aead_decrypt,
2224                        .ivsize = DES3_EDE_BLOCK_SIZE,
2225                        .maxauthsize = SHA224_DIGEST_SIZE,
2226                },
2227                .caam = {
2228                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2229                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2230                                           OP_ALG_AAI_HMAC_PRECOMP,
2231                },
2232        },
2233        {
2234                .aead = {
2235                        .base = {
2236                                .cra_name = "echainiv(authenc(hmac(sha224),"
2237                                            "cbc(des3_ede)))",
2238                                .cra_driver_name = "echainiv-authenc-"
2239                                                   "hmac-sha224-"
2240                                                   "cbc-des3_ede-caam-qi2",
2241                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2242                        },
2243                        .setkey = des3_aead_setkey,
2244                        .setauthsize = aead_setauthsize,
2245                        .encrypt = aead_encrypt,
2246                        .decrypt = aead_decrypt,
2247                        .ivsize = DES3_EDE_BLOCK_SIZE,
2248                        .maxauthsize = SHA224_DIGEST_SIZE,
2249                },
2250                .caam = {
2251                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2252                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2253                                           OP_ALG_AAI_HMAC_PRECOMP,
2254                        .geniv = true,
2255                }
2256        },
2257        {
2258                .aead = {
2259                        .base = {
2260                                .cra_name = "authenc(hmac(sha256),"
2261                                            "cbc(des3_ede))",
2262                                .cra_driver_name = "authenc-hmac-sha256-"
2263                                                   "cbc-des3_ede-caam-qi2",
2264                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2265                        },
2266                        .setkey = des3_aead_setkey,
2267                        .setauthsize = aead_setauthsize,
2268                        .encrypt = aead_encrypt,
2269                        .decrypt = aead_decrypt,
2270                        .ivsize = DES3_EDE_BLOCK_SIZE,
2271                        .maxauthsize = SHA256_DIGEST_SIZE,
2272                },
2273                .caam = {
2274                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2275                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2276                                           OP_ALG_AAI_HMAC_PRECOMP,
2277                },
2278        },
2279        {
2280                .aead = {
2281                        .base = {
2282                                .cra_name = "echainiv(authenc(hmac(sha256),"
2283                                            "cbc(des3_ede)))",
2284                                .cra_driver_name = "echainiv-authenc-"
2285                                                   "hmac-sha256-"
2286                                                   "cbc-des3_ede-caam-qi2",
2287                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2288                        },
2289                        .setkey = des3_aead_setkey,
2290                        .setauthsize = aead_setauthsize,
2291                        .encrypt = aead_encrypt,
2292                        .decrypt = aead_decrypt,
2293                        .ivsize = DES3_EDE_BLOCK_SIZE,
2294                        .maxauthsize = SHA256_DIGEST_SIZE,
2295                },
2296                .caam = {
2297                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2298                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2299                                           OP_ALG_AAI_HMAC_PRECOMP,
2300                        .geniv = true,
2301                }
2302        },
2303        {
2304                .aead = {
2305                        .base = {
2306                                .cra_name = "authenc(hmac(sha384),"
2307                                            "cbc(des3_ede))",
2308                                .cra_driver_name = "authenc-hmac-sha384-"
2309                                                   "cbc-des3_ede-caam-qi2",
2310                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2311                        },
2312                        .setkey = des3_aead_setkey,
2313                        .setauthsize = aead_setauthsize,
2314                        .encrypt = aead_encrypt,
2315                        .decrypt = aead_decrypt,
2316                        .ivsize = DES3_EDE_BLOCK_SIZE,
2317                        .maxauthsize = SHA384_DIGEST_SIZE,
2318                },
2319                .caam = {
2320                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2321                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2322                                           OP_ALG_AAI_HMAC_PRECOMP,
2323                },
2324        },
2325        {
2326                .aead = {
2327                        .base = {
2328                                .cra_name = "echainiv(authenc(hmac(sha384),"
2329                                            "cbc(des3_ede)))",
2330                                .cra_driver_name = "echainiv-authenc-"
2331                                                   "hmac-sha384-"
2332                                                   "cbc-des3_ede-caam-qi2",
2333                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2334                        },
2335                        .setkey = des3_aead_setkey,
2336                        .setauthsize = aead_setauthsize,
2337                        .encrypt = aead_encrypt,
2338                        .decrypt = aead_decrypt,
2339                        .ivsize = DES3_EDE_BLOCK_SIZE,
2340                        .maxauthsize = SHA384_DIGEST_SIZE,
2341                },
2342                .caam = {
2343                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2344                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2345                                           OP_ALG_AAI_HMAC_PRECOMP,
2346                        .geniv = true,
2347                }
2348        },
2349        {
2350                .aead = {
2351                        .base = {
2352                                .cra_name = "authenc(hmac(sha512),"
2353                                            "cbc(des3_ede))",
2354                                .cra_driver_name = "authenc-hmac-sha512-"
2355                                                   "cbc-des3_ede-caam-qi2",
2356                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2357                        },
2358                        .setkey = des3_aead_setkey,
2359                        .setauthsize = aead_setauthsize,
2360                        .encrypt = aead_encrypt,
2361                        .decrypt = aead_decrypt,
2362                        .ivsize = DES3_EDE_BLOCK_SIZE,
2363                        .maxauthsize = SHA512_DIGEST_SIZE,
2364                },
2365                .caam = {
2366                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2367                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2368                                           OP_ALG_AAI_HMAC_PRECOMP,
2369                },
2370        },
2371        {
2372                .aead = {
2373                        .base = {
2374                                .cra_name = "echainiv(authenc(hmac(sha512),"
2375                                            "cbc(des3_ede)))",
2376                                .cra_driver_name = "echainiv-authenc-"
2377                                                   "hmac-sha512-"
2378                                                   "cbc-des3_ede-caam-qi2",
2379                                .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2380                        },
2381                        .setkey = des3_aead_setkey,
2382                        .setauthsize = aead_setauthsize,
2383                        .encrypt = aead_encrypt,
2384                        .decrypt = aead_decrypt,
2385                        .ivsize = DES3_EDE_BLOCK_SIZE,
2386                        .maxauthsize = SHA512_DIGEST_SIZE,
2387                },
2388                .caam = {
2389                        .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2390                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2391                                           OP_ALG_AAI_HMAC_PRECOMP,
2392                        .geniv = true,
2393                }
2394        },
2395        {
2396                .aead = {
2397                        .base = {
2398                                .cra_name = "authenc(hmac(md5),cbc(des))",
2399                                .cra_driver_name = "authenc-hmac-md5-"
2400                                                   "cbc-des-caam-qi2",
2401                                .cra_blocksize = DES_BLOCK_SIZE,
2402                        },
2403                        .setkey = aead_setkey,
2404                        .setauthsize = aead_setauthsize,
2405                        .encrypt = aead_encrypt,
2406                        .decrypt = aead_decrypt,
2407                        .ivsize = DES_BLOCK_SIZE,
2408                        .maxauthsize = MD5_DIGEST_SIZE,
2409                },
2410                .caam = {
2411                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2412                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2413                                           OP_ALG_AAI_HMAC_PRECOMP,
2414                },
2415        },
2416        {
2417                .aead = {
2418                        .base = {
2419                                .cra_name = "echainiv(authenc(hmac(md5),"
2420                                            "cbc(des)))",
2421                                .cra_driver_name = "echainiv-authenc-hmac-md5-"
2422                                                   "cbc-des-caam-qi2",
2423                                .cra_blocksize = DES_BLOCK_SIZE,
2424                        },
2425                        .setkey = aead_setkey,
2426                        .setauthsize = aead_setauthsize,
2427                        .encrypt = aead_encrypt,
2428                        .decrypt = aead_decrypt,
2429                        .ivsize = DES_BLOCK_SIZE,
2430                        .maxauthsize = MD5_DIGEST_SIZE,
2431                },
2432                .caam = {
2433                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2434                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2435                                           OP_ALG_AAI_HMAC_PRECOMP,
2436                        .geniv = true,
2437                }
2438        },
2439        {
2440                .aead = {
2441                        .base = {
2442                                .cra_name = "authenc(hmac(sha1),cbc(des))",
2443                                .cra_driver_name = "authenc-hmac-sha1-"
2444                                                   "cbc-des-caam-qi2",
2445                                .cra_blocksize = DES_BLOCK_SIZE,
2446                        },
2447                        .setkey = aead_setkey,
2448                        .setauthsize = aead_setauthsize,
2449                        .encrypt = aead_encrypt,
2450                        .decrypt = aead_decrypt,
2451                        .ivsize = DES_BLOCK_SIZE,
2452                        .maxauthsize = SHA1_DIGEST_SIZE,
2453                },
2454                .caam = {
2455                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2456                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2457                                           OP_ALG_AAI_HMAC_PRECOMP,
2458                },
2459        },
2460        {
2461                .aead = {
2462                        .base = {
2463                                .cra_name = "echainiv(authenc(hmac(sha1),"
2464                                            "cbc(des)))",
2465                                .cra_driver_name = "echainiv-authenc-"
2466                                                   "hmac-sha1-cbc-des-caam-qi2",
2467                                .cra_blocksize = DES_BLOCK_SIZE,
2468                        },
2469                        .setkey = aead_setkey,
2470                        .setauthsize = aead_setauthsize,
2471                        .encrypt = aead_encrypt,
2472                        .decrypt = aead_decrypt,
2473                        .ivsize = DES_BLOCK_SIZE,
2474                        .maxauthsize = SHA1_DIGEST_SIZE,
2475                },
2476                .caam = {
2477                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2478                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2479                                           OP_ALG_AAI_HMAC_PRECOMP,
2480                        .geniv = true,
2481                }
2482        },
2483        {
2484                .aead = {
2485                        .base = {
2486                                .cra_name = "authenc(hmac(sha224),cbc(des))",
2487                                .cra_driver_name = "authenc-hmac-sha224-"
2488                                                   "cbc-des-caam-qi2",
2489                                .cra_blocksize = DES_BLOCK_SIZE,
2490                        },
2491                        .setkey = aead_setkey,
2492                        .setauthsize = aead_setauthsize,
2493                        .encrypt = aead_encrypt,
2494                        .decrypt = aead_decrypt,
2495                        .ivsize = DES_BLOCK_SIZE,
2496                        .maxauthsize = SHA224_DIGEST_SIZE,
2497                },
2498                .caam = {
2499                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2500                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2501                                           OP_ALG_AAI_HMAC_PRECOMP,
2502                },
2503        },
2504        {
2505                .aead = {
2506                        .base = {
2507                                .cra_name = "echainiv(authenc(hmac(sha224),"
2508                                            "cbc(des)))",
2509                                .cra_driver_name = "echainiv-authenc-"
2510                                                   "hmac-sha224-cbc-des-"
2511                                                   "caam-qi2",
2512                                .cra_blocksize = DES_BLOCK_SIZE,
2513                        },
2514                        .setkey = aead_setkey,
2515                        .setauthsize = aead_setauthsize,
2516                        .encrypt = aead_encrypt,
2517                        .decrypt = aead_decrypt,
2518                        .ivsize = DES_BLOCK_SIZE,
2519                        .maxauthsize = SHA224_DIGEST_SIZE,
2520                },
2521                .caam = {
2522                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2523                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2524                                           OP_ALG_AAI_HMAC_PRECOMP,
2525                        .geniv = true,
2526                }
2527        },
2528        {
2529                .aead = {
2530                        .base = {
2531                                .cra_name = "authenc(hmac(sha256),cbc(des))",
2532                                .cra_driver_name = "authenc-hmac-sha256-"
2533                                                   "cbc-des-caam-qi2",
2534                                .cra_blocksize = DES_BLOCK_SIZE,
2535                        },
2536                        .setkey = aead_setkey,
2537                        .setauthsize = aead_setauthsize,
2538                        .encrypt = aead_encrypt,
2539                        .decrypt = aead_decrypt,
2540                        .ivsize = DES_BLOCK_SIZE,
2541                        .maxauthsize = SHA256_DIGEST_SIZE,
2542                },
2543                .caam = {
2544                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2545                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2546                                           OP_ALG_AAI_HMAC_PRECOMP,
2547                },
2548        },
2549        {
2550                .aead = {
2551                        .base = {
2552                                .cra_name = "echainiv(authenc(hmac(sha256),"
2553                                            "cbc(des)))",
2554                                .cra_driver_name = "echainiv-authenc-"
2555                                                   "hmac-sha256-cbc-des-"
2556                                                   "caam-qi2",
2557                                .cra_blocksize = DES_BLOCK_SIZE,
2558                        },
2559                        .setkey = aead_setkey,
2560                        .setauthsize = aead_setauthsize,
2561                        .encrypt = aead_encrypt,
2562                        .decrypt = aead_decrypt,
2563                        .ivsize = DES_BLOCK_SIZE,
2564                        .maxauthsize = SHA256_DIGEST_SIZE,
2565                },
2566                .caam = {
2567                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2568                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2569                                           OP_ALG_AAI_HMAC_PRECOMP,
2570                        .geniv = true,
2571                },
2572        },
2573        {
2574                .aead = {
2575                        .base = {
2576                                .cra_name = "authenc(hmac(sha384),cbc(des))",
2577                                .cra_driver_name = "authenc-hmac-sha384-"
2578                                                   "cbc-des-caam-qi2",
2579                                .cra_blocksize = DES_BLOCK_SIZE,
2580                        },
2581                        .setkey = aead_setkey,
2582                        .setauthsize = aead_setauthsize,
2583                        .encrypt = aead_encrypt,
2584                        .decrypt = aead_decrypt,
2585                        .ivsize = DES_BLOCK_SIZE,
2586                        .maxauthsize = SHA384_DIGEST_SIZE,
2587                },
2588                .caam = {
2589                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2590                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2591                                           OP_ALG_AAI_HMAC_PRECOMP,
2592                },
2593        },
2594        {
2595                .aead = {
2596                        .base = {
2597                                .cra_name = "echainiv(authenc(hmac(sha384),"
2598                                            "cbc(des)))",
2599                                .cra_driver_name = "echainiv-authenc-"
2600                                                   "hmac-sha384-cbc-des-"
2601                                                   "caam-qi2",
2602                                .cra_blocksize = DES_BLOCK_SIZE,
2603                        },
2604                        .setkey = aead_setkey,
2605                        .setauthsize = aead_setauthsize,
2606                        .encrypt = aead_encrypt,
2607                        .decrypt = aead_decrypt,
2608                        .ivsize = DES_BLOCK_SIZE,
2609                        .maxauthsize = SHA384_DIGEST_SIZE,
2610                },
2611                .caam = {
2612                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2613                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2614                                           OP_ALG_AAI_HMAC_PRECOMP,
2615                        .geniv = true,
2616                }
2617        },
2618        {
2619                .aead = {
2620                        .base = {
2621                                .cra_name = "authenc(hmac(sha512),cbc(des))",
2622                                .cra_driver_name = "authenc-hmac-sha512-"
2623                                                   "cbc-des-caam-qi2",
2624                                .cra_blocksize = DES_BLOCK_SIZE,
2625                        },
2626                        .setkey = aead_setkey,
2627                        .setauthsize = aead_setauthsize,
2628                        .encrypt = aead_encrypt,
2629                        .decrypt = aead_decrypt,
2630                        .ivsize = DES_BLOCK_SIZE,
2631                        .maxauthsize = SHA512_DIGEST_SIZE,
2632                },
2633                .caam = {
2634                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2635                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2636                                           OP_ALG_AAI_HMAC_PRECOMP,
2637                }
2638        },
2639        {
2640                .aead = {
2641                        .base = {
2642                                .cra_name = "echainiv(authenc(hmac(sha512),"
2643                                            "cbc(des)))",
2644                                .cra_driver_name = "echainiv-authenc-"
2645                                                   "hmac-sha512-cbc-des-"
2646                                                   "caam-qi2",
2647                                .cra_blocksize = DES_BLOCK_SIZE,
2648                        },
2649                        .setkey = aead_setkey,
2650                        .setauthsize = aead_setauthsize,
2651                        .encrypt = aead_encrypt,
2652                        .decrypt = aead_decrypt,
2653                        .ivsize = DES_BLOCK_SIZE,
2654                        .maxauthsize = SHA512_DIGEST_SIZE,
2655                },
2656                .caam = {
2657                        .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2658                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2659                                           OP_ALG_AAI_HMAC_PRECOMP,
2660                        .geniv = true,
2661                }
2662        },
2663        {
2664                .aead = {
2665                        .base = {
2666                                .cra_name = "authenc(hmac(md5),"
2667                                            "rfc3686(ctr(aes)))",
2668                                .cra_driver_name = "authenc-hmac-md5-"
2669                                                   "rfc3686-ctr-aes-caam-qi2",
2670                                .cra_blocksize = 1,
2671                        },
2672                        .setkey = aead_setkey,
2673                        .setauthsize = aead_setauthsize,
2674                        .encrypt = aead_encrypt,
2675                        .decrypt = aead_decrypt,
2676                        .ivsize = CTR_RFC3686_IV_SIZE,
2677                        .maxauthsize = MD5_DIGEST_SIZE,
2678                },
2679                .caam = {
2680                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2681                                           OP_ALG_AAI_CTR_MOD128,
2682                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2683                                           OP_ALG_AAI_HMAC_PRECOMP,
2684                        .rfc3686 = true,
2685                },
2686        },
2687        {
2688                .aead = {
2689                        .base = {
2690                                .cra_name = "seqiv(authenc("
2691                                            "hmac(md5),rfc3686(ctr(aes))))",
2692                                .cra_driver_name = "seqiv-authenc-hmac-md5-"
2693                                                   "rfc3686-ctr-aes-caam-qi2",
2694                                .cra_blocksize = 1,
2695                        },
2696                        .setkey = aead_setkey,
2697                        .setauthsize = aead_setauthsize,
2698                        .encrypt = aead_encrypt,
2699                        .decrypt = aead_decrypt,
2700                        .ivsize = CTR_RFC3686_IV_SIZE,
2701                        .maxauthsize = MD5_DIGEST_SIZE,
2702                },
2703                .caam = {
2704                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2705                                           OP_ALG_AAI_CTR_MOD128,
2706                        .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2707                                           OP_ALG_AAI_HMAC_PRECOMP,
2708                        .rfc3686 = true,
2709                        .geniv = true,
2710                },
2711        },
2712        {
2713                .aead = {
2714                        .base = {
2715                                .cra_name = "authenc(hmac(sha1),"
2716                                            "rfc3686(ctr(aes)))",
2717                                .cra_driver_name = "authenc-hmac-sha1-"
2718                                                   "rfc3686-ctr-aes-caam-qi2",
2719                                .cra_blocksize = 1,
2720                        },
2721                        .setkey = aead_setkey,
2722                        .setauthsize = aead_setauthsize,
2723                        .encrypt = aead_encrypt,
2724                        .decrypt = aead_decrypt,
2725                        .ivsize = CTR_RFC3686_IV_SIZE,
2726                        .maxauthsize = SHA1_DIGEST_SIZE,
2727                },
2728                .caam = {
2729                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2730                                           OP_ALG_AAI_CTR_MOD128,
2731                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2732                                           OP_ALG_AAI_HMAC_PRECOMP,
2733                        .rfc3686 = true,
2734                },
2735        },
2736        {
2737                .aead = {
2738                        .base = {
2739                                .cra_name = "seqiv(authenc("
2740                                            "hmac(sha1),rfc3686(ctr(aes))))",
2741                                .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2742                                                   "rfc3686-ctr-aes-caam-qi2",
2743                                .cra_blocksize = 1,
2744                        },
2745                        .setkey = aead_setkey,
2746                        .setauthsize = aead_setauthsize,
2747                        .encrypt = aead_encrypt,
2748                        .decrypt = aead_decrypt,
2749                        .ivsize = CTR_RFC3686_IV_SIZE,
2750                        .maxauthsize = SHA1_DIGEST_SIZE,
2751                },
2752                .caam = {
2753                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2754                                           OP_ALG_AAI_CTR_MOD128,
2755                        .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2756                                           OP_ALG_AAI_HMAC_PRECOMP,
2757                        .rfc3686 = true,
2758                        .geniv = true,
2759                },
2760        },
2761        {
2762                .aead = {
2763                        .base = {
2764                                .cra_name = "authenc(hmac(sha224),"
2765                                            "rfc3686(ctr(aes)))",
2766                                .cra_driver_name = "authenc-hmac-sha224-"
2767                                                   "rfc3686-ctr-aes-caam-qi2",
2768                                .cra_blocksize = 1,
2769                        },
2770                        .setkey = aead_setkey,
2771                        .setauthsize = aead_setauthsize,
2772                        .encrypt = aead_encrypt,
2773                        .decrypt = aead_decrypt,
2774                        .ivsize = CTR_RFC3686_IV_SIZE,
2775                        .maxauthsize = SHA224_DIGEST_SIZE,
2776                },
2777                .caam = {
2778                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2779                                           OP_ALG_AAI_CTR_MOD128,
2780                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2781                                           OP_ALG_AAI_HMAC_PRECOMP,
2782                        .rfc3686 = true,
2783                },
2784        },
2785        {
2786                .aead = {
2787                        .base = {
2788                                .cra_name = "seqiv(authenc("
2789                                            "hmac(sha224),rfc3686(ctr(aes))))",
2790                                .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2791                                                   "rfc3686-ctr-aes-caam-qi2",
2792                                .cra_blocksize = 1,
2793                        },
2794                        .setkey = aead_setkey,
2795                        .setauthsize = aead_setauthsize,
2796                        .encrypt = aead_encrypt,
2797                        .decrypt = aead_decrypt,
2798                        .ivsize = CTR_RFC3686_IV_SIZE,
2799                        .maxauthsize = SHA224_DIGEST_SIZE,
2800                },
2801                .caam = {
2802                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2803                                           OP_ALG_AAI_CTR_MOD128,
2804                        .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2805                                           OP_ALG_AAI_HMAC_PRECOMP,
2806                        .rfc3686 = true,
2807                        .geniv = true,
2808                },
2809        },
2810        {
2811                .aead = {
2812                        .base = {
2813                                .cra_name = "authenc(hmac(sha256),"
2814                                            "rfc3686(ctr(aes)))",
2815                                .cra_driver_name = "authenc-hmac-sha256-"
2816                                                   "rfc3686-ctr-aes-caam-qi2",
2817                                .cra_blocksize = 1,
2818                        },
2819                        .setkey = aead_setkey,
2820                        .setauthsize = aead_setauthsize,
2821                        .encrypt = aead_encrypt,
2822                        .decrypt = aead_decrypt,
2823                        .ivsize = CTR_RFC3686_IV_SIZE,
2824                        .maxauthsize = SHA256_DIGEST_SIZE,
2825                },
2826                .caam = {
2827                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2828                                           OP_ALG_AAI_CTR_MOD128,
2829                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2830                                           OP_ALG_AAI_HMAC_PRECOMP,
2831                        .rfc3686 = true,
2832                },
2833        },
2834        {
2835                .aead = {
2836                        .base = {
2837                                .cra_name = "seqiv(authenc(hmac(sha256),"
2838                                            "rfc3686(ctr(aes))))",
2839                                .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2840                                                   "rfc3686-ctr-aes-caam-qi2",
2841                                .cra_blocksize = 1,
2842                        },
2843                        .setkey = aead_setkey,
2844                        .setauthsize = aead_setauthsize,
2845                        .encrypt = aead_encrypt,
2846                        .decrypt = aead_decrypt,
2847                        .ivsize = CTR_RFC3686_IV_SIZE,
2848                        .maxauthsize = SHA256_DIGEST_SIZE,
2849                },
2850                .caam = {
2851                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2852                                           OP_ALG_AAI_CTR_MOD128,
2853                        .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2854                                           OP_ALG_AAI_HMAC_PRECOMP,
2855                        .rfc3686 = true,
2856                        .geniv = true,
2857                },
2858        },
2859        {
2860                .aead = {
2861                        .base = {
2862                                .cra_name = "authenc(hmac(sha384),"
2863                                            "rfc3686(ctr(aes)))",
2864                                .cra_driver_name = "authenc-hmac-sha384-"
2865                                                   "rfc3686-ctr-aes-caam-qi2",
2866                                .cra_blocksize = 1,
2867                        },
2868                        .setkey = aead_setkey,
2869                        .setauthsize = aead_setauthsize,
2870                        .encrypt = aead_encrypt,
2871                        .decrypt = aead_decrypt,
2872                        .ivsize = CTR_RFC3686_IV_SIZE,
2873                        .maxauthsize = SHA384_DIGEST_SIZE,
2874                },
2875                .caam = {
2876                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2877                                           OP_ALG_AAI_CTR_MOD128,
2878                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2879                                           OP_ALG_AAI_HMAC_PRECOMP,
2880                        .rfc3686 = true,
2881                },
2882        },
2883        {
2884                .aead = {
2885                        .base = {
2886                                .cra_name = "seqiv(authenc(hmac(sha384),"
2887                                            "rfc3686(ctr(aes))))",
2888                                .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2889                                                   "rfc3686-ctr-aes-caam-qi2",
2890                                .cra_blocksize = 1,
2891                        },
2892                        .setkey = aead_setkey,
2893                        .setauthsize = aead_setauthsize,
2894                        .encrypt = aead_encrypt,
2895                        .decrypt = aead_decrypt,
2896                        .ivsize = CTR_RFC3686_IV_SIZE,
2897                        .maxauthsize = SHA384_DIGEST_SIZE,
2898                },
2899                .caam = {
2900                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2901                                           OP_ALG_AAI_CTR_MOD128,
2902                        .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2903                                           OP_ALG_AAI_HMAC_PRECOMP,
2904                        .rfc3686 = true,
2905                        .geniv = true,
2906                },
2907        },
2908        {
2909                .aead = {
2910                        .base = {
2911                                .cra_name = "rfc7539(chacha20,poly1305)",
2912                                .cra_driver_name = "rfc7539-chacha20-poly1305-"
2913                                                   "caam-qi2",
2914                                .cra_blocksize = 1,
2915                        },
2916                        .setkey = chachapoly_setkey,
2917                        .setauthsize = chachapoly_setauthsize,
2918                        .encrypt = aead_encrypt,
2919                        .decrypt = aead_decrypt,
2920                        .ivsize = CHACHAPOLY_IV_SIZE,
2921                        .maxauthsize = POLY1305_DIGEST_SIZE,
2922                },
2923                .caam = {
2924                        .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2925                                           OP_ALG_AAI_AEAD,
2926                        .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2927                                           OP_ALG_AAI_AEAD,
2928                        .nodkp = true,
2929                },
2930        },
2931        {
2932                .aead = {
2933                        .base = {
2934                                .cra_name = "rfc7539esp(chacha20,poly1305)",
2935                                .cra_driver_name = "rfc7539esp-chacha20-"
2936                                                   "poly1305-caam-qi2",
2937                                .cra_blocksize = 1,
2938                        },
2939                        .setkey = chachapoly_setkey,
2940                        .setauthsize = chachapoly_setauthsize,
2941                        .encrypt = aead_encrypt,
2942                        .decrypt = aead_decrypt,
2943                        .ivsize = 8,
2944                        .maxauthsize = POLY1305_DIGEST_SIZE,
2945                },
2946                .caam = {
2947                        .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2948                                           OP_ALG_AAI_AEAD,
2949                        .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2950                                           OP_ALG_AAI_AEAD,
2951                        .nodkp = true,
2952                },
2953        },
2954        {
2955                .aead = {
2956                        .base = {
2957                                .cra_name = "authenc(hmac(sha512),"
2958                                            "rfc3686(ctr(aes)))",
2959                                .cra_driver_name = "authenc-hmac-sha512-"
2960                                                   "rfc3686-ctr-aes-caam-qi2",
2961                                .cra_blocksize = 1,
2962                        },
2963                        .setkey = aead_setkey,
2964                        .setauthsize = aead_setauthsize,
2965                        .encrypt = aead_encrypt,
2966                        .decrypt = aead_decrypt,
2967                        .ivsize = CTR_RFC3686_IV_SIZE,
2968                        .maxauthsize = SHA512_DIGEST_SIZE,
2969                },
2970                .caam = {
2971                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2972                                           OP_ALG_AAI_CTR_MOD128,
2973                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2974                                           OP_ALG_AAI_HMAC_PRECOMP,
2975                        .rfc3686 = true,
2976                },
2977        },
2978        {
2979                .aead = {
2980                        .base = {
2981                                .cra_name = "seqiv(authenc(hmac(sha512),"
2982                                            "rfc3686(ctr(aes))))",
2983                                .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2984                                                   "rfc3686-ctr-aes-caam-qi2",
2985                                .cra_blocksize = 1,
2986                        },
2987                        .setkey = aead_setkey,
2988                        .setauthsize = aead_setauthsize,
2989                        .encrypt = aead_encrypt,
2990                        .decrypt = aead_decrypt,
2991                        .ivsize = CTR_RFC3686_IV_SIZE,
2992                        .maxauthsize = SHA512_DIGEST_SIZE,
2993                },
2994                .caam = {
2995                        .class1_alg_type = OP_ALG_ALGSEL_AES |
2996                                           OP_ALG_AAI_CTR_MOD128,
2997                        .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2998                                           OP_ALG_AAI_HMAC_PRECOMP,
2999                        .rfc3686 = true,
3000                        .geniv = true,
3001                },
3002        },
3003};
3004
3005static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3006{
3007        struct skcipher_alg *alg = &t_alg->skcipher;
3008
3009        alg->base.cra_module = THIS_MODULE;
3010        alg->base.cra_priority = CAAM_CRA_PRIORITY;
3011        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3012        alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3013                              CRYPTO_ALG_KERN_DRIVER_ONLY);
3014
3015        alg->init = caam_cra_init_skcipher;
3016        alg->exit = caam_cra_exit;
3017}
3018
3019static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3020{
3021        struct aead_alg *alg = &t_alg->aead;
3022
3023        alg->base.cra_module = THIS_MODULE;
3024        alg->base.cra_priority = CAAM_CRA_PRIORITY;
3025        alg->base.cra_ctxsize = sizeof(struct caam_ctx);
3026        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3027                              CRYPTO_ALG_KERN_DRIVER_ONLY;
3028
3029        alg->init = caam_cra_init_aead;
3030        alg->exit = caam_cra_exit_aead;
3031}
3032
/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE          (SHA512_DIGEST_SIZE * 2)

/* block size of the largest supported digest (SHA-512) */
#define CAAM_MAX_HASH_BLOCK_SIZE        SHA512_BLOCK_SIZE

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN                    8
#define MAX_CTX_LEN                     (HASH_MSG_LEN + SHA512_DIGEST_SIZE)

/*
 * Indexes into the per-session flc[] / flc_dma[] arrays of
 * struct caam_hash_ctx - one shared descriptor per operation type
 * (see ahash_set_sh_desc()).
 */
enum hash_optype {
        UPDATE = 0,
        UPDATE_FIRST,
        FINALIZE,
        DIGEST,
        HASH_NUM_OP     /* number of operation types == array size */
};
3049
3050/**
3051 * struct caam_hash_ctx - ahash per-session context
3052 * @flc: Flow Contexts array
3053 * @key: authentication key
3054 * @flc_dma: I/O virtual addresses of the Flow Contexts
3055 * @dev: dpseci device
3056 * @ctx_len: size of Context Register
3057 * @adata: hashing algorithm details
3058 */
3059struct caam_hash_ctx {
3060        struct caam_flc flc[HASH_NUM_OP];
3061        u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3062        dma_addr_t flc_dma[HASH_NUM_OP];
3063        struct device *dev;
3064        int ctx_len;
3065        struct alginfo adata;
3066};
3067
/**
 * struct caam_hash_state - ahash per-request state
 * @caam_req: caam request structure used to enqueue this request's jobs
 * @buf_dma: I/O virtual address of @buf (0 when unmapped)
 * @ctx_dma: I/O virtual address of @caam_ctx (0 when unmapped)
 * @ctx_dma_len: length of the @caam_ctx DMA mapping
 * @buf: data buffered between calls, not yet submitted to the engine
 * @buflen: number of valid bytes currently held in @buf
 * @next_buflen: number of trailing request bytes to buffer once the
 *               in-flight operation completes
 * @caam_ctx: engine Context Register contents (running digest + msg length)
 * @update: currently selected update() implementation
 * @final: currently selected final() implementation
 * @finup: currently selected finup() implementation
 */
struct caam_hash_state {
        struct caam_request caam_req;
        dma_addr_t buf_dma;
        dma_addr_t ctx_dma;
        int ctx_dma_len;
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
        int buflen;
        int next_buflen;
        u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
};
3082
/*
 * Serialized hash state - presumably the payload of the ahash
 * export()/import() handlers (defined outside this view); mirrors the
 * software-visible part of struct caam_hash_state.
 */
struct caam_export_state {
        u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];       /* pending data buffer */
        u8 caam_ctx[MAX_CTX_LEN];               /* engine context snapshot */
        int buflen;                             /* valid bytes in buf */
        int (*update)(struct ahash_request *req);
        int (*final)(struct ahash_request *req);
        int (*finup)(struct ahash_request *req);
};
3091
3092/* Map current buffer in state (if length > 0) and put it in link table */
3093static inline int buf_map_to_qm_sg(struct device *dev,
3094                                   struct dpaa2_sg_entry *qm_sg,
3095                                   struct caam_hash_state *state)
3096{
3097        int buflen = state->buflen;
3098
3099        if (!buflen)
3100                return 0;
3101
3102        state->buf_dma = dma_map_single(dev, state->buf, buflen,
3103                                        DMA_TO_DEVICE);
3104        if (dma_mapping_error(dev, state->buf_dma)) {
3105                dev_err(dev, "unable to map buf\n");
3106                state->buf_dma = 0;
3107                return -ENOMEM;
3108        }
3109
3110        dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3111
3112        return 0;
3113}
3114
3115/* Map state->caam_ctx, and add it to link table */
3116static inline int ctx_map_to_qm_sg(struct device *dev,
3117                                   struct caam_hash_state *state, int ctx_len,
3118                                   struct dpaa2_sg_entry *qm_sg, u32 flag)
3119{
3120        state->ctx_dma_len = ctx_len;
3121        state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3122        if (dma_mapping_error(dev, state->ctx_dma)) {
3123                dev_err(dev, "unable to map ctx\n");
3124                state->ctx_dma = 0;
3125                return -ENOMEM;
3126        }
3127
3128        dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3129
3130        return 0;
3131}
3132
/*
 * ahash_set_sh_desc() - (re)build the four per-session shared descriptors
 * (UPDATE, UPDATE_FIRST, FINALIZE, DIGEST) and sync them to the device.
 *
 * UPDATE and FINALIZE import the running context (import argument true),
 * while UPDATE_FIRST and DIGEST start from scratch (false). Each descriptor
 * is pushed out through its pre-mapped flc_dma[] address.
 *
 * Always returns 0.
 */
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int digestsize = crypto_ahash_digestsize(ahash);
        struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
        struct caam_flc *flc;
        u32 *desc;

        /* ahash_update shared descriptor: ctx in, ctx out */
        flc = &ctx->flc[UPDATE];
        desc = flc->sh_desc;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
                          ctx->ctx_len, true, priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
        print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* ahash_update_first shared descriptor: no ctx in, ctx out */
        flc = &ctx->flc[UPDATE_FIRST];
        desc = flc->sh_desc;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
                          ctx->ctx_len, false, priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
        print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* ahash_final shared descriptor: ctx in, digest out */
        flc = &ctx->flc[FINALIZE];
        desc = flc->sh_desc;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
                          ctx->ctx_len, true, priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
        print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        /* ahash_digest shared descriptor: no ctx in, digest out */
        flc = &ctx->flc[DIGEST];
        desc = flc->sh_desc;
        cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
                          ctx->ctx_len, false, priv->sec_attr.era);
        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
                                   desc_bytes(desc), DMA_BIDIRECTIONAL);
        print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        return 0;
}
3191
/*
 * Result/wait context shared between hash_digest_key() and its
 * completion callback split_key_sh_done().
 */
struct split_key_sh_result {
        struct completion completion;   /* completed by split_key_sh_done() */
        int err;        /* 0, or status translated by caam_qi2_strstatus() */
        struct device *dev;             /* dpseci device, for reporting */
};
3197
3198static void split_key_sh_done(void *cbk_ctx, u32 err)
3199{
3200        struct split_key_sh_result *res = cbk_ctx;
3201
3202        dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3203
3204        res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3205        complete(&res->completion);
3206}
3207
/*
 * hash_digest_key() - condense an over-long key by hashing it in place:
 * on return @key holds the unkeyed digest and *@keylen == @digestsize.
 *
 * Builds a one-shot shared descriptor, enqueues a single in-place job
 * (input and output frame list entries both point at @key) and waits
 * synchronously for the completion callback.
 *
 * Return: 0 on success, -ENOMEM on allocation/mapping failure, or the
 * translated engine status. NOTE(review): *@keylen is overwritten even on
 * the error paths; the only caller bails out on error, so this is benign.
 */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
                           u32 digestsize)
{
        struct caam_request *req_ctx;
        u32 *desc;
        struct split_key_sh_result result;
        dma_addr_t key_dma;
        struct caam_flc *flc;
        dma_addr_t flc_dma;
        int ret = -ENOMEM;
        struct dpaa2_fl_entry *in_fle, *out_fle;

        req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
        if (!req_ctx)
                return -ENOMEM;

        in_fle = &req_ctx->fd_flt[1];
        out_fle = &req_ctx->fd_flt[0];

        flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
        if (!flc)
                goto err_flc;

        /* Bidirectional: @key is both the hash input and digest output */
        key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(ctx->dev, key_dma)) {
                dev_err(ctx->dev, "unable to map key memory\n");
                goto err_key_dma;
        }

        desc = flc->sh_desc;

        init_sh_desc(desc, 0);

        /* descriptor to perform unkeyed hash on key_in */
        append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
                         OP_ALG_AS_INITFINAL);
        append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
                             FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
        append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
                         LDST_SRCDST_BYTE_CONTEXT);

        flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
        flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
                                 desc_bytes(desc), DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->dev, flc_dma)) {
                dev_err(ctx->dev, "unable to map shared descriptor\n");
                goto err_flc_dma;
        }

        dpaa2_fl_set_final(in_fle, true);
        dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(in_fle, key_dma);
        dpaa2_fl_set_len(in_fle, *keylen);
        dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
        dpaa2_fl_set_addr(out_fle, key_dma);
        dpaa2_fl_set_len(out_fle, digestsize);

        print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
        print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                             1);

        result.err = 0;
        init_completion(&result.completion);
        result.dev = ctx->dev;

        req_ctx->flc = flc;
        req_ctx->flc_dma = flc_dma;
        req_ctx->cbk = split_key_sh_done;
        req_ctx->ctx = &result;

        ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
        if (ret == -EINPROGRESS) {
                /* in progress */
                wait_for_completion(&result.completion);
                ret = result.err;
                print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
                                     DUMP_PREFIX_ADDRESS, 16, 4, key,
                                     digestsize, 1);
        }

        /* Success path falls through the unwind below the first unmap */
        dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
                         DMA_TO_DEVICE);
err_flc_dma:
        dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
err_key_dma:
        kfree(flc);
err_flc:
        kfree(req_ctx);

        *keylen = digestsize;

        return ret;
}
3304
/*
 * ahash_setkey() - set the HMAC key for an ahash session.
 *
 * A key longer than the algorithm block size is first condensed to digest
 * size via hash_digest_key(). The (possibly digested) key is then recorded
 * in the session's adata and the shared descriptors are rebuilt.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL otherwise.
 */
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
                        unsigned int keylen)
{
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
        unsigned int digestsize = crypto_ahash_digestsize(ahash);
        int ret;
        u8 *hashed_key = NULL;

        dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);

        /* HMAC convention: keys longer than the block size get hashed down */
        if (keylen > blocksize) {
                hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
                if (!hashed_key)
                        return -ENOMEM;
                /* updates keylen to digestsize on success */
                ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
                if (ret)
                        goto bad_free_key;
                key = hashed_key;
        }

        ctx->adata.keylen = keylen;
        ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
                                              OP_ALG_ALGSEL_MASK);
        if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
                goto bad_free_key;

        ctx->adata.key_virt = key;
        ctx->adata.key_inline = true;

        /*
         * In case |user key| > |derived key|, using DKP<imm,imm> would result
         * in invalid opcodes (last bytes of user key) in the resulting
         * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
         * addresses are needed.
         */
        if (keylen > ctx->adata.keylen_pad) {
                memcpy(ctx->key, key, keylen);
                /* push the CPU-side copy out to the pre-mapped key buffer */
                dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
                                           ctx->adata.keylen_pad,
                                           DMA_TO_DEVICE);
        }

        ret = ahash_set_sh_desc(ahash);
        kfree(hashed_key);
        return ret;
bad_free_key:
        kfree(hashed_key);
        return -EINVAL;
}
3355
3356static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3357                               struct ahash_request *req)
3358{
3359        struct caam_hash_state *state = ahash_request_ctx(req);
3360
3361        if (edesc->src_nents)
3362                dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3363
3364        if (edesc->qm_sg_bytes)
3365                dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3366                                 DMA_TO_DEVICE);
3367
3368        if (state->buf_dma) {
3369                dma_unmap_single(dev, state->buf_dma, state->buflen,
3370                                 DMA_TO_DEVICE);
3371                state->buf_dma = 0;
3372        }
3373}
3374
3375static inline void ahash_unmap_ctx(struct device *dev,
3376                                   struct ahash_edesc *edesc,
3377                                   struct ahash_request *req, u32 flag)
3378{
3379        struct caam_hash_state *state = ahash_request_ctx(req);
3380
3381        if (state->ctx_dma) {
3382                dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3383                state->ctx_dma = 0;
3384        }
3385        ahash_unmap(dev, edesc, req);
3386}
3387
3388static void ahash_done(void *cbk_ctx, u32 status)
3389{
3390        struct crypto_async_request *areq = cbk_ctx;
3391        struct ahash_request *req = ahash_request_cast(areq);
3392        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3393        struct caam_hash_state *state = ahash_request_ctx(req);
3394        struct ahash_edesc *edesc = state->caam_req.edesc;
3395        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3396        int digestsize = crypto_ahash_digestsize(ahash);
3397        int ecode = 0;
3398
3399        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3400
3401        if (unlikely(status))
3402                ecode = caam_qi2_strstatus(ctx->dev, status);
3403
3404        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3405        memcpy(req->result, state->caam_ctx, digestsize);
3406        qi_cache_free(edesc);
3407
3408        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3409                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3410                             ctx->ctx_len, 1);
3411
3412        req->base.complete(&req->base, ecode);
3413}
3414
3415static void ahash_done_bi(void *cbk_ctx, u32 status)
3416{
3417        struct crypto_async_request *areq = cbk_ctx;
3418        struct ahash_request *req = ahash_request_cast(areq);
3419        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3420        struct caam_hash_state *state = ahash_request_ctx(req);
3421        struct ahash_edesc *edesc = state->caam_req.edesc;
3422        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3423        int ecode = 0;
3424
3425        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3426
3427        if (unlikely(status))
3428                ecode = caam_qi2_strstatus(ctx->dev, status);
3429
3430        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3431        qi_cache_free(edesc);
3432
3433        scatterwalk_map_and_copy(state->buf, req->src,
3434                                 req->nbytes - state->next_buflen,
3435                                 state->next_buflen, 0);
3436        state->buflen = state->next_buflen;
3437
3438        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3439                             DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3440                             state->buflen, 1);
3441
3442        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3443                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3444                             ctx->ctx_len, 1);
3445        if (req->result)
3446                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3447                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3448                                     crypto_ahash_digestsize(ahash), 1);
3449
3450        req->base.complete(&req->base, ecode);
3451}
3452
3453static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3454{
3455        struct crypto_async_request *areq = cbk_ctx;
3456        struct ahash_request *req = ahash_request_cast(areq);
3457        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3458        struct caam_hash_state *state = ahash_request_ctx(req);
3459        struct ahash_edesc *edesc = state->caam_req.edesc;
3460        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3461        int digestsize = crypto_ahash_digestsize(ahash);
3462        int ecode = 0;
3463
3464        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3465
3466        if (unlikely(status))
3467                ecode = caam_qi2_strstatus(ctx->dev, status);
3468
3469        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3470        memcpy(req->result, state->caam_ctx, digestsize);
3471        qi_cache_free(edesc);
3472
3473        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3474                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3475                             ctx->ctx_len, 1);
3476
3477        req->base.complete(&req->base, ecode);
3478}
3479
3480static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3481{
3482        struct crypto_async_request *areq = cbk_ctx;
3483        struct ahash_request *req = ahash_request_cast(areq);
3484        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3485        struct caam_hash_state *state = ahash_request_ctx(req);
3486        struct ahash_edesc *edesc = state->caam_req.edesc;
3487        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3488        int ecode = 0;
3489
3490        dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3491
3492        if (unlikely(status))
3493                ecode = caam_qi2_strstatus(ctx->dev, status);
3494
3495        ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3496        qi_cache_free(edesc);
3497
3498        scatterwalk_map_and_copy(state->buf, req->src,
3499                                 req->nbytes - state->next_buflen,
3500                                 state->next_buflen, 0);
3501        state->buflen = state->next_buflen;
3502
3503        print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3504                             DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3505                             state->buflen, 1);
3506
3507        print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3508                             DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3509                             ctx->ctx_len, 1);
3510        if (req->result)
3511                print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3512                                     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3513                                     crypto_ahash_digestsize(ahash), 1);
3514
3515        req->base.complete(&req->base, ecode);
3516}
3517
/*
 * ahash_update_ctx - .update handler for the "ctx" ahash path: feed full
 * blocks of new data (plus any previously buffered partial block) through
 * the CAAM UPDATE flow, keeping the running hash state in state->ctx_dma.
 * Any trailing sub-block remainder is kept in state->buf for the next call.
 * Returns -EINPROGRESS/-EBUSY on successful enqueue, 0 if everything was
 * buffered, or a negative error code.
 */
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;

	/*
	 * Split total pending input: next_buflen is the sub-block remainder
	 * carried over to the next update (blocksize assumed power of two);
	 * to_hash is the whole-block portion submitted to hardware now.
	 */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		/* Only this many bytes of req->src go to hardware this round */
		int src_len = req->nbytes - *next_buflen;

		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			/* All of the new data stays in the buffer */
			mapped_nents = 0;
		}

		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}

		edesc->src_nents = src_nents;
		/* One S/G entry for the running ctx, plus one if a buffered
		 * partial block must be prepended to the source data.
		 */
		qm_sg_src_index = 1 + (*buflen ? 1 : 0);
		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
			      sizeof(*sg_table);
		sg_table = &edesc->sgt[0];

		/* Entry 0: running hash context (read and written back) */
		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
				       DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		/* Entry 1 (if buflen): previously buffered partial block */
		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_qm_sg_last(req->src, src_len,
					 sg_table + qm_sg_src_index, 0);
		} else {
			/* No source data: mark the last used entry final */
			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
					   true);
		}

		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;

		/* Build the frame list: in = S/G (ctx + data), out = ctx */
		memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
		dpaa2_fl_set_final(in_fle, true);
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, state->ctx_dma);
		dpaa2_fl_set_len(out_fle, ctx->ctx_len);

		req_ctx->flc = &ctx->flc[UPDATE];
		req_ctx->flc_dma = ctx->flc_dma[UPDATE];
		req_ctx->cbk = ahash_done_bi;
		req_ctx->ctx = &req->base;
		req_ctx->edesc = edesc;

		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		/* -EINPROGRESS (or backlogged -EBUSY) means the callback
		 * owns cleanup; anything else unwinds here.
		 */
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;
	} else if (*next_buflen) {
		/* Less than a full block pending: just append to the buffer */
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
3636
/*
 * ahash_final_ctx - .final handler for the "ctx" ahash path: run the
 * FINALIZE flow over the running hash context plus any buffered partial
 * block (no new request data), writing the digest back through
 * state->ctx_dma. Returns -EINPROGRESS/-EBUSY on successful enqueue or a
 * negative error code.
 */
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return -ENOMEM;

	/* S/G table: the running ctx, plus the buffer when non-empty */
	qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	/* Mark the last populated entry (ctx or buffer) as final */
	dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	/* Frame list: in = S/G (ctx [+ buf]), out = digest into ctx buffer */
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	/* On successful (or backlogged) enqueue the callback frees edesc */
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
3707
/*
 * ahash_finup_ctx - .finup handler for the "ctx" ahash path: hash the
 * running context, any buffered partial block, and all of req->src in a
 * single FINALIZE operation, writing the digest through state->ctx_dma.
 * Returns -EINPROGRESS/-EBUSY on successful enqueue or a negative error
 * code.
 */
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, qm_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		/* Zero-length source: only ctx (and buffer) are hashed */
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	edesc->src_nents = src_nents;
	/* One S/G entry for the ctx, plus one for the buffer if non-empty */
	qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
		      sizeof(*sg_table);
	sg_table = &edesc->sgt[0];

	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;

	/* Source entries follow ctx/buffer; last one marked final */
	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);

	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;

	/* Frame list: in = S/G (ctx [+ buf] + src), out = digest */
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
	dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[FINALIZE];
	req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
	req_ctx->cbk = ahash_done_ctx_src;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	/* On successful (or backlogged) enqueue the callback frees edesc */
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
3801
/*
 * ahash_digest - .digest handler: hash all of req->src in one DIGEST
 * operation (no prior state), with the digest DMA-mapped into
 * state->caam_ctx. A multi-entry source uses a QM S/G table; a single
 * mapped entry is passed directly. Returns -EINPROGRESS/-EBUSY on
 * successful enqueue or a negative error code.
 */
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct caam_hash_state *state = ahash_request_ctx(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	/* No buffered data is involved in a one-shot digest */
	state->buf_dma = 0;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to map source for DMA\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}

	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}

	edesc->src_nents = src_nents;
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));

	if (mapped_nents > 1) {
		/* Multiple entries: build a QM S/G table for the input */
		int qm_sg_bytes;
		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];

		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
						  qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			goto unmap;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		/* Zero or one entry: pass the (single) mapping directly */
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
	}

	/* Map the digest destination (device writes the result here) */
	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}

	dpaa2_fl_set_final(in_fle, true);
	dpaa2_fl_set_len(in_fle, req->nbytes);
	dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
	dpaa2_fl_set_addr(out_fle, state->ctx_dma);
	dpaa2_fl_set_len(out_fle, digestsize);

	req_ctx->flc = &ctx->flc[DIGEST];
	req_ctx->flc_dma = ctx->flc_dma[DIGEST];
	req_ctx->cbk = ahash_done;
	req_ctx->ctx = &req->base;
	req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	/* On successful (or backlogged) enqueue the callback frees edesc */
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
3896
3897static int ahash_final_no_ctx(struct ahash_request *req)
3898{
3899        struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3900        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
3901        struct caam_hash_state *state = ahash_request_ctx(req);
3902        struct caam_request *req_ctx = &state->caam_req;
3903        struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3904        struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3905        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3906                      GFP_KERNEL : GFP_ATOMIC;
3907        u8 *buf = state->buf;
3908        int buflen = state->buflen;
3909        int digestsize = crypto_ahash_digestsize(ahash);
3910        struct ahash_edesc *edesc;
3911        int ret = -ENOMEM;
3912
3913        /* allocate space for base edesc and link tables */
3914        edesc = qi_cache_zalloc