linux/drivers/crypto/nx/nx-sha256.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * SHA-256 routines supporting the Power 7+ Nest Accelerators driver
   4 *
   5 * Copyright (C) 2011-2012 International Business Machines Inc.
   6 *
   7 * Author: Kent Yoder <yoder1@us.ibm.com>
   8 */
   9
  10#include <crypto/internal/hash.h>
  11#include <crypto/sha2.h>
  12#include <linux/module.h>
  13#include <asm/vio.h>
  14#include <asm/byteorder.h>
  15
  16#include "nx_csbcpb.h"
  17#include "nx.h"
  18
  19struct sha256_state_be {
  20        __be32 state[SHA256_DIGEST_SIZE / 4];
  21        u64 count;
  22        u8 buf[SHA256_BLOCK_SIZE];
  23};
  24
  25static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
  26{
  27        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
  28        int err;
  29
  30        err = nx_crypto_ctx_sha_init(tfm);
  31        if (err)
  32                return err;
  33
  34        nx_ctx_init(nx_ctx, HCOP_FC_SHA);
  35
  36        nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
  37
  38        NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
  39
  40        return 0;
  41}
  42
  43static int nx_sha256_init(struct shash_desc *desc) {
  44        struct sha256_state_be *sctx = shash_desc_ctx(desc);
  45
  46        memset(sctx, 0, sizeof *sctx);
  47
  48        sctx->state[0] = __cpu_to_be32(SHA256_H0);
  49        sctx->state[1] = __cpu_to_be32(SHA256_H1);
  50        sctx->state[2] = __cpu_to_be32(SHA256_H2);
  51        sctx->state[3] = __cpu_to_be32(SHA256_H3);
  52        sctx->state[4] = __cpu_to_be32(SHA256_H4);
  53        sctx->state[5] = __cpu_to_be32(SHA256_H5);
  54        sctx->state[6] = __cpu_to_be32(SHA256_H6);
  55        sctx->state[7] = __cpu_to_be32(SHA256_H7);
  56        sctx->count = 0;
  57
  58        return 0;
  59}
  60
  61static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
  62                            unsigned int len)
  63{
  64        struct sha256_state_be *sctx = shash_desc_ctx(desc);
  65        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
  66        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
  67        struct nx_sg *out_sg;
  68        u64 to_process = 0, leftover, total;
  69        unsigned long irq_flags;
  70        int rc = 0;
  71        int data_len;
  72        u32 max_sg_len;
  73        u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
  74
  75        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
  76
  77        /* 2 cases for total data len:
  78         *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
  79         *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
  80         */
  81        total = (sctx->count % SHA256_BLOCK_SIZE) + len;
  82        if (total < SHA256_BLOCK_SIZE) {
  83                memcpy(sctx->buf + buf_len, data, len);
  84                sctx->count += len;
  85                goto out;
  86        }
  87
  88        memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
  89        NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
  90        NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
  91
  92        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
  93                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
  94        max_sg_len = min_t(u64, max_sg_len,
  95                        nx_ctx->ap->databytelen/NX_PAGE_SIZE);
  96
  97        data_len = SHA256_DIGEST_SIZE;
  98        out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
  99                                  &data_len, max_sg_len);
 100        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 101
 102        if (data_len != SHA256_DIGEST_SIZE) {
 103                rc = -EINVAL;
 104                goto out;
 105        }
 106
 107        do {
 108                int used_sgs = 0;
 109                struct nx_sg *in_sg = nx_ctx->in_sg;
 110
 111                if (buf_len) {
 112                        data_len = buf_len;
 113                        in_sg = nx_build_sg_list(in_sg,
 114                                                 (u8 *) sctx->buf,
 115                                                 &data_len,
 116                                                 max_sg_len);
 117
 118                        if (data_len != buf_len) {
 119                                rc = -EINVAL;
 120                                goto out;
 121                        }
 122                        used_sgs = in_sg - nx_ctx->in_sg;
 123                }
 124
 125                /* to_process: SHA256_BLOCK_SIZE aligned chunk to be
 126                 * processed in this iteration. This value is restricted
 127                 * by sg list limits and number of sgs we already used
 128                 * for leftover data. (see above)
 129                 * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len,
 130                 * but because data may not be aligned, we need to account
 131                 * for that too. */
 132                to_process = min_t(u64, total,
 133                        (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
 134                to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
 135
 136                data_len = to_process - buf_len;
 137                in_sg = nx_build_sg_list(in_sg, (u8 *) data,
 138                                         &data_len, max_sg_len);
 139
 140                nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 141
 142                to_process = data_len + buf_len;
 143                leftover = total - to_process;
 144
 145                /*
 146                 * we've hit the nx chip previously and we're updating
 147                 * again, so copy over the partial digest.
 148                 */
 149                memcpy(csbcpb->cpb.sha256.input_partial_digest,
 150                               csbcpb->cpb.sha256.message_digest,
 151                               SHA256_DIGEST_SIZE);
 152
 153                if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
 154                        rc = -EINVAL;
 155                        goto out;
 156                }
 157
 158                rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 159                if (rc)
 160                        goto out;
 161
 162                atomic_inc(&(nx_ctx->stats->sha256_ops));
 163
 164                total -= to_process;
 165                data += to_process - buf_len;
 166                buf_len = 0;
 167
 168        } while (leftover >= SHA256_BLOCK_SIZE);
 169
 170        /* copy the leftover back into the state struct */
 171        if (leftover)
 172                memcpy(sctx->buf, data, leftover);
 173
 174        sctx->count += len;
 175        memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 176out:
 177        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 178        return rc;
 179}
 180
 181static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 182{
 183        struct sha256_state_be *sctx = shash_desc_ctx(desc);
 184        struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 185        struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
 186        struct nx_sg *in_sg, *out_sg;
 187        unsigned long irq_flags;
 188        u32 max_sg_len;
 189        int rc = 0;
 190        int len;
 191
 192        spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 193
 194        max_sg_len = min_t(u64, nx_ctx->ap->sglen,
 195                        nx_driver.of.max_sg_len/sizeof(struct nx_sg));
 196        max_sg_len = min_t(u64, max_sg_len,
 197                        nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 198
 199        /* final is represented by continuing the operation and indicating that
 200         * this is not an intermediate operation */
 201        if (sctx->count >= SHA256_BLOCK_SIZE) {
 202                /* we've hit the nx chip previously, now we're finalizing,
 203                 * so copy over the partial digest */
 204                memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
 205                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 206                NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 207        } else {
 208                NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 209                NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
 210        }
 211
 212        csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
 213
 214        len = sctx->count & (SHA256_BLOCK_SIZE - 1);
 215        in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
 216                                 &len, max_sg_len);
 217
 218        if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
 219                rc = -EINVAL;
 220                goto out;
 221        }
 222
 223        len = SHA256_DIGEST_SIZE;
 224        out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
 225
 226        if (len != SHA256_DIGEST_SIZE) {
 227                rc = -EINVAL;
 228                goto out;
 229        }
 230
 231        nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 232        nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 233        if (!nx_ctx->op.outlen) {
 234                rc = -EINVAL;
 235                goto out;
 236        }
 237
 238        rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
 239        if (rc)
 240                goto out;
 241
 242        atomic_inc(&(nx_ctx->stats->sha256_ops));
 243
 244        atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
 245        memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 246out:
 247        spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 248        return rc;
 249}
 250
 251static int nx_sha256_export(struct shash_desc *desc, void *out)
 252{
 253        struct sha256_state_be *sctx = shash_desc_ctx(desc);
 254
 255        memcpy(out, sctx, sizeof(*sctx));
 256
 257        return 0;
 258}
 259
 260static int nx_sha256_import(struct shash_desc *desc, const void *in)
 261{
 262        struct sha256_state_be *sctx = shash_desc_ctx(desc);
 263
 264        memcpy(sctx, in, sizeof(*sctx));
 265
 266        return 0;
 267}
 268
 269struct shash_alg nx_shash_sha256_alg = {
 270        .digestsize = SHA256_DIGEST_SIZE,
 271        .init       = nx_sha256_init,
 272        .update     = nx_sha256_update,
 273        .final      = nx_sha256_final,
 274        .export     = nx_sha256_export,
 275        .import     = nx_sha256_import,
 276        .descsize   = sizeof(struct sha256_state_be),
 277        .statesize  = sizeof(struct sha256_state_be),
 278        .base       = {
 279                .cra_name        = "sha256",
 280                .cra_driver_name = "sha256-nx",
 281                .cra_priority    = 300,
 282                .cra_blocksize   = SHA256_BLOCK_SIZE,
 283                .cra_module      = THIS_MODULE,
 284                .cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 285                .cra_init        = nx_crypto_ctx_sha256_init,
 286                .cra_exit        = nx_crypto_ctx_exit,
 287        }
 288};
 289
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.