linux/drivers/crypto/atmel-aes.c
/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/xts.h>
#include <crypto/internal/aead.h>
#include <linux/platform_data/crypto-atmel.h>
#include <dt-bindings/dma/at91.h>
#include "atmel-aes-regs.h"

#define ATMEL_AES_PRIORITY      300

#define ATMEL_AES_BUFFER_ORDER  2
#define ATMEL_AES_BUFFER_SIZE   (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)

#define CFB8_BLOCK_SIZE         1
#define CFB16_BLOCK_SIZE        2
#define CFB32_BLOCK_SIZE        4
#define CFB64_BLOCK_SIZE        8

#define SIZE_IN_WORDS(x)        ((x) >> 2)

/* AES flags */
/* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */
#define AES_FLAGS_ENCRYPT       AES_MR_CYPHER_ENC
#define AES_FLAGS_GTAGEN        AES_MR_GTAGEN
#define AES_FLAGS_OPMODE_MASK   (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB           AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC           AES_MR_OPMOD_CBC
#define AES_FLAGS_OFB           AES_MR_OPMOD_OFB
#define AES_FLAGS_CFB128        (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
#define AES_FLAGS_CFB64         (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
#define AES_FLAGS_CFB32         (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
#define AES_FLAGS_CFB16         (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
#define AES_FLAGS_CFB8          (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR           AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM           AES_MR_OPMOD_GCM
#define AES_FLAGS_XTS           AES_MR_OPMOD_XTS

#define AES_FLAGS_MODE_MASK     (AES_FLAGS_OPMODE_MASK |        \
                                 AES_FLAGS_ENCRYPT |            \
                                 AES_FLAGS_GTAGEN)

#define AES_FLAGS_INIT          BIT(2)
#define AES_FLAGS_BUSY          BIT(3)
#define AES_FLAGS_DUMP_REG      BIT(4)

#define AES_FLAGS_PERSISTENT    (AES_FLAGS_INIT | AES_FLAGS_BUSY)

#define ATMEL_AES_QUEUE_LENGTH  50

#define ATMEL_AES_DMA_THRESHOLD         256


struct atmel_aes_caps {
        bool                    has_dualbuff;
        bool                    has_cfb64;
        bool                    has_ctr32;
        bool                    has_gcm;
        bool                    has_xts;
        u32                     max_burst_size;
};

struct atmel_aes_dev;


typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *);


struct atmel_aes_base_ctx {
        struct atmel_aes_dev    *dd;
        atmel_aes_fn_t          start;
        int                     keylen;
        u32                     key[AES_KEYSIZE_256 / sizeof(u32)];
        u16                     block_size;
};

struct atmel_aes_ctx {
        struct atmel_aes_base_ctx       base;
};

struct atmel_aes_ctr_ctx {
        struct atmel_aes_base_ctx       base;

        u32                     iv[AES_BLOCK_SIZE / sizeof(u32)];
        size_t                  offset;
        struct scatterlist      src[2];
        struct scatterlist      dst[2];
};

struct atmel_aes_gcm_ctx {
        struct atmel_aes_base_ctx       base;

        struct scatterlist      src[2];
        struct scatterlist      dst[2];

        u32                     j0[AES_BLOCK_SIZE / sizeof(u32)];
        u32                     tag[AES_BLOCK_SIZE / sizeof(u32)];
        u32                     ghash[AES_BLOCK_SIZE / sizeof(u32)];
        size_t                  textlen;

        const u32               *ghash_in;
        u32                     *ghash_out;
        atmel_aes_fn_t          ghash_resume;
};

struct atmel_aes_xts_ctx {
        struct atmel_aes_base_ctx       base;

        u32                     key2[AES_KEYSIZE_256 / sizeof(u32)];
};

struct atmel_aes_reqctx {
        unsigned long           mode;
};

struct atmel_aes_dma {
        struct dma_chan         *chan;
        struct scatterlist      *sg;
        int                     nents;
        unsigned int            remainder;
        unsigned int            sg_len;
};

struct atmel_aes_dev {
        struct list_head        list;
        unsigned long           phys_base;
        void __iomem            *io_base;

        struct crypto_async_request     *areq;
        struct atmel_aes_base_ctx       *ctx;

        bool                    is_async;
        atmel_aes_fn_t          resume;
        atmel_aes_fn_t          cpu_transfer_complete;

        struct device           *dev;
        struct clk              *iclk;
        int                     irq;

        unsigned long           flags;

        spinlock_t              lock;
        struct crypto_queue     queue;

        struct tasklet_struct   done_task;
        struct tasklet_struct   queue_task;

        size_t                  total;
        size_t                  datalen;
        u32                     *data;

        struct atmel_aes_dma    src;
        struct atmel_aes_dma    dst;

        size_t                  buflen;
        void                    *buf;
        struct scatterlist      aligned_sg;
        struct scatterlist      *real_dst;

        struct atmel_aes_caps   caps;

        u32                     hw_version;
};

struct atmel_aes_drv {
        struct list_head        dev_list;
        spinlock_t              lock;
};

static struct atmel_aes_drv atmel_aes = {
        .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

#ifdef VERBOSE_DEBUG
static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz)
{
        switch (offset) {
        case AES_CR:
                return "CR";

        case AES_MR:
                return "MR";

        case AES_ISR:
                return "ISR";

        case AES_IMR:
                return "IMR";

        case AES_IER:
                return "IER";

        case AES_IDR:
                return "IDR";

        case AES_KEYWR(0):
        case AES_KEYWR(1):
        case AES_KEYWR(2):
        case AES_KEYWR(3):
        case AES_KEYWR(4):
        case AES_KEYWR(5):
        case AES_KEYWR(6):
        case AES_KEYWR(7):
                snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2);
                break;

        case AES_IDATAR(0):
        case AES_IDATAR(1):
        case AES_IDATAR(2):
        case AES_IDATAR(3):
                snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2);
                break;

        case AES_ODATAR(0):
        case AES_ODATAR(1):
        case AES_ODATAR(2):
        case AES_ODATAR(3):
                snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2);
                break;

        case AES_IVR(0):
        case AES_IVR(1):
        case AES_IVR(2):
        case AES_IVR(3):
                snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2);
                break;

        case AES_AADLENR:
                return "AADLENR";

        case AES_CLENR:
                return "CLENR";

        case AES_GHASHR(0):
        case AES_GHASHR(1):
        case AES_GHASHR(2):
        case AES_GHASHR(3):
                snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2);
                break;

        case AES_TAGR(0):
        case AES_TAGR(1):
        case AES_TAGR(2):
        case AES_TAGR(3):
                snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2);
                break;

        case AES_CTRR:
                return "CTRR";

        case AES_GCMHR(0):
        case AES_GCMHR(1):
        case AES_GCMHR(2):
        case AES_GCMHR(3):
                snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2);
                break;

        case AES_TWR(0):
        case AES_TWR(1):
        case AES_TWR(2):
        case AES_TWR(3):
                snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2);
                break;

        case AES_ALPHAR(0):
        case AES_ALPHAR(1):
        case AES_ALPHAR(2):
        case AES_ALPHAR(3):
                snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2);
                break;

        default:
                snprintf(tmp, sz, "0x%02x", offset);
                break;
        }

        return tmp;
}
#endif /* VERBOSE_DEBUG */

/* Shared functions */

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
        u32 value = readl_relaxed(dd->io_base + offset);

#ifdef VERBOSE_DEBUG
        if (dd->flags & AES_FLAGS_DUMP_REG) {
                char tmp[16];

                dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
                         atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
        }
#endif /* VERBOSE_DEBUG */

        return value;
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
                                        u32 offset, u32 value)
{
#ifdef VERBOSE_DEBUG
        if (dd->flags & AES_FLAGS_DUMP_REG) {
                char tmp[16];

                dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
                         atmel_aes_reg_name(offset, tmp, sizeof(tmp)));
        }
#endif /* VERBOSE_DEBUG */

        writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
                                        u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                *value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
                              const u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                atmel_aes_write(dd, offset, *value);
}

static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset,
                                        u32 *value)
{
        atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset,
                                         const u32 *value)
{
        atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE));
}

static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd,
                                                atmel_aes_fn_t resume)
{
        u32 isr = atmel_aes_read(dd, AES_ISR);

        if (unlikely(isr & AES_INT_DATARDY))
                return resume(dd);

        dd->resume = resume;
        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
        return -EINPROGRESS;
}

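/*
 * Return the number of padding bytes needed to round len up to the next
 * multiple of block_size. block_size is assumed to be a power of two, so
 * the remainder can be computed with a simple mask.
 */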
static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
{
        len &= block_size - 1;
        return len ? block_size - len : 0;
}

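/*
 * Bind a tfm context to an AES device: reuse the device already attached
 * to the context if there is one, otherwise grab the first device
 * registered in the driver list.
 */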
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
{
        struct atmel_aes_dev *aes_dd = NULL;
        struct atmel_aes_dev *tmp;

        spin_lock_bh(&atmel_aes.lock);
        if (!ctx->dd) {
                list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
                        aes_dd = tmp;
                        break;
                }
                ctx->dd = aes_dd;
        } else {
                aes_dd = ctx->dd;
        }

        spin_unlock_bh(&atmel_aes.lock);

        return aes_dd;
}

static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
        int err;

        err = clk_enable(dd->iclk);
        if (err)
                return err;

        if (!(dd->flags & AES_FLAGS_INIT)) {
                atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
                atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
                dd->flags |= AES_FLAGS_INIT;
        }

        return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
        return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
        int err;

        err = atmel_aes_hw_init(dd);
        if (err)
                return err;

        dd->hw_version = atmel_aes_get_version(dd);

        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

        clk_disable(dd->iclk);
        return 0;
}

static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd,
                                      const struct atmel_aes_reqctx *rctx)
{
        /* Clear all but persistent flags and set request flags. */
        dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode;
}

static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
{
        return (dd->flags & AES_FLAGS_ENCRYPT);
}

static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
{
        clk_disable(dd->iclk);
        dd->flags &= ~AES_FLAGS_BUSY;

        if (dd->is_async)
                dd->areq->complete(dd->areq, err);

        tasklet_schedule(&dd->queue_task);

        return err;
}

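/*
 * Program the Mode Register (key size, operating mode, start mode), then
 * load the key and, for every mode but ECB, the IV. MR must be written
 * before the IV registers.
 */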
static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma,
                                     const u32 *iv, const u32 *key, int keylen)
{
        u32 valmr = 0;

        /* MR register must be set before IV registers */
        if (keylen == AES_KEYSIZE_128)
                valmr |= AES_MR_KEYSIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                valmr |= AES_MR_KEYSIZE_192;
        else
                valmr |= AES_MR_KEYSIZE_256;

        valmr |= dd->flags & AES_FLAGS_MODE_MASK;

        if (use_dma) {
                valmr |= AES_MR_SMOD_IDATAR0;
                if (dd->caps.has_dualbuff)
                        valmr |= AES_MR_DUALBUFF;
        } else {
                valmr |= AES_MR_SMOD_AUTO;
        }

        atmel_aes_write(dd, AES_MR, valmr);

        atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen));

        if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB)
                atmel_aes_write_block(dd, AES_IVR(0), iv);
}

static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma,
                                        const u32 *iv)
{
        atmel_aes_write_ctrl_key(dd, use_dma, iv,
                                 dd->ctx->key, dd->ctx->keylen);
}

/* CPU transfer */

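/*
 * PIO transfer loop: read one output block back from ODATAR, push the next
 * input block into IDATAR, and wait on the DATARDY interrupt whenever the
 * hardware is not ready. The data is bounced through dd->buf and copied
 * back to the real destination scatterlist at the end.
 */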
static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd)
{
        int err = 0;
        u32 isr;

        for (;;) {
                atmel_aes_read_block(dd, AES_ODATAR(0), dd->data);
                dd->data += 4;
                dd->datalen -= AES_BLOCK_SIZE;

                if (dd->datalen < AES_BLOCK_SIZE)
                        break;

                atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);

                isr = atmel_aes_read(dd, AES_ISR);
                if (!(isr & AES_INT_DATARDY)) {
                        dd->resume = atmel_aes_cpu_transfer;
                        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
                        return -EINPROGRESS;
                }
        }

        if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
                                 dd->buf, dd->total))
                err = -EINVAL;

        if (err)
                return atmel_aes_complete(dd, err);

        return dd->cpu_transfer_complete(dd);
}

static int atmel_aes_cpu_start(struct atmel_aes_dev *dd,
                               struct scatterlist *src,
                               struct scatterlist *dst,
                               size_t len,
                               atmel_aes_fn_t resume)
{
        size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE);

        if (unlikely(len == 0))
                return -EINVAL;

        sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);

        dd->total = len;
        dd->real_dst = dst;
        dd->cpu_transfer_complete = resume;
        dd->datalen = len + padlen;
        dd->data = (u32 *)dd->buf;
        atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
        return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer);
}


/* DMA transfer */

static void atmel_aes_dma_callback(void *data);

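/*
 * Check whether a scatterlist can be fed to the DMA engine as-is: every
 * segment must be 32-bit aligned and span a whole number of blocks, except
 * possibly the last one, which is truncated to the requested length. The
 * clipped remainder is saved so atmel_aes_restore_sg() can undo the
 * truncation once the transfer completes.
 */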
static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd,
                                    struct scatterlist *sg,
                                    size_t len,
                                    struct atmel_aes_dma *dma)
{
        int nents;

        if (!IS_ALIGNED(len, dd->ctx->block_size))
                return false;

        for (nents = 0; sg; sg = sg_next(sg), ++nents) {
                if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                        return false;

                if (len <= sg->length) {
                        if (!IS_ALIGNED(len, dd->ctx->block_size))
                                return false;

                        dma->nents = nents + 1;
                        dma->remainder = sg->length - len;
                        sg->length = len;
                        return true;
                }

                if (!IS_ALIGNED(sg->length, dd->ctx->block_size))
                        return false;

                len -= sg->length;
        }

        return false;
}

static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma)
{
        struct scatterlist *sg = dma->sg;
        int nents = dma->nents;

        if (!dma->remainder)
                return;

        while (--nents > 0 && sg)
                sg = sg_next(sg);

        if (!sg)
                return;

        sg->length += dma->remainder;
}

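/*
 * DMA-map the source and destination scatterlists. If either one does not
 * satisfy the alignment constraints above, the transfer is bounced through
 * the pre-allocated, block-padded buffer dd->buf instead.
 */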
static int atmel_aes_map(struct atmel_aes_dev *dd,
                         struct scatterlist *src,
                         struct scatterlist *dst,
                         size_t len)
{
        bool src_aligned, dst_aligned;
        size_t padlen;

        dd->total = len;
        dd->src.sg = src;
        dd->dst.sg = dst;
        dd->real_dst = dst;

        src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src);
        if (src == dst)
                dst_aligned = src_aligned;
        else
                dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst);
        if (!src_aligned || !dst_aligned) {
                padlen = atmel_aes_padlen(len, dd->ctx->block_size);

                if (dd->buflen < len + padlen)
                        return -ENOMEM;

                if (!src_aligned) {
                        sg_copy_to_buffer(src, sg_nents(src), dd->buf, len);
                        dd->src.sg = &dd->aligned_sg;
                        dd->src.nents = 1;
                        dd->src.remainder = 0;
                }

                if (!dst_aligned) {
                        dd->dst.sg = &dd->aligned_sg;
                        dd->dst.nents = 1;
                        dd->dst.remainder = 0;
                }

                sg_init_table(&dd->aligned_sg, 1);
                sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen);
        }

        if (dd->src.sg == dd->dst.sg) {
                dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
                                            DMA_BIDIRECTIONAL);
                dd->dst.sg_len = dd->src.sg_len;
                if (!dd->src.sg_len)
                        return -EFAULT;
        } else {
                dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents,
                                            DMA_TO_DEVICE);
                if (!dd->src.sg_len)
                        return -EFAULT;

                dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents,
                                            DMA_FROM_DEVICE);
                if (!dd->dst.sg_len) {
                        dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
                                     DMA_TO_DEVICE);
                        return -EFAULT;
                }
        }

        return 0;
}

static void atmel_aes_unmap(struct atmel_aes_dev *dd)
{
        if (dd->src.sg == dd->dst.sg) {
                dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
                             DMA_BIDIRECTIONAL);

                if (dd->src.sg != &dd->aligned_sg)
                        atmel_aes_restore_sg(&dd->src);
        } else {
                dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents,
                             DMA_FROM_DEVICE);

                if (dd->dst.sg != &dd->aligned_sg)
                        atmel_aes_restore_sg(&dd->dst);

                dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents,
                             DMA_TO_DEVICE);

                if (dd->src.sg != &dd->aligned_sg)
                        atmel_aes_restore_sg(&dd->src);
        }

        if (dd->dst.sg == &dd->aligned_sg)
                sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst),
                                    dd->buf, dd->total);
}

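/*
 * Configure and submit one slave DMA transfer: mem-to-dev feeds IDATAR0,
 * dev-to-mem drains ODATAR0. Only the output channel gets a completion
 * callback, since the request is only done once the last output block has
 * been written back to memory.
 */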
static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd,
                                        enum dma_slave_buswidth addr_width,
                                        enum dma_transfer_direction dir,
                                        u32 maxburst)
{
        struct dma_async_tx_descriptor *desc;
        struct dma_slave_config config;
        dma_async_tx_callback callback;
        struct atmel_aes_dma *dma;
        int err;

        memset(&config, 0, sizeof(config));
        config.direction = dir;
        config.src_addr_width = addr_width;
        config.dst_addr_width = addr_width;
        config.src_maxburst = maxburst;
        config.dst_maxburst = maxburst;

        switch (dir) {
        case DMA_MEM_TO_DEV:
                dma = &dd->src;
                callback = NULL;
                config.dst_addr = dd->phys_base + AES_IDATAR(0);
                break;

        case DMA_DEV_TO_MEM:
                dma = &dd->dst;
                callback = atmel_aes_dma_callback;
                config.src_addr = dd->phys_base + AES_ODATAR(0);
                break;

        default:
                return -EINVAL;
        }

        err = dmaengine_slave_config(dma->chan, &config);
        if (err)
                return err;

        desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -ENOMEM;

        desc->callback = callback;
        desc->callback_param = dd;
        dmaengine_submit(desc);
        dma_async_issue_pending(dma->chan);

        return 0;
}

static void atmel_aes_dma_transfer_stop(struct atmel_aes_dev *dd,
                                        enum dma_transfer_direction dir)
{
        struct atmel_aes_dma *dma;

        switch (dir) {
        case DMA_MEM_TO_DEV:
                dma = &dd->src;
                break;

        case DMA_DEV_TO_MEM:
                dma = &dd->dst;
                break;

        default:
                return;
        }

        dmaengine_terminate_all(dma->chan);
}

static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
                               struct scatterlist *src,
                               struct scatterlist *dst,
                               size_t len,
                               atmel_aes_fn_t resume)
{
        enum dma_slave_buswidth addr_width;
        u32 maxburst;
        int err;

        switch (dd->ctx->block_size) {
        case CFB8_BLOCK_SIZE:
                addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
                maxburst = 1;
                break;

        case CFB16_BLOCK_SIZE:
                addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                maxburst = 1;
                break;

        case CFB32_BLOCK_SIZE:
        case CFB64_BLOCK_SIZE:
                addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                maxburst = 1;
                break;

        case AES_BLOCK_SIZE:
                addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                maxburst = dd->caps.max_burst_size;
                break;

        default:
                err = -EINVAL;
                goto exit;
        }

        err = atmel_aes_map(dd, src, dst, len);
        if (err)
                goto exit;

        dd->resume = resume;

        /* Set output DMA transfer first */
        err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM,
                                           maxburst);
        if (err)
                goto unmap;

        /* Then set input DMA transfer */
        err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV,
                                           maxburst);
        if (err)
                goto output_transfer_stop;

        return -EINPROGRESS;

output_transfer_stop:
        atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
unmap:
        atmel_aes_unmap(dd);
exit:
        return atmel_aes_complete(dd, err);
}

static void atmel_aes_dma_stop(struct atmel_aes_dev *dd)
{
        atmel_aes_dma_transfer_stop(dd, DMA_MEM_TO_DEV);
        atmel_aes_dma_transfer_stop(dd, DMA_DEV_TO_MEM);
        atmel_aes_unmap(dd);
}

static void atmel_aes_dma_callback(void *data)
{
        struct atmel_aes_dev *dd = data;

        atmel_aes_dma_stop(dd);
        dd->is_async = true;
        (void)dd->resume(dd);
}

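/*
 * Enqueue new_areq (if any) and, unless the device is already busy, dequeue
 * and start the next request. When the dequeued request is the caller's
 * own, it is handled in place and the start() status is returned; otherwise
 * the enqueue status is returned and completion is reported asynchronously.
 */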
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
                                  struct crypto_async_request *new_areq)
{
        struct crypto_async_request *areq, *backlog;
        struct atmel_aes_base_ctx *ctx;
        unsigned long flags;
        int err, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (new_areq)
                ret = crypto_enqueue_request(&dd->queue, new_areq);
        if (dd->flags & AES_FLAGS_BUSY) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        areq = crypto_dequeue_request(&dd->queue);
        if (areq)
                dd->flags |= AES_FLAGS_BUSY;
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!areq)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        ctx = crypto_tfm_ctx(areq->tfm);

        dd->areq = areq;
        dd->ctx = ctx;
        dd->is_async = (areq != new_areq);

        err = ctx->start(dd);
        return (dd->is_async) ? ret : err;
}


/* AES async block ciphers */

static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd)
{
        return atmel_aes_complete(dd, 0);
}

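/*
 * Start a plain block-cipher request. DMA is used for requests of at least
 * ATMEL_AES_DMA_THRESHOLD bytes and for all sub-block CFB variants, which
 * the block-wise PIO path cannot handle; everything else goes through the
 * CPU.
 */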
static int atmel_aes_start(struct atmel_aes_dev *dd)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
        struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD ||
                        dd->ctx->block_size != AES_BLOCK_SIZE);
        int err;

        atmel_aes_set_mode(dd, rctx);

        err = atmel_aes_hw_init(dd);
        if (err)
                return atmel_aes_complete(dd, err);

        atmel_aes_write_ctrl(dd, use_dma, req->info);
        if (use_dma)
                return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
                                           atmel_aes_transfer_complete);

        return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
                                   atmel_aes_transfer_complete);
}

static inline struct atmel_aes_ctr_ctx *
atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
        return container_of(ctx, struct atmel_aes_ctr_ctx, base);
}

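/*
 * Process the next chunk of a CTR request. The hardware counter is 16 bits
 * wide (32 bits when the has_ctr32 cap is set), so a transfer that would
 * wrap it is split at the overflow point and the IV is then incremented by
 * software before the next chunk is started.
 */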
static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
{
        struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
        struct scatterlist *src, *dst;
        u32 ctr, blocks;
        size_t datalen;
        bool use_dma, fragmented = false;

        /* Check for transfer completion. */
        ctx->offset += dd->total;
        if (ctx->offset >= req->nbytes)
                return atmel_aes_transfer_complete(dd);

        /* Compute data length. */
        datalen = req->nbytes - ctx->offset;
        blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
        ctr = be32_to_cpu(ctx->iv[3]);
        if (dd->caps.has_ctr32) {
                /* Check 32bit counter overflow. */
                u32 start = ctr;
                u32 end = start + blocks - 1;

                if (end < start) {
                        ctr |= 0xffffffff;
                        datalen = AES_BLOCK_SIZE * -start;
                        fragmented = true;
                }
        } else {
                /* Check 16bit counter overflow. */
                u16 start = ctr & 0xffff;
                u16 end = start + (u16)blocks - 1;

                if (blocks >> 16 || end < start) {
                        ctr |= 0xffff;
                        datalen = AES_BLOCK_SIZE * (0x10000 - start);
                        fragmented = true;
                }
        }
        use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);

        /* Jump to offset. */
        src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset);
        dst = ((req->src == req->dst) ? src :
               scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset));

        /* Configure hardware. */
        atmel_aes_write_ctrl(dd, use_dma, ctx->iv);
        if (unlikely(fragmented)) {
                /*
                 * Increment the counter manually to cope with the hardware
                 * counter overflow.
                 */
                ctx->iv[3] = cpu_to_be32(ctr);
                crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE);
        }

        if (use_dma)
                return atmel_aes_dma_start(dd, src, dst, datalen,
                                           atmel_aes_ctr_transfer);

        return atmel_aes_cpu_start(dd, src, dst, datalen,
                                   atmel_aes_ctr_transfer);
}

static int atmel_aes_ctr_start(struct atmel_aes_dev *dd)
{
        struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
        struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        int err;

        atmel_aes_set_mode(dd, rctx);

        err = atmel_aes_hw_init(dd);
        if (err)
                return atmel_aes_complete(dd, err);

        memcpy(ctx->iv, req->info, AES_BLOCK_SIZE);
        ctx->offset = 0;
        dd->total = 0;
        return atmel_aes_ctr_transfer(dd);
}

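/*
 * Common entry point for all block-cipher requests: record the operating
 * mode and its block size in the context, then queue the request on the
 * device.
 */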
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct atmel_aes_base_ctx *ctx;
        struct atmel_aes_reqctx *rctx;
        struct atmel_aes_dev *dd;

        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        switch (mode & AES_FLAGS_OPMODE_MASK) {
        case AES_FLAGS_CFB8:
                ctx->block_size = CFB8_BLOCK_SIZE;
                break;

        case AES_FLAGS_CFB16:
                ctx->block_size = CFB16_BLOCK_SIZE;
                break;

        case AES_FLAGS_CFB32:
                ctx->block_size = CFB32_BLOCK_SIZE;
                break;

        case AES_FLAGS_CFB64:
                ctx->block_size = CFB64_BLOCK_SIZE;
                break;

        default:
                ctx->block_size = AES_BLOCK_SIZE;
                break;
        }

        dd = atmel_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx = ablkcipher_request_ctx(req);
        rctx->mode = mode;

        return atmel_aes_handle_queue(dd, &req->base);
}

static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                            unsigned int keylen)
{
        struct atmel_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 &&
            keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ECB);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
        struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
        ctx->base.start = atmel_aes_start;

        return 0;
}

static int atmel_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
        struct atmel_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
        ctx->base.start = atmel_aes_ctr_start;

        return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "atmel-ecb-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ecb_encrypt,
                .decrypt        = atmel_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "atmel-cbc-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cbc_encrypt,
                .decrypt        = atmel_aes_cbc_decrypt,
        }
},
{
        .cra_name               = "ofb(aes)",
        .cra_driver_name        = "atmel-ofb-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ofb_encrypt,
                .decrypt        = atmel_aes_ofb_decrypt,
        }
},
{
        .cra_name               = "cfb(aes)",
        .cra_driver_name        = "atmel-cfb-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb_encrypt,
                .decrypt        = atmel_aes_cfb_decrypt,
        }
},
{
        .cra_name               = "cfb32(aes)",
        .cra_driver_name        = "atmel-cfb32-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB32_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x3,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb32_encrypt,
                .decrypt        = atmel_aes_cfb32_decrypt,
        }
},
{
        .cra_name               = "cfb16(aes)",
        .cra_driver_name        = "atmel-cfb16-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB16_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x1,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb16_encrypt,
                .decrypt        = atmel_aes_cfb16_decrypt,
        }
},
{
        .cra_name               = "cfb8(aes)",
        .cra_driver_name        = "atmel-cfb8-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB8_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb8_encrypt,
                .decrypt        = atmel_aes_cfb8_decrypt,
        }
},
{
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "atmel-ctr-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 1,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctr_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_ctr_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ctr_encrypt,
                .decrypt        = atmel_aes_ctr_decrypt,
        }
},
};

static struct crypto_alg aes_cfb64_alg = {
        .cra_name               = "cfb64(aes)",
        .cra_driver_name        = "atmel-cfb64-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB64_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x7,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb64_encrypt,
                .decrypt        = atmel_aes_cfb64_decrypt,
        }
};


/* gcm aead functions */

static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
                               const u32 *data, size_t datalen,
                               const u32 *ghash_in, u32 *ghash_out,
                               atmel_aes_fn_t resume);
static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd);

static int atmel_aes_gcm_start(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_process(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_length(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd);
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd);

static inline struct atmel_aes_gcm_ctx *
atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
        return container_of(ctx, struct atmel_aes_gcm_ctx, base);
}

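/*
 * Compute a bare GHASH over datalen bytes with the hardware engine:
 * optionally seed the intermediate hash registers with ghash_in, push the
 * data through IDATAR, read the result from GHASHR into ghash_out, then
 * hand control back to resume.
 */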
static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd,
                               const u32 *data, size_t datalen,
                               const u32 *ghash_in, u32 *ghash_out,
                               atmel_aes_fn_t resume)
{
        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

        dd->data = (u32 *)data;
        dd->datalen = datalen;
        ctx->ghash_in = ghash_in;
        ctx->ghash_out = ghash_out;
        ctx->ghash_resume = resume;

        atmel_aes_write_ctrl(dd, false, NULL);
        return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init);
}

static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd)
{
        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);

        /* Set the data length. */
        atmel_aes_write(dd, AES_AADLENR, dd->total);
        atmel_aes_write(dd, AES_CLENR, 0);

        /* If needed, overwrite the GCM Intermediate Hash Word Registers */
        if (ctx->ghash_in)
                atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in);

        return atmel_aes_gcm_ghash_finalize(dd);
}

static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd)
{
        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
        u32 isr;

        /* Write data into the Input Data Registers. */
        while (dd->datalen > 0) {
                atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
                dd->data += 4;
                dd->datalen -= AES_BLOCK_SIZE;

                isr = atmel_aes_read(dd, AES_ISR);
                if (!(isr & AES_INT_DATARDY)) {
                        dd->resume = atmel_aes_gcm_ghash_finalize;
                        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
                        return -EINPROGRESS;
                }
        }

        /* Read the computed hash from GHASHRx. */
        atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out);

        return ctx->ghash_resume(dd);
}

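/*
 * Build J0 as defined by NIST SP 800-38D: for the usual 96-bit IV,
 * J0 = IV || 0^31 || 1; for any other IV length, J0 = GHASH(IV padded to a
 * full block, followed by a block carrying len(IV) in bits), which is
 * delegated to the hardware GHASH engine before the request proper starts.
 */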
1479static int atmel_aes_gcm_start(struct atmel_aes_dev *dd)
1480{
1481        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1482        struct aead_request *req = aead_request_cast(dd->areq);
1483        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1484        struct atmel_aes_reqctx *rctx = aead_request_ctx(req);
1485        size_t ivsize = crypto_aead_ivsize(tfm);
1486        size_t datalen, padlen;
1487        const void *iv = req->iv;
1488        u8 *data = dd->buf;
1489        int err;
1490
1491        atmel_aes_set_mode(dd, rctx);
1492
1493        err = atmel_aes_hw_init(dd);
1494        if (err)
1495                return atmel_aes_complete(dd, err);
1496
1497        if (likely(ivsize == 12)) {
1498                memcpy(ctx->j0, iv, ivsize);
1499                ctx->j0[3] = cpu_to_be32(1);
1500                return atmel_aes_gcm_process(dd);
1501        }
1502
1503        padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE);
1504        datalen = ivsize + padlen + AES_BLOCK_SIZE;
1505        if (datalen > dd->buflen)
1506                return atmel_aes_complete(dd, -EINVAL);
1507
1508        memcpy(data, iv, ivsize);
1509        memset(data + ivsize, 0, padlen + sizeof(u64));
1510        ((u64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8);
1511
1512        return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen,
1513                                   NULL, ctx->j0, atmel_aes_gcm_process);
1514}
1515
1516static int atmel_aes_gcm_process(struct atmel_aes_dev *dd)
1517{
1518        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1519        struct aead_request *req = aead_request_cast(dd->areq);
1520        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1521        bool enc = atmel_aes_is_encrypt(dd);
1522        u32 authsize;
1523
1524        /* Compute text length. */
1525        authsize = crypto_aead_authsize(tfm);
1526        ctx->textlen = req->cryptlen - (enc ? 0 : authsize);
1527
1528        /*
1529         * According to tcrypt test suite, the GCM Automatic Tag Generation
1530         * fails when both the message and its associated data are empty.
1531         */
1532        if (likely(req->assoclen != 0 || ctx->textlen != 0))
1533                dd->flags |= AES_FLAGS_GTAGEN;
1534
1535        atmel_aes_write_ctrl(dd, false, NULL);
1536        return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length);
1537}
1538
1539static int atmel_aes_gcm_length(struct atmel_aes_dev *dd)
1540{
1541        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
1542        struct aead_request *req = aead_request_cast(dd->areq);
1543        u32 j0_lsw, *j0 = ctx->j0;
1544        size_t padlen;
1545
1546        /* Write incr32(J0) into IV. */
1547        j0_lsw = j0[3];
1548        j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1);
1549        atmel_aes_write_block(dd, AES_IVR(0), j0);
1550        j0[3] = j0_lsw;
1551
1552        /* Set aad and text lengths. */
1553        atmel_aes_write(dd, AES_AADLENR, req->assoclen);
1554        atmel_aes_write(dd, AES_CLENR, ctx->textlen);
1555
1556        /* Check whether AAD are present. */
1557        if (unlikely(req->assoclen == 0)) {
1558                dd->datalen = 0;
1559                return atmel_aes_gcm_data(dd);
1560        }
1561
1562        /* Copy assoc data and add padding. */
1563        padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE);
1564        if (unlikely(req->assoclen + padlen > dd->buflen))
1565                return atmel_aes_complete(dd, -EINVAL);
1566        sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen);
1567
1568        /* Write assoc data into the Input Data register. */
1569        dd->data = (u32 *)dd->buf;
1570        dd->datalen = req->assoclen + padlen;
1571        return atmel_aes_gcm_data(dd);
1572}
1573
static int atmel_aes_gcm_data(struct atmel_aes_dev *dd)
{
        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
        struct aead_request *req = aead_request_cast(dd->areq);
        bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD);
        struct scatterlist *src, *dst;
        u32 isr, mr;

        /* Write AAD first. */
        while (dd->datalen > 0) {
                atmel_aes_write_block(dd, AES_IDATAR(0), dd->data);
                dd->data += 4;
                dd->datalen -= AES_BLOCK_SIZE;

                isr = atmel_aes_read(dd, AES_ISR);
                if (!(isr & AES_INT_DATARDY)) {
                        dd->resume = atmel_aes_gcm_data;
                        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
                        return -EINPROGRESS;
                }
        }

        /* GMAC only. */
        if (unlikely(ctx->textlen == 0))
                return atmel_aes_gcm_tag_init(dd);

        /* Prepare src and dst scatter lists to transfer cipher/plain texts */
        src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen);
        dst = ((req->src == req->dst) ? src :
               scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen));

        if (use_dma) {
                /* Update the Mode Register for DMA transfers. */
                mr = atmel_aes_read(dd, AES_MR);
                mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF);
                mr |= AES_MR_SMOD_IDATAR0;
                if (dd->caps.has_dualbuff)
                        mr |= AES_MR_DUALBUFF;
                atmel_aes_write(dd, AES_MR, mr);

                return atmel_aes_dma_start(dd, src, dst, ctx->textlen,
                                           atmel_aes_gcm_tag_init);
        }

        return atmel_aes_cpu_start(dd, src, dst, ctx->textlen,
                                   atmel_aes_gcm_tag_init);
}

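/*
 * Two tag paths exist: with Automatic Tag Generation enabled, the tag is
 * read back once TAGRDY rises; without it (empty message and AAD, see
 * atmel_aes_gcm_process()), the driver completes GHASH by hand over the
 * block encoding len(A) || len(C) and encrypts the digest itself.
 */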
static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd)
{
        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
        struct aead_request *req = aead_request_cast(dd->areq);
        u64 *data = dd->buf;

        if (likely(dd->flags & AES_FLAGS_GTAGEN)) {
                if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) {
                        dd->resume = atmel_aes_gcm_tag_init;
                        atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY);
                        return -EINPROGRESS;
                }

                return atmel_aes_gcm_finalize(dd);
        }

        /* Read the GCM Intermediate Hash Word Registers. */
        atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash);

        data[0] = cpu_to_be64(req->assoclen * 8);
        data[1] = cpu_to_be64(ctx->textlen * 8);

        return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE,
                                   ctx->ghash, ctx->ghash, atmel_aes_gcm_tag);
}

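/*
 * The GCM tag is E(K, J0) XOR GHASH(...): running a single CTR block with
 * J0 as the counter over the GHASH digest yields exactly that value.
 */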
static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd)
{
        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
        unsigned long flags;

        /*
         * Change mode to CTR to complete the tag generation.
         * Use J0 as Initialization Vector.
         */
        flags = dd->flags;
        dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN);
        dd->flags |= AES_FLAGS_CTR;
        atmel_aes_write_ctrl(dd, false, ctx->j0);
        dd->flags = flags;

        atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash);
        return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize);
}

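/*
 * On encryption the tag is appended to the ciphertext; on decryption the
 * expected tag is read back from the source buffer and compared with
 * crypto_memneq(), which runs in constant time so the position of the
 * first mismatching byte is not leaked.
 */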
static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd)
{
        struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx);
        struct aead_request *req = aead_request_cast(dd->areq);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        bool enc = atmel_aes_is_encrypt(dd);
        u32 offset, authsize, itag[4], *otag = ctx->tag;
        int err;

        /* Read the computed tag. */
        if (likely(dd->flags & AES_FLAGS_GTAGEN))
                atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag);
        else
                atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag);

        offset = req->assoclen + ctx->textlen;
        authsize = crypto_aead_authsize(tfm);
        if (enc) {
                scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1);
                err = 0;
        } else {
                scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0);
                err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0;
        }

        return atmel_aes_complete(dd, err);
}

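/*
 * Common entry point for the encrypt and decrypt callbacks: it only
 * records the requested direction and queues the request; the real work
 * starts from atmel_aes_gcm_start() once the device picks it up.
 */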
static int atmel_aes_gcm_crypt(struct aead_request *req,
                               unsigned long mode)
{
        struct atmel_aes_base_ctx *ctx;
        struct atmel_aes_reqctx *rctx;
        struct atmel_aes_dev *dd;

        ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
        ctx->block_size = AES_BLOCK_SIZE;

        dd = atmel_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx = aead_request_ctx(req);
        rctx->mode = AES_FLAGS_GCM | mode;

        return atmel_aes_handle_queue(dd, &req->base);
}

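/*
 * The key is only cached in the context here; it is loaded into the
 * hardware key registers per request, since the device is shared by all
 * transformations.
 */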
static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
                                unsigned int keylen)
{
        struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm);

        if (keylen != AES_KEYSIZE_256 &&
            keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_128) {
                crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm,
                                     unsigned int authsize)
{
        /* Same as crypto_gcm_authsize() from crypto/gcm.c */
        switch (authsize) {
        case 4:
        case 8:
        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int atmel_aes_gcm_encrypt(struct aead_request *req)
{
        return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_gcm_decrypt(struct aead_request *req)
{
        return atmel_aes_gcm_crypt(req, 0);
}

static int atmel_aes_gcm_init(struct crypto_aead *tfm)
{
        struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx));
        ctx->base.start = atmel_aes_gcm_start;

        return 0;
}

static void atmel_aes_gcm_exit(struct crypto_aead *tfm)
{

}

static struct aead_alg aes_gcm_alg = {
        .setkey         = atmel_aes_gcm_setkey,
        .setauthsize    = atmel_aes_gcm_setauthsize,
        .encrypt        = atmel_aes_gcm_encrypt,
        .decrypt        = atmel_aes_gcm_decrypt,
        .init           = atmel_aes_gcm_init,
        .exit           = atmel_aes_gcm_exit,
        .ivsize         = 12,
        .maxauthsize    = AES_BLOCK_SIZE,

        .base = {
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "atmel-gcm-aes",
                .cra_priority           = ATMEL_AES_PRIORITY,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct atmel_aes_gcm_ctx),
                .cra_alignmask          = 0xf,
                .cra_module             = THIS_MODULE,
        },
};

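/*
 * Usage sketch (hypothetical caller, error handling omitted): once the
 * algorithm is registered, users reach it through the generic AEAD API
 * rather than by calling into this driver directly; whether this driver
 * actually serves the request depends on its priority, e.g.:
 *
 *        struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *        crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *        crypto_aead_setauthsize(tfm, AES_BLOCK_SIZE);
 */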

/* XTS functions */

static inline struct atmel_aes_xts_ctx *
atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx)
{
        return container_of(ctx, struct atmel_aes_xts_ctx, base);
}

static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd);

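/*
 * XTS needs the tweak before any data can flow: run one ECB encryption of
 * the sector IV (req->info) under the second half of the key, then resume
 * from atmel_aes_xts_process_data() to program the actual XTS transfer.
 */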
static int atmel_aes_xts_start(struct atmel_aes_dev *dd)
{
        struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx);
        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
        struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        unsigned long flags;
        int err;

        atmel_aes_set_mode(dd, rctx);

        err = atmel_aes_hw_init(dd);
        if (err)
                return atmel_aes_complete(dd, err);

        /* Compute the tweak value from req->info with ecb(aes). */
        flags = dd->flags;
        dd->flags &= ~AES_FLAGS_MODE_MASK;
        dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT);
        atmel_aes_write_ctrl_key(dd, false, NULL,
                                 ctx->key2, ctx->base.keylen);
        dd->flags = flags;

        atmel_aes_write_block(dd, AES_IDATAR(0), req->info);
        return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data);
}

static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd)
{
        struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
        bool use_dma = (req->nbytes >= ATMEL_AES_DMA_THRESHOLD);
        u32 tweak[AES_BLOCK_SIZE / sizeof(u32)];
        static const u32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), };
        u8 *tweak_bytes = (u8 *)tweak;
        int i;

        /* Read the computed ciphered tweak value. */
        atmel_aes_read_block(dd, AES_ODATAR(0), tweak);
        /*
         * Hardware quirk:
         * the order of the ciphered tweak bytes needs to be reversed before
         * writing them into the TWRx registers.
         */
        for (i = 0; i < AES_BLOCK_SIZE/2; ++i) {
                u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i];

                tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i];
                tweak_bytes[i] = tmp;
        }

        /* Process the data. */
        atmel_aes_write_ctrl(dd, use_dma, NULL);
        atmel_aes_write_block(dd, AES_TWR(0), tweak);
        atmel_aes_write_block(dd, AES_ALPHAR(0), one);
        if (use_dma)
                return atmel_aes_dma_start(dd, req->src, req->dst, req->nbytes,
                                           atmel_aes_transfer_complete);

        return atmel_aes_cpu_start(dd, req->src, req->dst, req->nbytes,
                                   atmel_aes_transfer_complete);
}

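/*
 * An XTS key is the concatenation of two equally sized AES keys: the
 * first half keys the data encryption, the second half (key2) keys the
 * tweak computation. xts_check_key() also verifies that the length is
 * even and, in FIPS mode, that the two halves differ.
 */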
static int atmel_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                                unsigned int keylen)
{
        struct atmel_aes_xts_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        int err;

        err = xts_check_key(crypto_ablkcipher_tfm(tfm), key, keylen);
        if (err)
                return err;

        memcpy(ctx->base.key, key, keylen/2);
        memcpy(ctx->key2, key + keylen/2, keylen/2);
        ctx->base.keylen = keylen/2;

        return 0;
}

static int atmel_aes_xts_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT);
}

static int atmel_aes_xts_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_XTS);
}

static int atmel_aes_xts_cra_init(struct crypto_tfm *tfm)
{
        struct atmel_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);

        tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);
        ctx->base.start = atmel_aes_xts_start;

        return 0;
}

static struct crypto_alg aes_xts_alg = {
        .cra_name               = "xts(aes)",
        .cra_driver_name        = "atmel-xts-aes",
        .cra_priority           = ATMEL_AES_PRIORITY,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_xts_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_xts_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = 2 * AES_MIN_KEY_SIZE,
                .max_keysize    = 2 * AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_xts_setkey,
                .encrypt        = atmel_aes_xts_encrypt,
                .decrypt        = atmel_aes_xts_decrypt,
        }
};


/* Probe functions */

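/*
 * The bounce buffer backs the CPU transfer path and the staging of IVs
 * and associated data; its usable length is clamped to a multiple of the
 * AES block size so that partial blocks never reach the hardware.
 */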
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
        dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER);
        dd->buflen = ATMEL_AES_BUFFER_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                return -ENOMEM;
        }

        return 0;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
        /* The buffer is an order-2 allocation: free all of its pages. */
        free_pages((unsigned long)dd->buf, ATMEL_AES_BUFFER_ORDER);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
        struct at_dma_slave     *sl = slave;

        if (sl && sl->dma_dev == chan->device->dev) {
                chan->private = sl;
                return true;
        } else {
                return false;
        }
}

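/*
 * dma_request_slave_channel_compat() first tries the DT "tx"/"rx"
 * channel bindings and falls back to dma_request_channel() with the
 * filter above on legacy platform-data systems. Both channels are
 * mandatory, so a failure releases whatever was already acquired.
 */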
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
                              struct crypto_platform_data *pdata)
{
        struct at_dma_slave *slave;
        int err = -ENOMEM;
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* Try to grab 2 DMA channels */
        slave = &pdata->dma_slave->rxdata;
        dd->src.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
                                                        slave, dd->dev, "tx");
        if (!dd->src.chan)
                goto err_dma_in;

        slave = &pdata->dma_slave->txdata;
        dd->dst.chan = dma_request_slave_channel_compat(mask, atmel_aes_filter,
                                                        slave, dd->dev, "rx");
        if (!dd->dst.chan)
                goto err_dma_out;

        return 0;

err_dma_out:
        dma_release_channel(dd->src.chan);
err_dma_in:
        dev_warn(dd->dev, "no DMA channel available\n");
        return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
        dma_release_channel(dd->dst.chan);
        dma_release_channel(dd->src.chan);
}

static void atmel_aes_queue_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

        atmel_aes_handle_queue(dd, NULL);
}

static void atmel_aes_done_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

        dd->is_async = true;
        (void)dd->resume(dd);
}

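/*
 * The hard interrupt handler only masks the reported interrupt sources;
 * the state machine is resumed from the done tasklet so that the heavier
 * register work runs outside hard-IRQ context.
 */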
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
        struct atmel_aes_dev *aes_dd = dev_id;
        u32 reg;

        reg = atmel_aes_read(aes_dd, AES_ISR);
        if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
                atmel_aes_write(aes_dd, AES_IDR, reg);
                if (AES_FLAGS_BUSY & aes_dd->flags)
                        tasklet_schedule(&aes_dd->done_task);
                else
                        dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
        int i;

        if (dd->caps.has_xts)
                crypto_unregister_alg(&aes_xts_alg);

        if (dd->caps.has_gcm)
                crypto_unregister_aead(&aes_gcm_alg);

        if (dd->caps.has_cfb64)
                crypto_unregister_alg(&aes_cfb64_alg);

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
        int err, i, j;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        if (dd->caps.has_cfb64) {
                err = crypto_register_alg(&aes_cfb64_alg);
                if (err)
                        goto err_aes_cfb64_alg;
        }

        if (dd->caps.has_gcm) {
                err = crypto_register_aead(&aes_gcm_alg);
                if (err)
                        goto err_aes_gcm_alg;
        }

        if (dd->caps.has_xts) {
                err = crypto_register_alg(&aes_xts_alg);
                if (err)
                        goto err_aes_xts_alg;
        }

        return 0;

err_aes_xts_alg:
        /* Only unregister what was actually registered. */
        if (dd->caps.has_gcm)
                crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
        if (dd->caps.has_cfb64)
                crypto_unregister_alg(&aes_cfb64_alg);
err_aes_cfb64_alg:
        i = ARRAY_SIZE(aes_algs);
err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&aes_algs[j]);

        return err;
}

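/*
 * Capabilities are keyed on the major revision of the IP, read back from
 * the hardware at probe time: newer revisions add dual-buffer mode,
 * CFB64, a 32-bit CTR counter, GCM and finally XTS support.
 */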
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
        dd->caps.has_dualbuff = 0;
        dd->caps.has_cfb64 = 0;
        dd->caps.has_ctr32 = 0;
        dd->caps.has_gcm = 0;
        dd->caps.has_xts = 0;
        dd->caps.max_burst_size = 1;

        /* keep only major version number */
        switch (dd->hw_version & 0xff0) {
        case 0x500:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.has_ctr32 = 1;
                dd->caps.has_gcm = 1;
                dd->caps.has_xts = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x200:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.has_ctr32 = 1;
                dd->caps.has_gcm = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x130:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x120:
                break;
        default:
                dev_warn(dd->dev,
                                "Unmanaged AES version, using minimum capabilities\n");
                break;
        }
}

#if defined(CONFIG_OF)
static const struct of_device_id atmel_aes_dt_ids[] = {
        { .compatible = "atmel,at91sam9g46-aes" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids);

static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct crypto_platform_data *pdata;

        if (!np) {
                dev_err(&pdev->dev, "device node not found\n");
                return ERR_PTR(-EINVAL);
        }

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata) {
                dev_err(&pdev->dev, "could not allocate memory for pdata\n");
                return ERR_PTR(-ENOMEM);
        }

        pdata->dma_slave = devm_kzalloc(&pdev->dev,
                                        sizeof(*(pdata->dma_slave)),
                                        GFP_KERNEL);
        if (!pdata->dma_slave) {
                dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
                devm_kfree(&pdev->dev, pdata);
                return ERR_PTR(-ENOMEM);
        }

        return pdata;
}
#else
static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev)
{
        return ERR_PTR(-EINVAL);
}
#endif

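/*
 * Probe order matters here: the clock and version register must be up
 * before capabilities can be read, and the device is only added to the
 * global list, where atmel_aes_find_dev() can see it, once its DMA
 * channels and buffers are ready, right before algorithm registration.
 */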
static int atmel_aes_probe(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;
        struct crypto_platform_data *pdata;
        struct device *dev = &pdev->dev;
        struct resource *aes_res;
        int err;

        pdata = pdev->dev.platform_data;
        if (!pdata) {
                pdata = atmel_aes_of_init(pdev);
                if (IS_ERR(pdata)) {
                        err = PTR_ERR(pdata);
                        goto aes_dd_err;
                }
        }

        if (!pdata->dma_slave) {
                err = -ENXIO;
                goto aes_dd_err;
        }

        aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
        if (aes_dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
                goto aes_dd_err;
        }

        aes_dd->dev = dev;

        platform_set_drvdata(pdev, aes_dd);

        INIT_LIST_HEAD(&aes_dd->list);
        spin_lock_init(&aes_dd->lock);

        tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
                                        (unsigned long)aes_dd);
        tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
                                        (unsigned long)aes_dd);

        crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

        aes_dd->irq = -1;

        /* Get the base address */
        aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!aes_res) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto res_err;
        }
        aes_dd->phys_base = aes_res->start;

        /* Get the IRQ */
        aes_dd->irq = platform_get_irq(pdev, 0);
        if (aes_dd->irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = aes_dd->irq;
                goto res_err;
        }

        err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
                               IRQF_SHARED, "atmel-aes", aes_dd);
        if (err) {
                dev_err(dev, "unable to request aes irq.\n");
                goto res_err;
        }

        /* Initializing the clock */
        aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
        if (IS_ERR(aes_dd->iclk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(aes_dd->iclk);
                goto res_err;
        }

        aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
        if (IS_ERR(aes_dd->io_base)) {
                dev_err(dev, "can't ioremap\n");
                err = PTR_ERR(aes_dd->io_base);
                goto res_err;
        }

        err = clk_prepare(aes_dd->iclk);
        if (err)
                goto res_err;

        err = atmel_aes_hw_version_init(aes_dd);
        if (err)
                goto iclk_unprepare;

        atmel_aes_get_cap(aes_dd);

        err = atmel_aes_buff_init(aes_dd);
        if (err)
                goto err_aes_buff;

        err = atmel_aes_dma_init(aes_dd, pdata);
        if (err)
                goto err_aes_dma;

        spin_lock(&atmel_aes.lock);
        list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
        spin_unlock(&atmel_aes.lock);

        err = atmel_aes_register_algs(aes_dd);
        if (err)
                goto err_algs;

        dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n",
                        dma_chan_name(aes_dd->src.chan),
                        dma_chan_name(aes_dd->dst.chan));

        return 0;

err_algs:
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);
        atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
        atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
iclk_unprepare:
        clk_unprepare(aes_dd->iclk);
res_err:
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
aes_dd_err:
        dev_err(dev, "initialization failed.\n");

        return err;
}


static int atmel_aes_remove(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;

        aes_dd = platform_get_drvdata(pdev);
        if (!aes_dd)
                return -ENODEV;
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);

        atmel_aes_unregister_algs(aes_dd);

        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);

        atmel_aes_dma_cleanup(aes_dd);
        atmel_aes_buff_cleanup(aes_dd);

        clk_unprepare(aes_dd->iclk);

        return 0;
}

static struct platform_driver atmel_aes_driver = {
        .probe          = atmel_aes_probe,
        .remove         = atmel_aes_remove,
        .driver         = {
                .name   = "atmel_aes",
                .of_match_table = of_match_ptr(atmel_aes_dt_ids),
        },
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");