linux/drivers/crypto/atmel-aes.c
/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/atmel-aes.h>
#include "atmel-aes-regs.h"

#define CFB8_BLOCK_SIZE		1
#define CFB16_BLOCK_SIZE	2
#define CFB32_BLOCK_SIZE	4
#define CFB64_BLOCK_SIZE	8

/* AES flags */
#define AES_FLAGS_MODE_MASK	0x01ff
#define AES_FLAGS_ENCRYPT	BIT(0)
#define AES_FLAGS_CBC		BIT(1)
#define AES_FLAGS_CFB		BIT(2)
#define AES_FLAGS_CFB8		BIT(3)
#define AES_FLAGS_CFB16		BIT(4)
#define AES_FLAGS_CFB32		BIT(5)
#define AES_FLAGS_CFB64		BIT(6)
#define AES_FLAGS_OFB		BIT(7)
#define AES_FLAGS_CTR		BIT(8)

#define AES_FLAGS_INIT		BIT(16)
#define AES_FLAGS_DMA		BIT(17)
#define AES_FLAGS_BUSY		BIT(18)

#define AES_FLAGS_DUALBUFF	BIT(24)

#define ATMEL_AES_QUEUE_LENGTH	1
#define ATMEL_AES_CACHE_SIZE	0

#define ATMEL_AES_DMA_THRESHOLD	16

struct atmel_aes_dev;

struct atmel_aes_ctx {
	struct atmel_aes_dev *dd;

	int		keylen;
	u32		key[AES_KEYSIZE_256 / sizeof(u32)];
};

struct atmel_aes_reqctx {
	unsigned long mode;
};

struct atmel_aes_dma {
	struct dma_chan		*chan;
	struct dma_slave_config	dma_conf;
};

struct atmel_aes_dev {
	struct list_head	list;
	unsigned long		phys_base;
	void __iomem		*io_base;

	struct atmel_aes_ctx	*ctx;
	struct device		*dev;
	struct clk		*iclk;
	int	irq;

	unsigned long		flags;
	int	err;

	spinlock_t		lock;
	struct crypto_queue	queue;

	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	struct ablkcipher_request	*req;
	size_t	total;

	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;

	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	size_t	bufcnt;

	u8	buf_in[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
	int	dma_in;
	struct atmel_aes_dma	dma_lch_in;

	u8	buf_out[ATMEL_AES_DMA_THRESHOLD] __aligned(sizeof(u32));
	int	dma_out;
	struct atmel_aes_dma	dma_lch_out;

	u32	hw_version;
};

struct atmel_aes_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_aes_drv atmel_aes = {
	.dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};

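/*
 * Count how many scatterlist entries are needed to cover the request
 * payload: walk the list until req->nbytes bytes have been accounted
 * for (or the list ends) and return the number of entries visited.
 */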
static int atmel_aes_sg_length(struct ablkcipher_request *req,
			struct scatterlist *sg)
{
	unsigned int total = req->nbytes;
	unsigned int len;
	int sg_nb = 0;
	struct scatterlist *sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}

static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
	return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
					u32 offset, u32 value)
{
	writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		*value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
					u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		atmel_aes_write(dd, offset, *value);
}

static void atmel_aes_dualbuff_test(struct atmel_aes_dev *dd)
{
	atmel_aes_write(dd, AES_MR, AES_MR_DUALBUFF);

	if (atmel_aes_read(dd, AES_MR) & AES_MR_DUALBUFF)
		dd->flags |= AES_FLAGS_DUALBUFF;
}

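/*
 * Bind a tfm context to an AES device instance.  The first call picks
 * the first (in practice the only) device on the global list and caches
 * it in the context; later calls reuse the cached device.
 */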
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
	struct atmel_aes_dev *aes_dd = NULL;
	struct atmel_aes_dev *tmp;

	spin_lock_bh(&atmel_aes.lock);
	if (!ctx->dd) {
		list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
			aes_dd = tmp;
			break;
		}
		ctx->dd = aes_dd;
	} else {
		aes_dd = ctx->dd;
	}

	spin_unlock_bh(&atmel_aes.lock);

	return aes_dd;
}

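/*
 * Enable the peripheral clock and, on first use, soft-reset the engine
 * and probe whether the DUALBUFF mode bit sticks in AES_MR.
 */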
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
	clk_prepare_enable(dd->iclk);

	if (!(dd->flags & AES_FLAGS_INIT)) {
		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
		atmel_aes_dualbuff_test(dd);
		dd->flags |= AES_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
	atmel_aes_hw_init(dd);

	dd->hw_version = atmel_aes_read(dd, AES_HW_VERSION);

	clk_disable_unprepare(dd->iclk);
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
	struct ablkcipher_request *req = dd->req;

	clk_disable_unprepare(dd->iclk);
	dd->flags &= ~AES_FLAGS_BUSY;

	req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
	struct atmel_aes_dev *dd = data;

	/* dma_lch_out - completed */
	tasklet_schedule(&dd->done_task);
}

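/*
 * Map the source and destination scatterlists and queue one slave
 * transfer on each DMA channel.  Only the "out" (device-to-memory)
 * descriptor carries a completion callback; input completion is
 * implied by output completion.
 */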
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd)
{
	struct dma_async_tx_descriptor	*in_desc, *out_desc;
	int nb_dma_sg_in, nb_dma_sg_out;

	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		goto exit_err;

	nb_dma_sg_in = dma_map_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
			DMA_TO_DEVICE);
	if (!nb_dma_sg_in)
		goto exit_err;

	in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, dd->in_sg,
				nb_dma_sg_in, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!in_desc)
		goto unmap_in;

	/* callback not needed */

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		goto unmap_in;

	nb_dma_sg_out = dma_map_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
			DMA_FROM_DEVICE);
	if (!nb_dma_sg_out)
		goto unmap_in;	/* output mapping failed, nothing to unmap */

	out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, dd->out_sg,
				nb_dma_sg_out, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!out_desc)
		goto unmap_out;

	out_desc->callback = atmel_aes_dma_callback;
	out_desc->callback_param = dd;

	dd->total -= dd->req->nbytes;

	dmaengine_submit(out_desc);
	dma_async_issue_pending(dd->dma_lch_out.chan);

	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return 0;

unmap_out:
	dma_unmap_sg(dd->dev, dd->out_sg, dd->nb_out_sg,
		DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_sg(dd->dev, dd->in_sg, dd->nb_in_sg,
		DMA_TO_DEVICE);
exit_err:
	return -EINVAL;
}

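/*
 * PIO fallback for small requests: copy the source scatterlist into the
 * aligned bounce buffer and feed it to the input data registers, letting
 * the DATRDY interrupt signal when output can be collected.
 */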
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
	dd->flags &= ~AES_FLAGS_DMA;

	/* use cache buffers */
	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
	if (!dd->nb_in_sg)
		return -EINVAL;

	dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
	if (!dd->nb_out_sg)
		return -EINVAL;

	dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
					dd->buf_in, dd->total);
	if (!dd->bufcnt)
		return -EINVAL;

	dd->total -= dd->bufcnt;

	atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
	atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
				dd->bufcnt >> 2);

	return 0;
}

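/*
 * Program the DMA bus widths to match the CFB segment size (1 or 2 bytes
 * for CFB8/CFB16, 4 bytes otherwise) before kicking off the actual
 * transfer in atmel_aes_crypt_dma().
 */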
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
	int err;

	if (dd->flags & AES_FLAGS_CFB8) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_1_BYTE;
	} else if (dd->flags & AES_FLAGS_CFB16) {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_2_BYTES;
	} else {
		dd->dma_lch_in.dma_conf.dst_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
		dd->dma_lch_out.dma_conf.src_addr_width =
			DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
	dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

	dd->flags |= AES_FLAGS_DMA;
	err = atmel_aes_crypt_dma(dd);

	return err;
}

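/*
 * Program the hardware for the current request: key size, operating
 * mode, encrypt/decrypt direction and start mode (auto for PIO,
 * IDATAR0-triggered for DMA), then load the key and, for the chaining
 * modes, the IV.
 */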
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
	int err;
	u32 valcr = 0, valmr = 0;

	err = atmel_aes_hw_init(dd);
	if (err)
		return err;

	/* MR register must be set before IV registers */
	if (dd->ctx->keylen == AES_KEYSIZE_128)
		valmr |= AES_MR_KEYSIZE_128;
	else if (dd->ctx->keylen == AES_KEYSIZE_192)
		valmr |= AES_MR_KEYSIZE_192;
	else
		valmr |= AES_MR_KEYSIZE_256;

	if (dd->flags & AES_FLAGS_CBC) {
		valmr |= AES_MR_OPMOD_CBC;
	} else if (dd->flags & AES_FLAGS_CFB) {
		valmr |= AES_MR_OPMOD_CFB;
		if (dd->flags & AES_FLAGS_CFB8)
			valmr |= AES_MR_CFBS_8b;
		else if (dd->flags & AES_FLAGS_CFB16)
			valmr |= AES_MR_CFBS_16b;
		else if (dd->flags & AES_FLAGS_CFB32)
			valmr |= AES_MR_CFBS_32b;
		else if (dd->flags & AES_FLAGS_CFB64)
			valmr |= AES_MR_CFBS_64b;
	} else if (dd->flags & AES_FLAGS_OFB) {
		valmr |= AES_MR_OPMOD_OFB;
	} else if (dd->flags & AES_FLAGS_CTR) {
		valmr |= AES_MR_OPMOD_CTR;
	} else {
		valmr |= AES_MR_OPMOD_ECB;
	}

	if (dd->flags & AES_FLAGS_ENCRYPT)
		valmr |= AES_MR_CYPHER_ENC;

	if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
		valmr |= AES_MR_SMOD_IDATAR0;
		if (dd->flags & AES_FLAGS_DUALBUFF)
			valmr |= AES_MR_DUALBUFF;
	} else {
		valmr |= AES_MR_SMOD_AUTO;
	}

	atmel_aes_write(dd, AES_CR, valcr);
	atmel_aes_write(dd, AES_MR, valmr);

	atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
						dd->ctx->keylen >> 2);

	if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
	   (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
	   dd->req->info) {
		atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
	}

	return 0;
}

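/*
 * Enqueue a request (if any) and, unless the engine is already busy,
 * dequeue the next one and start processing it.  Called both from
 * atmel_aes_crypt() and from the queue tasklet with req == NULL.
 */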
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
			       struct ablkcipher_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_aes_ctx *ctx;
	struct atmel_aes_reqctx *rctx;
	unsigned long flags;
	int err, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ablkcipher_enqueue_request(&dd->queue, req);
	if (dd->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ablkcipher_request_cast(async_req);

	/* assign new request to device */
	dd->req = req;
	dd->total = req->nbytes;
	dd->in_sg = req->src;
	dd->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= AES_FLAGS_MODE_MASK;
	dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
	dd->ctx = ctx;
	ctx->dd = dd;

	err = atmel_aes_write_ctrl(dd);
	if (!err) {
		if (dd->total > ATMEL_AES_DMA_THRESHOLD)
			err = atmel_aes_crypt_dma_start(dd);
		else
			err = atmel_aes_crypt_cpu_start(dd);
	}
	if (err) {
		/* aes_task will not finish it, so do it here */
		atmel_aes_finish_req(dd, err);
		tasklet_schedule(&dd->queue_task);
	}

	return ret;
}

static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
	int err = -EINVAL;

	if (dd->flags & AES_FLAGS_DMA) {
		dma_unmap_sg(dd->dev, dd->out_sg,
			dd->nb_out_sg, DMA_FROM_DEVICE);
		dma_unmap_sg(dd->dev, dd->in_sg,
			dd->nb_in_sg, DMA_TO_DEVICE);
		err = 0;
	}

	return err;
}

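/* Common entry point for all modes: validate size, pick a device, queue. */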
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
			crypto_ablkcipher_reqtfm(req));
	struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct atmel_aes_dev *dd;

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		pr_err("request size is not a multiple of the AES block size\n");
		return -EINVAL;
	}

	dd = atmel_aes_find_dev(ctx);
	if (!dd)
		return -ENODEV;

	rctx->mode = mode;

	return atmel_aes_handle_queue(dd, req);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave	*sl = slave;

	if (sl && sl->dma_dev == chan->device->dev) {
		chan->private = sl;
		return true;
	} else {
		return false;
	}
}

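/*
 * Acquire the RX/TX DMA channels described by the platform data and
 * preset the fixed parts of their slave configuration (FIFO addresses,
 * burst sizes).  Bus widths are set per-request in
 * atmel_aes_crypt_dma_start().
 */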
static int atmel_aes_dma_init(struct atmel_aes_dev *dd)
{
	int err = -ENOMEM;
	struct aes_platform_data	*pdata;
	dma_cap_mask_t mask_in, mask_out;

	pdata = dd->dev->platform_data;

	if (pdata && pdata->dma_slave->txdata.dma_dev &&
		pdata->dma_slave->rxdata.dma_dev) {

		/* Try to grab 2 DMA channels */
		dma_cap_zero(mask_in);
		dma_cap_set(DMA_SLAVE, mask_in);

		dd->dma_lch_in.chan = dma_request_channel(mask_in,
				atmel_aes_filter, &pdata->dma_slave->rxdata);
		if (!dd->dma_lch_in.chan)
			goto err_dma_in;

		dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
		dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
			AES_IDATAR(0);
		dd->dma_lch_in.dma_conf.src_maxburst = 1;
		dd->dma_lch_in.dma_conf.dst_maxburst = 1;
		dd->dma_lch_in.dma_conf.device_fc = false;

		dma_cap_zero(mask_out);
		dma_cap_set(DMA_SLAVE, mask_out);
		dd->dma_lch_out.chan = dma_request_channel(mask_out,
				atmel_aes_filter, &pdata->dma_slave->txdata);
		if (!dd->dma_lch_out.chan)
			goto err_dma_out;

		dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
		dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
			AES_ODATAR(0);
		dd->dma_lch_out.dma_conf.src_maxburst = 1;
		dd->dma_lch_out.dma_conf.dst_maxburst = 1;
		dd->dma_lch_out.dma_conf.device_fc = false;

		return 0;
	} else {
		return -ENODEV;
	}

err_dma_out:
	dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
	return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
	dma_release_channel(dd->dma_lch_in.chan);
	dma_release_channel(dd->dma_lch_out.chan);
}

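/* Accept only the three AES key sizes; the key is kept in the tfm context. */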
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
		   keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, 0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req,
		AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

	return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "atmel-ecb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ecb_encrypt,
		.decrypt	= atmel_aes_ecb_decrypt,
	}
},
{
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "atmel-cbc-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cbc_encrypt,
		.decrypt	= atmel_aes_cbc_decrypt,
	}
},
{
	.cra_name		= "ofb(aes)",
	.cra_driver_name	= "atmel-ofb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ofb_encrypt,
		.decrypt	= atmel_aes_ofb_decrypt,
	}
},
{
	.cra_name		= "cfb(aes)",
	.cra_driver_name	= "atmel-cfb-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb_encrypt,
		.decrypt	= atmel_aes_cfb_decrypt,
	}
},
{
	.cra_name		= "cfb32(aes)",
	.cra_driver_name	= "atmel-cfb32-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB32_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb32_encrypt,
		.decrypt	= atmel_aes_cfb32_decrypt,
	}
},
{
	.cra_name		= "cfb16(aes)",
	.cra_driver_name	= "atmel-cfb16-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB16_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb16_encrypt,
		.decrypt	= atmel_aes_cfb16_decrypt,
	}
},
{
	.cra_name		= "cfb8(aes)",
	.cra_driver_name	= "atmel-cfb8-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB8_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb8_encrypt,
		.decrypt	= atmel_aes_cfb8_decrypt,
	}
},
{
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "atmel-ctr-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_ctr_encrypt,
		.decrypt	= atmel_aes_ctr_decrypt,
	}
},
};

static struct crypto_alg aes_cfb64_alg[] = {
{
	.cra_name		= "cfb64(aes)",
	.cra_driver_name	= "atmel-cfb64-aes",
	.cra_priority		= 100,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize		= CFB64_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct atmel_aes_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= atmel_aes_cra_init,
	.cra_exit		= atmel_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= atmel_aes_setkey,
		.encrypt	= atmel_aes_cfb64_encrypt,
		.decrypt	= atmel_aes_cfb64_decrypt,
	}
},
};

static void atmel_aes_queue_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

	atmel_aes_handle_queue(dd, NULL);
}

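/*
 * Completion path.  For PIO, read the output data registers back into
 * the bounce buffer and copy it to the destination scatterlist; for
 * DMA, unmap the buffers and start another DMA round if bytes remain.
 */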
static void atmel_aes_done_task(unsigned long data)
{
	struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
	int err;

	if (!(dd->flags & AES_FLAGS_DMA)) {
		atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
				dd->bufcnt >> 2);

		if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
			dd->buf_out, dd->bufcnt))
			err = 0;
		else
			err = -EINVAL;

		goto cpu_end;
	}

	err = atmel_aes_crypt_dma_stop(dd);

	err = dd->err ? : err;

	if (dd->total && !err) {
		err = atmel_aes_crypt_dma_start(dd);
		if (!err)
			return; /* DMA started. Not finishing. */
	}

cpu_end:
	atmel_aes_finish_req(dd, err);
	atmel_aes_handle_queue(dd, NULL);
}

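/*
 * Interrupt handler: disable the pending sources via AES_IDR and defer
 * the actual work to the done tasklet.
 */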
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
	struct atmel_aes_dev *aes_dd = dev_id;
	u32 reg;

	reg = atmel_aes_read(aes_dd, AES_ISR);
	if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
		atmel_aes_write(aes_dd, AES_IDR, reg);
		if (AES_FLAGS_BUSY & aes_dd->flags)
			tasklet_schedule(&aes_dd->done_task);
		else
			dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
	if (dd->hw_version >= 0x130)
		crypto_unregister_alg(&aes_cfb64_alg[0]);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
	int err, i, j;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	atmel_aes_hw_version_init(dd);

	if (dd->hw_version >= 0x130) {
		INIT_LIST_HEAD(&aes_cfb64_alg[0].cra_list);
		err = crypto_register_alg(&aes_cfb64_alg[0]);
		if (err)
			goto err_aes_cfb64_alg;
	}

	return 0;

err_aes_cfb64_alg:
	i = ARRAY_SIZE(aes_algs);
err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

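/*
 * Platform probe: map registers, hook up the IRQ, clock and DMA
 * channels, then register the algorithm set (cfb64 only when the IP
 * revision, read from AES_HW_VERSION, is at least 0x130).
 */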
static int __devinit atmel_aes_probe(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;
	struct aes_platform_data	*pdata;
	struct device *dev = &pdev->dev;
	struct resource *aes_res;
	unsigned long aes_phys_size;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		err = -ENXIO;
		goto aes_dd_err;
	}

	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
	if (aes_dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto aes_dd_err;
	}

	aes_dd->dev = dev;

	platform_set_drvdata(pdev, aes_dd);

	INIT_LIST_HEAD(&aes_dd->list);

	tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
					(unsigned long)aes_dd);
	tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
					(unsigned long)aes_dd);

	crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

	aes_dd->irq = -1;

	/* Get the base address */
	aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!aes_res) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto res_err;
	}
	aes_dd->phys_base = aes_res->start;
	aes_phys_size = resource_size(aes_res);

	/* Get the IRQ */
	aes_dd->irq = platform_get_irq(pdev, 0);
	if (aes_dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = aes_dd->irq;
		goto aes_irq_err;
	}

	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
						aes_dd);
	if (err) {
		dev_err(dev, "unable to request aes irq.\n");
		goto aes_irq_err;
	}

	/* Initializing the clock */
	aes_dd->iclk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(aes_dd->iclk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(aes_dd->iclk);
		goto clk_err;
	}

	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
	if (!aes_dd->io_base) {
		dev_err(dev, "can't ioremap\n");
		err = -ENOMEM;
		goto aes_io_err;
	}

	err = atmel_aes_dma_init(aes_dd);
	if (err)
		goto err_aes_dma;

	spin_lock(&atmel_aes.lock);
	list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
	spin_unlock(&atmel_aes.lock);

	err = atmel_aes_register_algs(aes_dd);
	if (err)
		goto err_algs;

	dev_info(dev, "Atmel AES\n");

	return 0;

err_algs:
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);
	atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
	iounmap(aes_dd->io_base);
aes_io_err:
	clk_put(aes_dd->iclk);
clk_err:
	free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
res_err:
	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);
	kfree(aes_dd);
aes_dd_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int __devexit atmel_aes_remove(struct platform_device *pdev)
{
	struct atmel_aes_dev *aes_dd;

	aes_dd = platform_get_drvdata(pdev);
	if (!aes_dd)
		return -ENODEV;
	spin_lock(&atmel_aes.lock);
	list_del(&aes_dd->list);
	spin_unlock(&atmel_aes.lock);

	atmel_aes_unregister_algs(aes_dd);

	tasklet_kill(&aes_dd->done_task);
	tasklet_kill(&aes_dd->queue_task);

	atmel_aes_dma_cleanup(aes_dd);

	iounmap(aes_dd->io_base);

	clk_put(aes_dd->iclk);

	if (aes_dd->irq > 0)
		free_irq(aes_dd->irq, aes_dd);

	kfree(aes_dd);

	return 0;
}

static struct platform_driver atmel_aes_driver = {
	.probe		= atmel_aes_probe,
	.remove		= __devexit_p(atmel_aes_remove),
	.driver		= {
		.name	= "atmel_aes",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");