linux/drivers/crypto/atmel-aes.c
/*
 * Cryptographic API.
 *
 * Support for ATMEL AES HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <nicolas@eukrea.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from omap-aes.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-aes-regs.h"

#define CFB8_BLOCK_SIZE         1
#define CFB16_BLOCK_SIZE        2
#define CFB32_BLOCK_SIZE        4
#define CFB64_BLOCK_SIZE        8

/* AES flags */
#define AES_FLAGS_MODE_MASK     0x03ff
#define AES_FLAGS_ENCRYPT       BIT(0)
#define AES_FLAGS_CBC           BIT(1)
#define AES_FLAGS_CFB           BIT(2)
#define AES_FLAGS_CFB8          BIT(3)
#define AES_FLAGS_CFB16         BIT(4)
#define AES_FLAGS_CFB32         BIT(5)
#define AES_FLAGS_CFB64         BIT(6)
#define AES_FLAGS_CFB128        BIT(7)
#define AES_FLAGS_OFB           BIT(8)
#define AES_FLAGS_CTR           BIT(9)

#define AES_FLAGS_INIT          BIT(16)
#define AES_FLAGS_DMA           BIT(17)
#define AES_FLAGS_BUSY          BIT(18)
#define AES_FLAGS_FAST          BIT(19)

#define ATMEL_AES_QUEUE_LENGTH  50

#define ATMEL_AES_DMA_THRESHOLD         16

struct atmel_aes_caps {
        bool    has_dualbuff;
        bool    has_cfb64;
        u32     max_burst_size;
};

struct atmel_aes_dev;

struct atmel_aes_ctx {
        struct atmel_aes_dev *dd;

        int     keylen;
        u32     key[AES_KEYSIZE_256 / sizeof(u32)];

        u16     block_size;
};

struct atmel_aes_reqctx {
        unsigned long mode;
};

struct atmel_aes_dma {
        struct dma_chan         *chan;
        struct dma_slave_config dma_conf;
};

struct atmel_aes_dev {
        struct list_head        list;
        unsigned long           phys_base;
        void __iomem            *io_base;

        struct atmel_aes_ctx    *ctx;
        struct device           *dev;
        struct clk              *iclk;
        int     irq;

        unsigned long           flags;
        int     err;

        spinlock_t              lock;
        struct crypto_queue     queue;

        struct tasklet_struct   done_task;
        struct tasklet_struct   queue_task;

        struct ablkcipher_request       *req;
        size_t  total;

        struct scatterlist      *in_sg;
        unsigned int            nb_in_sg;
        size_t                  in_offset;
        struct scatterlist      *out_sg;
        unsigned int            nb_out_sg;
        size_t                  out_offset;

        size_t  bufcnt;
        size_t  buflen;
        size_t  dma_size;

        void    *buf_in;
        int     dma_in;
        dma_addr_t      dma_addr_in;
        struct atmel_aes_dma    dma_lch_in;

        void    *buf_out;
        int     dma_out;
        dma_addr_t      dma_addr_out;
        struct atmel_aes_dma    dma_lch_out;

        struct atmel_aes_caps   caps;

        u32     hw_version;
};

struct atmel_aes_drv {
        struct list_head        dev_list;
        spinlock_t              lock;
};

static struct atmel_aes_drv atmel_aes = {
        .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list),
        .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock),
};
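/*
 * Count how many scatterlist entries are needed to cover the request
 * payload; the walk stops after req->nbytes bytes or at the end of the
 * list, whichever comes first.
 */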
static int atmel_aes_sg_length(struct ablkcipher_request *req,
                        struct scatterlist *sg)
{
        unsigned int total = req->nbytes;
        int sg_nb;
        unsigned int len;
        struct scatterlist *sg_list;

        sg_nb = 0;
        sg_list = sg;

        while (total) {
                len = min(sg_list->length, total);

                sg_nb++;
                total -= len;

                sg_list = sg_next(sg_list);
                if (!sg_list)
                        total = 0;
        }

        return sg_nb;
}

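/*
 * Copy up to min(buflen, total) bytes between a linear buffer and the
 * scatterlist, advancing *sg/*offset across entry boundaries.  @out
 * selects the direction (0: sg to buf, 1: buf to sg).  Returns the
 * number of bytes actually copied.
 */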
static int atmel_aes_sg_copy(struct scatterlist **sg, size_t *offset,
                        void *buf, size_t buflen, size_t total, int out)
{
        unsigned int count, off = 0;

        while (buflen && total) {
                count = min((*sg)->length - *offset, total);
                count = min(count, buflen);

                if (!count)
                        return off;

                scatterwalk_map_and_copy(buf + off, *sg, *offset, count, out);

                off += count;
                buflen -= count;
                *offset += count;
                total -= count;

                if (*offset == (*sg)->length) {
                        *sg = sg_next(*sg);
                        if (*sg)
                                *offset = 0;
                        else
                                total = 0;
                }
        }

        return off;
}

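/* MMIO accessors for the AES register window. */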
static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset)
{
        return readl_relaxed(dd->io_base + offset);
}

static inline void atmel_aes_write(struct atmel_aes_dev *dd,
                                        u32 offset, u32 value)
{
        writel_relaxed(value, dd->io_base + offset);
}

static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset,
                                        u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                *value = atmel_aes_read(dd, offset);
}

static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset,
                                        u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                atmel_aes_write(dd, offset, *value);
}

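/*
 * Bind a tfm context to an AES device: the first device on the global
 * list is picked once and then reused for the lifetime of the context.
 */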
static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
{
        struct atmel_aes_dev *aes_dd = NULL;
        struct atmel_aes_dev *tmp;

        spin_lock_bh(&atmel_aes.lock);
        if (!ctx->dd) {
                list_for_each_entry(tmp, &atmel_aes.dev_list, list) {
                        aes_dd = tmp;
                        break;
                }
                ctx->dd = aes_dd;
        } else {
                aes_dd = ctx->dd;
        }

        spin_unlock_bh(&atmel_aes.lock);

        return aes_dd;
}

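/*
 * Enable the peripheral clock and, on first use, soft-reset the engine
 * and program the MR CKEY field with 0xE, which appears to be the key
 * value the hardware requires before MR updates take effect.
 */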
static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
{
        clk_prepare_enable(dd->iclk);

        if (!(dd->flags & AES_FLAGS_INIT)) {
                atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
                atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET);
                dd->flags |= AES_FLAGS_INIT;
                dd->err = 0;
        }

        return 0;
}

static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd)
{
        return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff;
}

static void atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
{
        atmel_aes_hw_init(dd);

        dd->hw_version = atmel_aes_get_version(dd);

        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);

        clk_disable_unprepare(dd->iclk);
}

static void atmel_aes_finish_req(struct atmel_aes_dev *dd, int err)
{
        struct ablkcipher_request *req = dd->req;

        clk_disable_unprepare(dd->iclk);
        dd->flags &= ~AES_FLAGS_BUSY;

        req->base.complete(&req->base, err);
}

static void atmel_aes_dma_callback(void *data)
{
        struct atmel_aes_dev *dd = data;

        /* dma_lch_out - completed */
        tasklet_schedule(&dd->done_task);
}

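/*
 * Program both slave DMA channels for one @length-byte transfer: the in
 * channel feeds IDATAR, the out channel drains ODATAR.  Bus widths and
 * burst sizes are narrowed for the sub-word CFB variants.  Completion is
 * signalled via the out channel's callback.
 */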
static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
                dma_addr_t dma_addr_in, dma_addr_t dma_addr_out, int length)
{
        struct scatterlist sg[2];
        struct dma_async_tx_descriptor  *in_desc, *out_desc;

        dd->dma_size = length;

        if (!(dd->flags & AES_FLAGS_FAST)) {
                dma_sync_single_for_device(dd->dev, dma_addr_in, length,
                                           DMA_TO_DEVICE);
        }

        if (dd->flags & AES_FLAGS_CFB8) {
                dd->dma_lch_in.dma_conf.dst_addr_width =
                        DMA_SLAVE_BUSWIDTH_1_BYTE;
                dd->dma_lch_out.dma_conf.src_addr_width =
                        DMA_SLAVE_BUSWIDTH_1_BYTE;
        } else if (dd->flags & AES_FLAGS_CFB16) {
                dd->dma_lch_in.dma_conf.dst_addr_width =
                        DMA_SLAVE_BUSWIDTH_2_BYTES;
                dd->dma_lch_out.dma_conf.src_addr_width =
                        DMA_SLAVE_BUSWIDTH_2_BYTES;
        } else {
                dd->dma_lch_in.dma_conf.dst_addr_width =
                        DMA_SLAVE_BUSWIDTH_4_BYTES;
                dd->dma_lch_out.dma_conf.src_addr_width =
                        DMA_SLAVE_BUSWIDTH_4_BYTES;
        }

        if (dd->flags & (AES_FLAGS_CFB8 | AES_FLAGS_CFB16 |
                        AES_FLAGS_CFB32 | AES_FLAGS_CFB64)) {
                dd->dma_lch_in.dma_conf.src_maxburst = 1;
                dd->dma_lch_in.dma_conf.dst_maxburst = 1;
                dd->dma_lch_out.dma_conf.src_maxburst = 1;
                dd->dma_lch_out.dma_conf.dst_maxburst = 1;
        } else {
                dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
        }

        dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);
        dmaengine_slave_config(dd->dma_lch_out.chan, &dd->dma_lch_out.dma_conf);

        dd->flags |= AES_FLAGS_DMA;

        sg_init_table(&sg[0], 1);
        sg_dma_address(&sg[0]) = dma_addr_in;
        sg_dma_len(&sg[0]) = length;

        sg_init_table(&sg[1], 1);
        sg_dma_address(&sg[1]) = dma_addr_out;
        sg_dma_len(&sg[1]) = length;

        in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, &sg[0],
                                1, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!in_desc)
                return -EINVAL;

        out_desc = dmaengine_prep_slave_sg(dd->dma_lch_out.chan, &sg[1],
                                1, DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!out_desc)
                return -EINVAL;

        out_desc->callback = atmel_aes_dma_callback;
        out_desc->callback_param = dd;

        dmaengine_submit(out_desc);
        dma_async_issue_pending(dd->dma_lch_out.chan);

        dmaengine_submit(in_desc);
        dma_async_issue_pending(dd->dma_lch_in.chan);

        return 0;
}

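/*
 * PIO path for small requests: copy the payload into the linear buffer
 * and push it through IDATAR; the DATARDY interrupt drives completion.
 */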
static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
{
        dd->flags &= ~AES_FLAGS_DMA;

        /* use cache buffers */
        dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
        if (!dd->nb_in_sg)
                return -EINVAL;

        dd->nb_out_sg = atmel_aes_sg_length(dd->req, dd->out_sg);
        if (!dd->nb_out_sg)
                return -EINVAL;

        dd->bufcnt = sg_copy_to_buffer(dd->in_sg, dd->nb_in_sg,
                                        dd->buf_in, dd->total);
        if (!dd->bufcnt)
                return -EINVAL;

        dd->total -= dd->bufcnt;

        atmel_aes_write(dd, AES_IER, AES_INT_DATARDY);
        atmel_aes_write_n(dd, AES_IDATAR(0), (u32 *) dd->buf_in,
                                dd->bufcnt >> 2);

        return 0;
}

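/*
 * Start one DMA chunk.  When both scatterlists are word-aligned,
 * block-sized and of equal length, they are mapped directly (the "fast"
 * path); otherwise the data is bounced through the pre-mapped linear
 * buffers.
 */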
static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
{
        int err, fast = 0, in, out;
        size_t count;
        dma_addr_t addr_in, addr_out;

        if ((!dd->in_offset) && (!dd->out_offset)) {
                /* check for alignment */
                in = IS_ALIGNED((u32)dd->in_sg->offset, sizeof(u32)) &&
                        IS_ALIGNED(dd->in_sg->length, dd->ctx->block_size);
                out = IS_ALIGNED((u32)dd->out_sg->offset, sizeof(u32)) &&
                        IS_ALIGNED(dd->out_sg->length, dd->ctx->block_size);
                fast = in && out;

                if (sg_dma_len(dd->in_sg) != sg_dma_len(dd->out_sg))
                        fast = 0;
        }

        if (fast) {
                count = min(dd->total, sg_dma_len(dd->in_sg));
                count = min(count, sg_dma_len(dd->out_sg));

                err = dma_map_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }

                err = dma_map_sg(dd->dev, dd->out_sg, 1,
                                DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        dma_unmap_sg(dd->dev, dd->in_sg, 1,
                                DMA_TO_DEVICE);
                        return -EINVAL;
                }

                addr_in = sg_dma_address(dd->in_sg);
                addr_out = sg_dma_address(dd->out_sg);

                dd->flags |= AES_FLAGS_FAST;
        } else {
                /* use cache buffers */
                count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
                                dd->buf_in, dd->buflen, dd->total, 0);

                addr_in = dd->dma_addr_in;
                addr_out = dd->dma_addr_out;

                dd->flags &= ~AES_FLAGS_FAST;
        }

        dd->total -= count;

        err = atmel_aes_crypt_dma(dd, addr_in, addr_out, count);

        if (err && (dd->flags & AES_FLAGS_FAST)) {
                dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
        }

        return err;
}

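/*
 * Program the Mode Register (key size, operating mode, direction, start
 * mode), then the key, then the IV; MR must be written before the IV
 * registers.
 */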
static int atmel_aes_write_ctrl(struct atmel_aes_dev *dd)
{
        int err;
        u32 valcr = 0, valmr = 0;

        err = atmel_aes_hw_init(dd);
        if (err)
                return err;

        /* MR register must be set before IV registers */
        if (dd->ctx->keylen == AES_KEYSIZE_128)
                valmr |= AES_MR_KEYSIZE_128;
        else if (dd->ctx->keylen == AES_KEYSIZE_192)
                valmr |= AES_MR_KEYSIZE_192;
        else
                valmr |= AES_MR_KEYSIZE_256;

        if (dd->flags & AES_FLAGS_CBC) {
                valmr |= AES_MR_OPMOD_CBC;
        } else if (dd->flags & AES_FLAGS_CFB) {
                valmr |= AES_MR_OPMOD_CFB;
                if (dd->flags & AES_FLAGS_CFB8)
                        valmr |= AES_MR_CFBS_8b;
                else if (dd->flags & AES_FLAGS_CFB16)
                        valmr |= AES_MR_CFBS_16b;
                else if (dd->flags & AES_FLAGS_CFB32)
                        valmr |= AES_MR_CFBS_32b;
                else if (dd->flags & AES_FLAGS_CFB64)
                        valmr |= AES_MR_CFBS_64b;
                else if (dd->flags & AES_FLAGS_CFB128)
                        valmr |= AES_MR_CFBS_128b;
        } else if (dd->flags & AES_FLAGS_OFB) {
                valmr |= AES_MR_OPMOD_OFB;
        } else if (dd->flags & AES_FLAGS_CTR) {
                valmr |= AES_MR_OPMOD_CTR;
        } else {
                valmr |= AES_MR_OPMOD_ECB;
        }

        if (dd->flags & AES_FLAGS_ENCRYPT)
                valmr |= AES_MR_CYPHER_ENC;

        if (dd->total > ATMEL_AES_DMA_THRESHOLD) {
                valmr |= AES_MR_SMOD_IDATAR0;
                if (dd->caps.has_dualbuff)
                        valmr |= AES_MR_DUALBUFF;
        } else {
                valmr |= AES_MR_SMOD_AUTO;
        }

        atmel_aes_write(dd, AES_CR, valcr);
        atmel_aes_write(dd, AES_MR, valmr);

        atmel_aes_write_n(dd, AES_KEYWR(0), dd->ctx->key,
                                                dd->ctx->keylen >> 2);

        if (((dd->flags & AES_FLAGS_CBC) || (dd->flags & AES_FLAGS_CFB) ||
           (dd->flags & AES_FLAGS_OFB) || (dd->flags & AES_FLAGS_CTR)) &&
           dd->req->info) {
                atmel_aes_write_n(dd, AES_IVR(0), dd->req->info, 4);
        }

        return 0;
}

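/*
 * Enqueue @req (NULL when called from the queue tasklet) and, if the
 * device is idle, dequeue the next request and start its first transfer.
 * Returns the enqueue status to the caller.
 */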
static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
                               struct ablkcipher_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct atmel_aes_ctx *ctx;
        struct atmel_aes_reqctx *rctx;
        unsigned long flags;
        int err, ret = 0;

        spin_lock_irqsave(&dd->lock, flags);
        if (req)
                ret = ablkcipher_enqueue_request(&dd->queue, req);
        if (dd->flags & AES_FLAGS_BUSY) {
                spin_unlock_irqrestore(&dd->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&dd->queue);
        async_req = crypto_dequeue_request(&dd->queue);
        if (async_req)
                dd->flags |= AES_FLAGS_BUSY;
        spin_unlock_irqrestore(&dd->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ablkcipher_request_cast(async_req);

        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
        dd->in_offset = 0;
        dd->in_sg = req->src;
        dd->out_offset = 0;
        dd->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= AES_FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~AES_FLAGS_MODE_MASK) | rctx->mode;
        dd->ctx = ctx;
        ctx->dd = dd;

        err = atmel_aes_write_ctrl(dd);
        if (!err) {
                if (dd->total > ATMEL_AES_DMA_THRESHOLD)
                        err = atmel_aes_crypt_dma_start(dd);
                else
                        err = atmel_aes_crypt_cpu_start(dd);
        }
        if (err) {
                /* aes_task will not finish it, so do it here */
                atmel_aes_finish_req(dd, err);
                tasklet_schedule(&dd->queue_task);
        }

        return ret;
}

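/*
 * Finish the current DMA chunk: unmap the scatterlists on the fast path,
 * or sync the out bounce buffer and copy the result back into the
 * destination scatterlist on the slow path.
 */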
static int atmel_aes_crypt_dma_stop(struct atmel_aes_dev *dd)
{
        int err = -EINVAL;
        size_t count;

        if (dd->flags & AES_FLAGS_DMA) {
                err = 0;
                if (dd->flags & AES_FLAGS_FAST) {
                        dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
                        dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
                } else {
                        /* sync for the CPU before reading the device output */
                        dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
                                dd->dma_size, DMA_FROM_DEVICE);

                        /* copy data */
                        count = atmel_aes_sg_copy(&dd->out_sg, &dd->out_offset,
                                dd->buf_out, dd->buflen, dd->dma_size, 1);
                        if (count != dd->dma_size) {
                                err = -EINVAL;
                                pr_err("not all data converted: %zu\n", count);
                        }
                }
        }

        return err;
}

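/*
 * Allocate one page per direction for the bounce buffers and map them for
 * streaming DMA; buflen is rounded down to a multiple of the AES block
 * size.
 */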
static int atmel_aes_buff_init(struct atmel_aes_dev *dd)
{
        int err = -ENOMEM;

        dd->buf_in = (void *)__get_free_pages(GFP_KERNEL, 0);
        dd->buf_out = (void *)__get_free_pages(GFP_KERNEL, 0);
        dd->buflen = PAGE_SIZE;
        dd->buflen &= ~(AES_BLOCK_SIZE - 1);

        if (!dd->buf_in || !dd->buf_out) {
                dev_err(dd->dev, "unable to alloc pages.\n");
                goto err_alloc;
        }

        /* MAP here */
        dd->dma_addr_in = dma_map_single(dd->dev, dd->buf_in,
                                        dd->buflen, DMA_TO_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_in)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_in;
        }

        dd->dma_addr_out = dma_map_single(dd->dev, dd->buf_out,
                                        dd->buflen, DMA_FROM_DEVICE);
        if (dma_mapping_error(dd->dev, dd->dma_addr_out)) {
                dev_err(dd->dev, "dma %zu bytes error\n", dd->buflen);
                err = -EINVAL;
                goto err_map_out;
        }

        return 0;

err_map_out:
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
                DMA_TO_DEVICE);
err_map_in:
err_alloc:
        /* free_page() is a no-op on a NULL buffer, so all paths share this */
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
        pr_err("error: %d\n", err);
        return err;
}

static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd)
{
        dma_unmap_single(dd->dev, dd->dma_addr_out, dd->buflen,
                         DMA_FROM_DEVICE);
        dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
                DMA_TO_DEVICE);
        free_page((unsigned long)dd->buf_out);
        free_page((unsigned long)dd->buf_in);
}

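/*
 * Common entry point for all modes: check that the request length is a
 * whole number of blocks for the chosen mode, stash the mode bits in the
 * request context and hand the request to the queue.
 */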
static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct atmel_aes_dev *dd;

        if (mode & AES_FLAGS_CFB8) {
                if (!IS_ALIGNED(req->nbytes, CFB8_BLOCK_SIZE)) {
                        pr_err("request size is not a multiple of the CFB8 block size\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB8_BLOCK_SIZE;
        } else if (mode & AES_FLAGS_CFB16) {
                if (!IS_ALIGNED(req->nbytes, CFB16_BLOCK_SIZE)) {
                        pr_err("request size is not a multiple of the CFB16 block size\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB16_BLOCK_SIZE;
        } else if (mode & AES_FLAGS_CFB32) {
                if (!IS_ALIGNED(req->nbytes, CFB32_BLOCK_SIZE)) {
                        pr_err("request size is not a multiple of the CFB32 block size\n");
                        return -EINVAL;
                }
                ctx->block_size = CFB32_BLOCK_SIZE;
        } else {
                if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                        pr_err("request size is not a multiple of the AES block size\n");
                        return -EINVAL;
                }
                ctx->block_size = AES_BLOCK_SIZE;
        }

        dd = atmel_aes_find_dev(ctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        return atmel_aes_handle_queue(dd, req);
}

static bool atmel_aes_filter(struct dma_chan *chan, void *slave)
{
        struct at_dma_slave     *sl = slave;

        if (sl && sl->dma_dev == chan->device->dev) {
                chan->private = sl;
                return true;
        } else {
                return false;
        }
}

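/*
 * Claim one slave DMA channel per direction from platform data and
 * pre-fill the slave configs with the data register addresses; widths
 * and bursts are finalized per transfer in atmel_aes_crypt_dma().
 */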
static int atmel_aes_dma_init(struct atmel_aes_dev *dd,
        struct crypto_platform_data *pdata)
{
        int err = -ENOMEM;
        dma_cap_mask_t mask_in, mask_out;

        if (pdata && pdata->dma_slave->txdata.dma_dev &&
                pdata->dma_slave->rxdata.dma_dev) {

                /* Try to grab 2 DMA channels */
                dma_cap_zero(mask_in);
                dma_cap_set(DMA_SLAVE, mask_in);

                dd->dma_lch_in.chan = dma_request_channel(mask_in,
                                atmel_aes_filter, &pdata->dma_slave->rxdata);
                if (!dd->dma_lch_in.chan)
                        goto err_dma_in;

                dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV;
                dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base +
                        AES_IDATAR(0);
                dd->dma_lch_in.dma_conf.src_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_in.dma_conf.src_addr_width =
                        DMA_SLAVE_BUSWIDTH_4_BYTES;
                dd->dma_lch_in.dma_conf.dst_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_in.dma_conf.dst_addr_width =
                        DMA_SLAVE_BUSWIDTH_4_BYTES;
                dd->dma_lch_in.dma_conf.device_fc = false;

                dma_cap_zero(mask_out);
                dma_cap_set(DMA_SLAVE, mask_out);
                dd->dma_lch_out.chan = dma_request_channel(mask_out,
                                atmel_aes_filter, &pdata->dma_slave->txdata);
                if (!dd->dma_lch_out.chan)
                        goto err_dma_out;

                dd->dma_lch_out.dma_conf.direction = DMA_DEV_TO_MEM;
                dd->dma_lch_out.dma_conf.src_addr = dd->phys_base +
                        AES_ODATAR(0);
                dd->dma_lch_out.dma_conf.src_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_out.dma_conf.src_addr_width =
                        DMA_SLAVE_BUSWIDTH_4_BYTES;
                dd->dma_lch_out.dma_conf.dst_maxburst = dd->caps.max_burst_size;
                dd->dma_lch_out.dma_conf.dst_addr_width =
                        DMA_SLAVE_BUSWIDTH_4_BYTES;
                dd->dma_lch_out.dma_conf.device_fc = false;

                return 0;
        } else {
                return -ENODEV;
        }

err_dma_out:
        dma_release_channel(dd->dma_lch_in.chan);
err_dma_in:
        return err;
}

static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd)
{
        dma_release_channel(dd->dma_lch_in.chan);
        dma_release_channel(dd->dma_lch_out.chan);
}

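/* Accept 128-, 192- or 256-bit keys and cache them in the tfm context. */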
static int atmel_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct atmel_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
                   keylen != AES_KEYSIZE_256) {
                crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        return 0;
}

static int atmel_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT);
}

static int atmel_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, 0);
}

static int atmel_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int atmel_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CBC);
}

static int atmel_aes_ofb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB);
}

static int atmel_aes_ofb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_OFB);
}

static int atmel_aes_cfb_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB128);
}

static int atmel_aes_cfb64_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb64_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB64);
}

static int atmel_aes_cfb32_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb32_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB32);
}

static int atmel_aes_cfb16_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb16_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB16);
}

static int atmel_aes_cfb8_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req,
                AES_FLAGS_ENCRYPT | AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_cfb8_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CFB | AES_FLAGS_CFB8);
}

static int atmel_aes_ctr_encrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int atmel_aes_ctr_decrypt(struct ablkcipher_request *req)
{
        return atmel_aes_crypt(req, AES_FLAGS_CTR);
}

static int atmel_aes_cra_init(struct crypto_tfm *tfm)
{
        tfm->crt_ablkcipher.reqsize = sizeof(struct atmel_aes_reqctx);

        return 0;
}

static void atmel_aes_cra_exit(struct crypto_tfm *tfm)
{
}

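/*
 * Algorithms available on every supported revision; cfb64(aes) is
 * registered separately below, since only newer IP revisions implement
 * it (see atmel_aes_get_cap()).
 */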
static struct crypto_alg aes_algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "atmel-ecb-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ecb_encrypt,
                .decrypt        = atmel_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "atmel-cbc-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cbc_encrypt,
                .decrypt        = atmel_aes_cbc_decrypt,
        }
},
{
        .cra_name               = "ofb(aes)",
        .cra_driver_name        = "atmel-ofb-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ofb_encrypt,
                .decrypt        = atmel_aes_ofb_decrypt,
        }
},
{
        .cra_name               = "cfb(aes)",
        .cra_driver_name        = "atmel-cfb-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb_encrypt,
                .decrypt        = atmel_aes_cfb_decrypt,
        }
},
{
        .cra_name               = "cfb32(aes)",
        .cra_driver_name        = "atmel-cfb32-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB32_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x3,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb32_encrypt,
                .decrypt        = atmel_aes_cfb32_decrypt,
        }
},
{
        .cra_name               = "cfb16(aes)",
        .cra_driver_name        = "atmel-cfb16-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB16_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x1,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb16_encrypt,
                .decrypt        = atmel_aes_cfb16_decrypt,
        }
},
{
        .cra_name               = "cfb8(aes)",
        .cra_driver_name        = "atmel-cfb8-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB8_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb8_encrypt,
                .decrypt        = atmel_aes_cfb8_decrypt,
        }
},
{
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "atmel-ctr-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0xf,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_ctr_encrypt,
                .decrypt        = atmel_aes_ctr_decrypt,
        }
},
};

static struct crypto_alg aes_cfb64_alg = {
        .cra_name               = "cfb64(aes)",
        .cra_driver_name        = "atmel-cfb64-aes",
        .cra_priority           = 100,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = CFB64_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct atmel_aes_ctx),
        .cra_alignmask          = 0x7,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = atmel_aes_cra_init,
        .cra_exit               = atmel_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = atmel_aes_setkey,
                .encrypt        = atmel_aes_cfb64_encrypt,
                .decrypt        = atmel_aes_cfb64_decrypt,
        }
};

static void atmel_aes_queue_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data;

        atmel_aes_handle_queue(dd, NULL);
}

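/*
 * Bottom half, run after DATARDY (PIO) or the out-channel DMA callback:
 * collect the output, then either start the next chunk or complete the
 * request and poke the queue again.
 */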
static void atmel_aes_done_task(unsigned long data)
{
        struct atmel_aes_dev *dd = (struct atmel_aes_dev *) data;
        int err;

        if (!(dd->flags & AES_FLAGS_DMA)) {
                atmel_aes_read_n(dd, AES_ODATAR(0), (u32 *) dd->buf_out,
                                dd->bufcnt >> 2);

                if (sg_copy_from_buffer(dd->out_sg, dd->nb_out_sg,
                        dd->buf_out, dd->bufcnt))
                        err = 0;
                else
                        err = -EINVAL;

                goto cpu_end;
        }

        err = atmel_aes_crypt_dma_stop(dd);

        err = dd->err ? : err;

        if (dd->total && !err) {
                if (dd->flags & AES_FLAGS_FAST) {
                        dd->in_sg = sg_next(dd->in_sg);
                        dd->out_sg = sg_next(dd->out_sg);
                        if (!dd->in_sg || !dd->out_sg)
                                err = -EINVAL;
                }
                if (!err)
                        err = atmel_aes_crypt_dma_start(dd);
                if (!err)
                        return; /* DMA started; not finishing yet. */
        }

cpu_end:
        atmel_aes_finish_req(dd, err);
        atmel_aes_handle_queue(dd, NULL);
}

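/*
 * Interrupt handler: mask the interrupts that fired by writing them back
 * to IDR, then defer the real work to the done tasklet.
 */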
static irqreturn_t atmel_aes_irq(int irq, void *dev_id)
{
        struct atmel_aes_dev *aes_dd = dev_id;
        u32 reg;

        reg = atmel_aes_read(aes_dd, AES_ISR);
        if (reg & atmel_aes_read(aes_dd, AES_IMR)) {
                atmel_aes_write(aes_dd, AES_IDR, reg);
                if (AES_FLAGS_BUSY & aes_dd->flags)
                        tasklet_schedule(&aes_dd->done_task);
                else
                        dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n");
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);
        if (dd->caps.has_cfb64)
                crypto_unregister_alg(&aes_cfb64_alg);
}

static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
{
        int err, i, j;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        if (dd->caps.has_cfb64) {
                err = crypto_register_alg(&aes_cfb64_alg);
                if (err)
                        goto err_aes_cfb64_alg;
        }

        return 0;

err_aes_cfb64_alg:
        i = ARRAY_SIZE(aes_algs);
err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&aes_algs[j]);

        return err;
}

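/*
 * Derive the feature flags from the major IP revision; unknown revisions
 * fall back to the minimal feature set.
 */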
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
        dd->caps.has_dualbuff = 0;
        dd->caps.has_cfb64 = 0;
        dd->caps.max_burst_size = 1;

        /* keep only major version number */
        switch (dd->hw_version & 0xff0) {
        case 0x130:
                dd->caps.has_dualbuff = 1;
                dd->caps.has_cfb64 = 1;
                dd->caps.max_burst_size = 4;
                break;
        case 0x120:
                break;
        default:
                dev_warn(dd->dev,
                        "unknown AES hardware version, using minimum capabilities\n");
                break;
        }
}

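/*
 * Probe: map the MMIO window, grab the IRQ and clock, read the hardware
 * version, set up bounce buffers and DMA channels, then register the
 * algorithms with the crypto API.
 */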
static int atmel_aes_probe(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;
        struct crypto_platform_data *pdata;
        struct device *dev = &pdev->dev;
        struct resource *aes_res;
        unsigned long aes_phys_size;
        int err;

        pdata = pdev->dev.platform_data;
        if (!pdata) {
                err = -ENXIO;
                goto aes_dd_err;
        }

        aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
        if (aes_dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
                goto aes_dd_err;
        }

        aes_dd->dev = dev;

        platform_set_drvdata(pdev, aes_dd);

        INIT_LIST_HEAD(&aes_dd->list);

        tasklet_init(&aes_dd->done_task, atmel_aes_done_task,
                                        (unsigned long)aes_dd);
        tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task,
                                        (unsigned long)aes_dd);

        crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH);

        aes_dd->irq = -1;

        /* Get the base address */
        aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!aes_res) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto res_err;
        }
        aes_dd->phys_base = aes_res->start;
        aes_phys_size = resource_size(aes_res);

        /* Get the IRQ */
        aes_dd->irq = platform_get_irq(pdev, 0);
        if (aes_dd->irq < 0) {
                dev_err(dev, "no IRQ resource info\n");
                err = aes_dd->irq;
                goto aes_irq_err;
        }

        err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
                                                aes_dd);
        if (err) {
                dev_err(dev, "unable to request aes irq.\n");
                goto aes_irq_err;
        }

        /* Initializing the clock */
        aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
        if (IS_ERR(aes_dd->iclk)) {
                dev_err(dev, "clock initialization failed.\n");
                err = PTR_ERR(aes_dd->iclk);
                goto clk_err;
        }

        aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
        if (!aes_dd->io_base) {
                dev_err(dev, "can't ioremap\n");
                err = -ENOMEM;
                goto aes_io_err;
        }

        atmel_aes_hw_version_init(aes_dd);

        atmel_aes_get_cap(aes_dd);

        err = atmel_aes_buff_init(aes_dd);
        if (err)
                goto err_aes_buff;

        err = atmel_aes_dma_init(aes_dd, pdata);
        if (err)
                goto err_aes_dma;

        spin_lock(&atmel_aes.lock);
        list_add_tail(&aes_dd->list, &atmel_aes.dev_list);
        spin_unlock(&atmel_aes.lock);

        err = atmel_aes_register_algs(aes_dd);
        if (err)
                goto err_algs;

        dev_info(dev, "Atmel AES\n");

        return 0;

err_algs:
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);
        atmel_aes_dma_cleanup(aes_dd);
err_aes_dma:
        atmel_aes_buff_cleanup(aes_dd);
err_aes_buff:
        iounmap(aes_dd->io_base);
aes_io_err:
        clk_put(aes_dd->iclk);
clk_err:
        free_irq(aes_dd->irq, aes_dd);
aes_irq_err:
res_err:
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
        kfree(aes_dd);
aes_dd_err:
        dev_err(dev, "initialization failed.\n");

        return err;
}

static int atmel_aes_remove(struct platform_device *pdev)
{
        struct atmel_aes_dev *aes_dd;

        aes_dd = platform_get_drvdata(pdev);
        if (!aes_dd)
                return -ENODEV;
        spin_lock(&atmel_aes.lock);
        list_del(&aes_dd->list);
        spin_unlock(&atmel_aes.lock);

        atmel_aes_unregister_algs(aes_dd);

        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);

        atmel_aes_dma_cleanup(aes_dd);

        atmel_aes_buff_cleanup(aes_dd);

        iounmap(aes_dd->io_base);

        clk_put(aes_dd->iclk);

        if (aes_dd->irq > 0)
                free_irq(aes_dd->irq, aes_dd);

        kfree(aes_dd);

        return 0;
}

static struct platform_driver atmel_aes_driver = {
        .probe          = atmel_aes_probe,
        .remove         = atmel_aes_remove,
        .driver         = {
                .name   = "atmel_aes",
                .owner  = THIS_MODULE,
        },
};

module_platform_driver(atmel_aes_driver);

MODULE_DESCRIPTION("Atmel AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");