linux/drivers/crypto/bfin_crc.c
/*
 * Cryptographic API.
 *
 * Support Blackfin CRC HW acceleration.
 *
 * Copyright 2012 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <asm/unaligned.h>

#include <asm/dma.h>
#include <asm/portmux.h>

#include "bfin_crc.h"

#define CRC_CCRYPTO_QUEUE_LENGTH        5

#define DRIVER_NAME "bfin-hmac-crc"
#define CHKSUM_DIGEST_SIZE      4
#define CHKSUM_BLOCK_SIZE       1

#define CRC_MAX_DMA_DESC        100

#define CRC_CRYPTO_STATE_UPDATE         1
#define CRC_CRYPTO_STATE_FINALUPDATE    2
#define CRC_CRYPTO_STATE_FINISH         3
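
/*
 * Request states: .update() queues CRC_CRYPTO_STATE_UPDATE work,
 * .finup() queues CRC_CRYPTO_STATE_FINALUPDATE (hash the remaining bytes
 * and emit the digest), and .final() queues CRC_CRYPTO_STATE_FINISH
 * (flush buffered tail bytes and emit the digest).
 */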

struct bfin_crypto_crc {
        struct list_head        list;
        struct device           *dev;
        spinlock_t              lock;

        int                     irq;
        int                     dma_ch;
        u32                     poly;
        struct crc_register     *regs;

        struct ahash_request    *req; /* current request in operation */
        struct dma_desc_array   *sg_cpu; /* virt addr of sg dma descriptors */
        dma_addr_t              sg_dma; /* phy addr of sg dma descriptors */
        u8                      *sg_mid_buf;
        dma_addr_t              sg_mid_dma; /* phy addr of sg mid buffer */

        struct tasklet_struct   done_task;
        struct crypto_queue     queue; /* waiting requests */

        u8                      busy:1; /* crc device in operation flag */
};

static struct bfin_crypto_crc_list {
        struct list_head        dev_list;
        spinlock_t              lock;
} crc_list;

struct bfin_crypto_crc_reqctx {
        struct bfin_crypto_crc  *crc;

        unsigned int            total;  /* total request bytes */
        size_t                  sg_buflen; /* bytes for this update */
        unsigned int            sg_nents;
        struct scatterlist      *sg; /* sg list head for this update */
        struct scatterlist      bufsl[2]; /* chained sg list */

        size_t                  bufnext_len;
        size_t                  buflast_len;
        u8                      bufnext[CHKSUM_DIGEST_SIZE]; /* extra bytes for next update */
        u8                      buflast[CHKSUM_DIGEST_SIZE]; /* extra bytes from last update */

        u8                      flag;
};

struct bfin_crypto_crc_ctx {
        struct bfin_crypto_crc  *crc;
        u32                     key;
};

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list)
{
        struct scatterlist *sg = sg_list;
        int sg_nents = 1;

        if (sg_list == NULL)
                return 0;

        while (!sg_is_last(sg)) {
                sg_nents++;
                sg = scatterwalk_sg_next(sg);
        }

        return sg_nents;
}

/*
 * get element in scatter list by given index
 */
static struct scatterlist *sg_get(struct scatterlist *sg_list, unsigned int nents,
                                unsigned int index)
{
        struct scatterlist *sg = NULL;
        int i;

        for_each_sg(sg_list, sg, nents, i)
                if (i == index)
                        break;

        return sg;
}

static int bfin_crypto_crc_init_hw(struct bfin_crypto_crc *crc, u32 key)
{
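        /*
         * Clear the data count reload value, select plain CRC computation
         * mode, and seed the result register with the caller's 32-bit key
         * before unmasking the compare-error and count-expired interrupts.
         */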
        writel(0, &crc->regs->datacntrld);
        writel(MODE_CALC_CRC << OPMODE_OFFSET, &crc->regs->control);
        writel(key, &crc->regs->curresult);

        /* setup CRC interrupts */
        writel(CMPERRI | DCNTEXPI, &crc->regs->status);
        writel(CMPERRI | DCNTEXPI, &crc->regs->intrenset);

        return 0;
}

static int bfin_crypto_crc_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);
        struct bfin_crypto_crc *crc;

        spin_lock_bh(&crc_list.lock);
        list_for_each_entry(crc, &crc_list.dev_list, list) {
                crc_ctx->crc = crc;
                break;
        }
        spin_unlock_bh(&crc_list.lock);

        /* bind the request to a device before ctx->crc->dev is used below */
        ctx->crc = crc;
        dev_dbg(ctx->crc->dev, "crc_init\n");

        if (sg_count(req->src) > CRC_MAX_DMA_DESC) {
                dev_dbg(ctx->crc->dev, "init: requested sg list is too long (> %d entries)\n",
                        CRC_MAX_DMA_DESC);
                return -EINVAL;
        }

        ctx->bufnext_len = 0;
        ctx->buflast_len = 0;
        ctx->sg_buflen = 0;
        ctx->total = 0;
        ctx->flag = 0;

        /* init crc results */
        put_unaligned_le32(crc_ctx->key, req->result);

        dev_dbg(ctx->crc->dev, "init: digest size: %d\n",
                crypto_ahash_digestsize(tfm));

        return bfin_crypto_crc_init_hw(crc, crc_ctx->key);
}

static void bfin_crypto_crc_config_dma(struct bfin_crypto_crc *crc)
{
        struct scatterlist *sg;
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req);
        int i = 0, j = 0;
        unsigned long dma_config;
        unsigned int dma_count;
        unsigned int dma_addr;
        unsigned int mid_dma_count = 0;
        int dma_mod;

        dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE);

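        /*
         * Build one array-mode descriptor per sg entry. The CRC engine
         * consumes 32-bit words, so an sg entry whose byte count is not a
         * multiple of 4 leaves a tail; the tail is staged in a 4-byte
         * "middle" bounce buffer and completed with the leading bytes of
         * the next sg entry, which is then shortened accordingly.
         *
         * Hypothetical example: sg entries of 6 and 10 bytes become three
         * descriptors: 4 bytes of sg0, a middle buffer holding sg0's
         * 2 tail bytes plus sg1's first 2 bytes, and sg1's remaining
         * 8 bytes.
         */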
        for_each_sg(ctx->sg, sg, ctx->sg_nents, j) {
                dma_addr = sg_dma_address(sg);
                /* deduct the extra bytes in the last sg */
                if (sg_is_last(sg))
                        dma_count = sg_dma_len(sg) - ctx->bufnext_len;
                else
                        dma_count = sg_dma_len(sg);

                if (mid_dma_count) {
                        /*
                         * Pad the last middle dma buffer to 4 bytes with
                         * the first bytes of the current sg buffer, then
                         * advance the address and reduce the length of
                         * the current sg accordingly.
                         */
                        memcpy(crc->sg_mid_buf + (i << 2) + mid_dma_count,
                                sg_virt(sg),
                                CHKSUM_DIGEST_SIZE - mid_dma_count);
                        dma_addr += CHKSUM_DIGEST_SIZE - mid_dma_count;
                        dma_count -= CHKSUM_DIGEST_SIZE - mid_dma_count;

                        dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 |
                                DMAEN | PSIZE_32 | WDSIZE_32;

                        /* setup new dma descriptor for next middle dma */
                        crc->sg_cpu[i].start_addr = crc->sg_mid_dma + (i << 2);
                        crc->sg_cpu[i].cfg = dma_config;
                        crc->sg_cpu[i].x_count = 1;
                        crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
                        dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                                "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                                i, crc->sg_cpu[i].start_addr,
                                crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                                crc->sg_cpu[i].x_modify);
                        i++;
                }

                dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32;
                /* chop current sg dma len to multiple of 32 bits */
                mid_dma_count = dma_count % 4;
                dma_count &= ~0x3;

                if (dma_addr % 4 == 0) {
                        dma_config |= WDSIZE_32;
                        dma_count >>= 2;
                        dma_mod = 4;
                } else if (dma_addr % 2 == 0) {
                        dma_config |= WDSIZE_16;
                        dma_count >>= 1;
                        dma_mod = 2;
                } else {
                        dma_config |= WDSIZE_8;
                        dma_mod = 1;
                }

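                /* dma_count is now in units of the selected transfer width */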
                crc->sg_cpu[i].start_addr = dma_addr;
                crc->sg_cpu[i].cfg = dma_config;
                crc->sg_cpu[i].x_count = dma_count;
                crc->sg_cpu[i].x_modify = dma_mod;
                dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                        "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                        i, crc->sg_cpu[i].start_addr,
                        crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                        crc->sg_cpu[i].x_modify);
                i++;

                if (mid_dma_count) {
                        /* copy extra tail bytes to the next middle dma
                         * buffer; the byte offset is the transfer count
                         * times the transfer width
                         */
                        memcpy(crc->sg_mid_buf + (i << 2),
                                (u8 *)sg_virt(sg) + dma_count * dma_mod,
                                mid_dma_count);
                }
        }

        dma_config = DMAFLOW_ARRAY | RESTART | NDSIZE_3 | DMAEN | PSIZE_32 | WDSIZE_32;
        /* For final update req, append the buffer for next update as well */
        if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
                ctx->flag == CRC_CRYPTO_STATE_FINISH)) {
                crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext,
                                                CHKSUM_DIGEST_SIZE, DMA_TO_DEVICE);
                crc->sg_cpu[i].cfg = dma_config;
                crc->sg_cpu[i].x_count = 1;
                crc->sg_cpu[i].x_modify = CHKSUM_DIGEST_SIZE;
                dev_dbg(crc->dev, "%d: crc_dma: start_addr:0x%lx, "
                        "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",
                        i, crc->sg_cpu[i].start_addr,
                        crc->sg_cpu[i].cfg, crc->sg_cpu[i].x_count,
                        crc->sg_cpu[i].x_modify);
                i++;
        }

        if (i == 0)
                return;

        /* Set the last descriptor to stop mode */
        crc->sg_cpu[i - 1].cfg &= ~(DMAFLOW | NDSIZE);
        crc->sg_cpu[i - 1].cfg |= DI_EN;
        set_dma_curr_desc_addr(crc->dma_ch, (unsigned long *)crc->sg_dma);
        set_dma_x_count(crc->dma_ch, 0);
        set_dma_x_modify(crc->dma_ch, 0);
        set_dma_config(crc->dma_ch, dma_config);
}

static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
                                  struct ahash_request *req)
{
        struct crypto_async_request *async_req, *backlog;
        struct bfin_crypto_crc_reqctx *ctx;
        struct scatterlist *sg;
        int ret = 0;
        int nsg, i, j;
        unsigned int nextlen;
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&crc->lock, flags);
        if (req)
                ret = ahash_enqueue_request(&crc->queue, req);
        if (crc->busy) {
                spin_unlock_irqrestore(&crc->lock, flags);
                return ret;
        }
        backlog = crypto_get_backlog(&crc->queue);
        async_req = crypto_dequeue_request(&crc->queue);
        if (async_req)
                crc->busy = 1;
        spin_unlock_irqrestore(&crc->lock, flags);

        if (!async_req)
                return ret;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        req = ahash_request_cast(async_req);
        crc->req = req;
        ctx = ahash_request_ctx(req);
        ctx->sg = NULL;
        ctx->sg_buflen = 0;
        ctx->sg_nents = 0;

        dev_dbg(crc->dev, "handling new req, flag=%u, nbytes: %d\n",
                                                ctx->flag, req->nbytes);

        if (ctx->flag == CRC_CRYPTO_STATE_FINISH) {
                if (ctx->bufnext_len == 0) {
                        crc->busy = 0;
                        return 0;
                }

                /* Zero-pad the last crc update buffer to 32 bits */
                memset(ctx->bufnext + ctx->bufnext_len, 0,
                                CHKSUM_DIGEST_SIZE - ctx->bufnext_len);
        } else {
                /* Buffer data smaller than 32 bits for the next update */
                if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) {
                        memcpy(ctx->bufnext + ctx->bufnext_len,
                                sg_virt(req->src), req->nbytes);
                        ctx->bufnext_len += req->nbytes;
                        if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE &&
                                ctx->bufnext_len) {
                                goto finish_update;
                        } else {
                                crc->busy = 0;
                                return 0;
                        }
                }

                if (ctx->bufnext_len) {
                        /* Chain in extra bytes of last update */
                        ctx->buflast_len = ctx->bufnext_len;
                        memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len);

                        nsg = ctx->sg_buflen ? 2 : 1;
                        sg_init_table(ctx->bufsl, nsg);
                        sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
                        if (nsg > 1)
                                scatterwalk_sg_chain(ctx->bufsl, nsg,
                                                req->src);
                        ctx->sg = ctx->bufsl;
                } else
                        ctx->sg = req->src;

                /* Chop crc buffer size to a multiple of 32 bits */
                nsg = ctx->sg_nents = sg_count(ctx->sg);
                ctx->sg_buflen = ctx->buflast_len + req->nbytes;
                ctx->bufnext_len = ctx->sg_buflen % 4;
                ctx->sg_buflen &= ~0x3;

                if (ctx->bufnext_len) {
                        /* copy extra bytes to buffer for next update */
                        memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE);
                        nextlen = ctx->bufnext_len;
                        for (i = nsg - 1; i >= 0; i--) {
                                sg = sg_get(ctx->sg, nsg, i);
                                j = min(nextlen, sg_dma_len(sg));
                                memcpy(ctx->bufnext + nextlen - j,
                                        sg_virt(sg) + sg_dma_len(sg) - j, j);
                                if (j == sg_dma_len(sg))
                                        ctx->sg_nents--;
                                nextlen -= j;
                                if (nextlen == 0)
                                        break;
                        }
                }
        }

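        /*
         * At this point ctx->sg and ctx->sg_buflen describe a payload that
         * is a multiple of 32 bits; for final requests the zero-padded
         * bufnext block is accounted for here and appended to the
         * descriptor chain by bfin_crypto_crc_config_dma().
         */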
finish_update:
        if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE ||
                ctx->flag == CRC_CRYPTO_STATE_FINISH))
                ctx->sg_buflen += CHKSUM_DIGEST_SIZE;

        /* set CRC data count (in 32-bit words) before starting DMA */
        writel(ctx->sg_buflen >> 2, &crc->regs->datacnt);

        /* setup and enable CRC DMA */
        bfin_crypto_crc_config_dma(crc);

        /* finally kick off CRC operation */
        reg = readl(&crc->regs->control);
        writel(reg | BLKEN, &crc->regs->control);

        return -EINPROGRESS;
}
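
/*
 * Note: requests complete asynchronously. Once DMA is armed the function
 * above returns -EINPROGRESS; the DCNTEXP interrupt handler publishes the
 * CRC result and the done tasklet restarts the queue.
 */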

static int bfin_crypto_crc_update(struct ahash_request *req)
{
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        if (!req->nbytes)
                return 0;

        dev_dbg(ctx->crc->dev, "crc_update\n");
        ctx->total += req->nbytes;
        ctx->flag = CRC_CRYPTO_STATE_UPDATE;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

static int bfin_crypto_crc_final(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        dev_dbg(ctx->crc->dev, "crc_final\n");
        ctx->flag = CRC_CRYPTO_STATE_FINISH;
        crc_ctx->key = 0;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

static int bfin_crypto_crc_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);
        struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req);

        dev_dbg(ctx->crc->dev, "crc_finishupdate\n");
        ctx->total += req->nbytes;
        ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE;
        crc_ctx->key = 0;

        return bfin_crypto_crc_handle_queue(ctx->crc, req);
}

static int bfin_crypto_crc_digest(struct ahash_request *req)
{
        int ret;

        ret = bfin_crypto_crc_init(req);
        if (ret)
                return ret;

        return bfin_crypto_crc_finup(req);
}

static int bfin_crypto_crc_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_ahash_ctx(tfm);

        /* crc_ctx->crc may not be bound to a device yet, so dev_dbg
         * cannot be used here
         */
        pr_debug("crc_setkey\n");
        if (keylen != CHKSUM_DIGEST_SIZE) {
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

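        /* the key is consumed little-endian as the initial CRC seed */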
        crc_ctx->key = get_unaligned_le32(key);

        return 0;
}

static int bfin_crypto_crc_cra_init(struct crypto_tfm *tfm)
{
        struct bfin_crypto_crc_ctx *crc_ctx = crypto_tfm_ctx(tfm);

        crc_ctx->key = 0;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct bfin_crypto_crc_reqctx));

        return 0;
}

static void bfin_crypto_crc_cra_exit(struct crypto_tfm *tfm)
{
}

static struct ahash_alg algs = {
        .init           = bfin_crypto_crc_init,
        .update         = bfin_crypto_crc_update,
        .final          = bfin_crypto_crc_final,
        .finup          = bfin_crypto_crc_finup,
        .digest         = bfin_crypto_crc_digest,
        .setkey         = bfin_crypto_crc_setkey,
        .halg.digestsize        = CHKSUM_DIGEST_SIZE,
        .halg.base      = {
                .cra_name               = "hmac(crc32)",
                .cra_driver_name        = DRIVER_NAME,
                .cra_priority           = 100,
                .cra_flags              = CRYPTO_ALG_TYPE_AHASH |
                                                CRYPTO_ALG_ASYNC,
                .cra_blocksize          = CHKSUM_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct bfin_crypto_crc_ctx),
                .cra_alignmask          = 3,
                .cra_module             = THIS_MODULE,
                .cra_init               = bfin_crypto_crc_cra_init,
                .cra_exit               = bfin_crypto_crc_cra_exit,
        }
};
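
/*
 * Usage sketch (not part of this driver): a kernel consumer drives the
 * accelerator through the generic ahash API, e.g.
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(crc32)", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *      crypto_ahash_setkey(tfm, seed, CHKSUM_DIGEST_SIZE);
 *      ahash_request_set_crypt(req, sg, digest, nbytes);
 *      crypto_ahash_digest(req);
 *
 * where seed, sg, digest and nbytes are the caller's buffers; the request
 * completes asynchronously through req->base.complete.
 */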

static void bfin_crypto_crc_done_task(unsigned long data)
{
        struct bfin_crypto_crc *crc = (struct bfin_crypto_crc *)data;

        bfin_crypto_crc_handle_queue(crc, NULL);
}

static irqreturn_t bfin_crypto_crc_handler(int irq, void *dev_id)
{
        struct bfin_crypto_crc *crc = dev_id;
        u32 reg;

        if (readl(&crc->regs->status) & DCNTEXP) {
                writel(DCNTEXP, &crc->regs->status);

                /* prepare results */
                put_unaligned_le32(readl(&crc->regs->result),
                        crc->req->result);

                reg = readl(&crc->regs->control);
                writel(reg & ~BLKEN, &crc->regs->control);
                crc->busy = 0;

                if (crc->req->base.complete)
                        crc->req->base.complete(&crc->req->base, 0);

                tasklet_schedule(&crc->done_task);

                return IRQ_HANDLED;
        } else
                return IRQ_NONE;
}

#ifdef CONFIG_PM
/**
 *      bfin_crypto_crc_suspend - suspend crc device
 *      @pdev: device being suspended
 *      @state: requested suspend state
 */
static int bfin_crypto_crc_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);
        int i = 100000;

        while ((readl(&crc->regs->control) & BLKEN) && --i)
                cpu_relax();

        if (i == 0)
                return -EBUSY;

        return 0;
}
#else
# define bfin_crypto_crc_suspend NULL
#endif

#define bfin_crypto_crc_resume NULL

/**
 *      bfin_crypto_crc_probe - initialize a crc device
 *      @pdev: crc platform device being probed
 */
static int bfin_crypto_crc_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct bfin_crypto_crc *crc;
        unsigned int timeout = 100000;
        int ret;

        crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
        if (!crc) {
                dev_err(&pdev->dev, "failed to allocate bfin_crypto_crc\n");
                return -ENOMEM;
        }

        crc->dev = dev;

        INIT_LIST_HEAD(&crc->list);
        spin_lock_init(&crc->lock);
        tasklet_init(&crc->done_task, bfin_crypto_crc_done_task, (unsigned long)crc);
        crypto_init_queue(&crc->queue, CRC_CCRYPTO_QUEUE_LENGTH);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
                return -ENOENT;
        }

        crc->regs = devm_ioremap_resource(dev, res);
        if (IS_ERR((void *)crc->regs)) {
                dev_err(&pdev->dev, "Cannot map CRC IO\n");
                return PTR_ERR((void *)crc->regs);
        }

        crc->irq = platform_get_irq(pdev, 0);
        if (crc->irq < 0) {
                dev_err(&pdev->dev, "No CRC DCNTEXP IRQ specified\n");
                return -ENOENT;
        }

        ret = devm_request_irq(dev, crc->irq, bfin_crypto_crc_handler,
                        IRQF_SHARED, dev_name(dev), crc);
        if (ret) {
                dev_err(&pdev->dev, "Unable to request blackfin crc irq\n");
                return ret;
        }

        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
        if (res == NULL) {
                dev_err(&pdev->dev, "No CRC DMA channel specified\n");
                return -ENOENT;
        }
        crc->dma_ch = res->start;

        ret = request_dma(crc->dma_ch, dev_name(dev));
        if (ret) {
                dev_err(&pdev->dev, "Unable to attach Blackfin CRC DMA channel\n");
                return ret;
        }

        crc->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &crc->sg_dma, GFP_KERNEL);
        if (crc->sg_cpu == NULL) {
                ret = -ENOMEM;
                goto out_error_dma;
        }
        /*
         * need at most CRC_MAX_DMA_DESC sg + CRC_MAX_DMA_DESC middle +
         * 1 last + 1 next dma descriptors
         */
        crc->sg_mid_buf = (u8 *)(crc->sg_cpu + ((CRC_MAX_DMA_DESC + 1) << 1));
        crc->sg_mid_dma = crc->sg_dma + sizeof(struct dma_desc_array)
                        * ((CRC_MAX_DMA_DESC + 1) << 1);
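        /*
         * The middle bounce buffers share the coherent page: they start
         * right after the 2 * (CRC_MAX_DMA_DESC + 1) descriptors, and
         * buffer i lives at byte offset i << 2.
         */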

        writel(0, &crc->regs->control);
        crc->poly = (u32)pdev->dev.platform_data;
        writel(crc->poly, &crc->regs->poly);

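        /*
         * the hardware derives its CRC lookup table from the polynomial;
         * LUTDONE signals that table generation has finished
         */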
        while (!(readl(&crc->regs->status) & LUTDONE) && (--timeout) > 0)
                cpu_relax();

        if (timeout == 0)
                dev_info(&pdev->dev, "init crc poly timeout\n");

        platform_set_drvdata(pdev, crc);

        spin_lock(&crc_list.lock);
        list_add(&crc->list, &crc_list.dev_list);
        spin_unlock(&crc_list.lock);

        if (list_is_singular(&crc_list.dev_list)) {
                ret = crypto_register_ahash(&algs);
                if (ret) {
                        dev_err(&pdev->dev,
                                "Can't register crypto ahash device\n");
                        /* don't leave the soon-to-be-freed device listed */
                        spin_lock(&crc_list.lock);
                        list_del(&crc->list);
                        spin_unlock(&crc_list.lock);
                        goto out_error_dma;
                }
        }

        dev_info(&pdev->dev, "initialized\n");

        return 0;

out_error_dma:
        if (crc->sg_cpu)
                dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);
        free_dma(crc->dma_ch);

        return ret;
}

/**
 *      bfin_crypto_crc_remove - remove a crc device
 *      @pdev: crc platform device being removed
 */
static int bfin_crypto_crc_remove(struct platform_device *pdev)
{
        struct bfin_crypto_crc *crc = platform_get_drvdata(pdev);

        if (!crc)
                return -ENODEV;

        spin_lock(&crc_list.lock);
        list_del(&crc->list);
        spin_unlock(&crc_list.lock);

        crypto_unregister_ahash(&algs);
        tasklet_kill(&crc->done_task);
        free_dma(crc->dma_ch);
        dma_free_coherent(&pdev->dev, PAGE_SIZE, crc->sg_cpu, crc->sg_dma);

        return 0;
}

static struct platform_driver bfin_crypto_crc_driver = {
        .probe     = bfin_crypto_crc_probe,
        .remove    = bfin_crypto_crc_remove,
        .suspend   = bfin_crypto_crc_suspend,
        .resume    = bfin_crypto_crc_resume,
        .driver    = {
                .name  = DRIVER_NAME,
        },
};

/**
 *      bfin_crypto_crc_mod_init - Initialize module
 *
 *      Checks the module params and registers the platform driver.
 *      Real work is in the platform probe function.
 */
static int __init bfin_crypto_crc_mod_init(void)
{
        int ret;

        pr_info("Blackfin hardware CRC crypto driver\n");

        INIT_LIST_HEAD(&crc_list.dev_list);
        spin_lock_init(&crc_list.lock);

        ret = platform_driver_register(&bfin_crypto_crc_driver);
        if (ret) {
                pr_err("unable to register driver\n");
                return ret;
        }

        return 0;
}

/**
 *      bfin_crypto_crc_mod_exit - Deinitialize module
 */
static void __exit bfin_crypto_crc_mod_exit(void)
{
        platform_driver_unregister(&bfin_crypto_crc_driver);
}

module_init(bfin_crypto_crc_mod_init);
module_exit(bfin_crypto_crc_mod_exit);

MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Blackfin CRC hardware crypto driver");
MODULE_LICENSE("GPL");