linux/drivers/dma/mmp_pdma.c
/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include "dmaengine.h"

#define DCSR            0x0000
#define DALGN           0x00a0
#define DINT            0x00f0
#define DDADR           0x0200
#define DSADR           0x0204
#define DTADR           0x0208
#define DCMD            0x020c

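/*
 * Per-channel register layout, as used by set_desc()/enable_chan() below:
 * channel 'i' has its DCSR at DCSR + (i << 2), and its descriptor
 * registers (DDADR/DSADR/DTADR/DCMD) at 0x0200 + (i << 4).  DINT is a
 * controller-wide status register with one bit per channel.
 */
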
#define DCSR_RUN        (1 << 31)       /* Run Bit (read / write) */
#define DCSR_NODESC     (1 << 30)       /* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN  (1 << 29)       /* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND    (1 << 8)        /* Request Pending (read-only) */
#define DCSR_STOPSTATE  (1 << 3)        /* Stop State (read-only) */
#define DCSR_ENDINTR    (1 << 2)        /* End Interrupt (read / write) */
#define DCSR_STARTINTR  (1 << 1)        /* Start Interrupt (read / write) */
#define DCSR_BUSERR     (1 << 0)        /* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN   (1 << 28)       /* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN   (1 << 27)       /* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN  (1 << 26)       /* STOP on an EOR */
#define DCSR_SETCMPST   (1 << 25)       /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST   (1 << 24)       /* Clear Descriptor Compare Status */
#define DCSR_CMPST      (1 << 10)       /* The Descriptor Compare Status */
#define DCSR_EORINTR    (1 << 9)        /* The end of Receive */

#define DRCMR_MAPVLD    (1 << 7)        /* Map Valid (read / write) */
#define DRCMR_CHLNUM    0x1f            /* mask for Channel Number (read / write) */

#define DDADR_DESCADDR  0xfffffff0      /* Address of next descriptor (mask) */
#define DDADR_STOP      (1 << 0)        /* Stop (read / write) */

#define DCMD_INCSRCADDR (1 << 31)       /* Source Address Increment Setting. */
#define DCMD_INCTRGADDR (1 << 30)       /* Target Address Increment Setting. */
#define DCMD_FLOWSRC    (1 << 29)       /* Flow Control by the source. */
#define DCMD_FLOWTRG    (1 << 28)       /* Flow Control by the target. */
#define DCMD_STARTIRQEN (1 << 22)       /* Start Interrupt Enable */
#define DCMD_ENDIRQEN   (1 << 21)       /* End Interrupt Enable */
#define DCMD_ENDIAN     (1 << 18)       /* Device Endian-ness. */
#define DCMD_BURST8     (1 << 16)       /* 8 byte burst */
#define DCMD_BURST16    (2 << 16)       /* 16 byte burst */
#define DCMD_BURST32    (3 << 16)       /* 32 byte burst */
#define DCMD_WIDTH1     (1 << 14)       /* 1 byte width */
#define DCMD_WIDTH2     (2 << 14)       /* 2 byte width (HalfWord) */
#define DCMD_WIDTH4     (3 << 14)       /* 4 byte width (Word) */
#define DCMD_LENGTH     0x01fff         /* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT          3
#define PDMA_MAX_DESC_BYTES     0x1000

struct mmp_pdma_desc_hw {
        u32 ddadr;      /* Points to the next descriptor + flags */
        u32 dsadr;      /* DSADR value for the current transfer */
        u32 dtadr;      /* DTADR value for the current transfer */
        u32 dcmd;       /* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
        struct mmp_pdma_desc_hw desc;
        struct list_head node;
        struct list_head tx_list;
        struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
        struct device *dev;
        struct dma_chan chan;
        struct dma_async_tx_descriptor desc;
        struct mmp_pdma_phy *phy;
        enum dma_transfer_direction dir;

        /* channel's basic info */
        struct tasklet_struct tasklet;
        u32 dcmd;
        u32 drcmr;
        u32 dev_addr;

        /* list for desc */
        spinlock_t desc_lock;           /* Descriptor list lock */
        struct list_head chain_pending; /* Link descriptors queue for pending */
        struct list_head chain_running; /* Link descriptors queue for running */
        bool idle;                      /* channel state machine */

        struct dma_pool *desc_pool;     /* Descriptors pool */
};

struct mmp_pdma_phy {
        int idx;
        void __iomem *base;
        struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
        int                             dma_channels;
        void __iomem                    *base;
        struct device                   *dev;
        struct dma_device               device;
        struct mmp_pdma_phy             *phy;
};

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

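/* Load the address of the first hw descriptor into the channel's DDADR */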
static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
        u32 reg = (phy->idx << 4) + DDADR;

        writel(addr, phy->base + reg);
}

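/*
 * Map the channel's peripheral request line (drcmr) onto this physical
 * channel and set it running.  On the PXA/MMP layout assumed here, DRCMR
 * registers for requests 0-63 start at offset 0x0100 and those for
 * requests 64 and up at offset 0x1100, four bytes per request.
 */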
static void enable_chan(struct mmp_pdma_phy *phy)
{
        u32 reg;

        if (!phy->vchan)
                return;

        reg = phy->vchan->drcmr;
        reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
        writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

        reg = (phy->idx << 2) + DCSR;
        writel(readl(phy->base + reg) | DCSR_RUN,
                                        phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
        u32 reg;

        if (phy) {
                reg = (phy->idx << 2) + DCSR;
                writel(readl(phy->base + reg) & ~DCSR_RUN,
                                                phy->base + reg);
        }
}

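/*
 * Acknowledge a channel interrupt.  The interrupt status bits in DCSR
 * are assumed to be write-one-to-clear, so writing back the value just
 * read clears exactly the events that were pending.
 */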
static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
        u32 dcsr;
        u32 dint = readl(phy->base + DINT);
        u32 reg = (phy->idx << 2) + DCSR;

        if (dint & BIT(phy->idx)) {
                /* clear irq */
                dcsr = readl(phy->base + reg);
                writel(dcsr, phy->base + reg);
                if ((dcsr & DCSR_BUSERR) && (phy->vchan))
                        dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
                return 0;
        }
        return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
        struct mmp_pdma_phy *phy = dev_id;

        if (clear_chan_irq(phy) == 0) {
                tasklet_schedule(&phy->vchan->tasklet);
                return IRQ_HANDLED;
        } else
                return IRQ_NONE;
}

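/*
 * Shared-interrupt handler: DINT has one status bit per channel, so walk
 * the set bits (dint &= dint - 1 clears the lowest one) and hand each
 * pending channel to the per-channel handler.
 */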
static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
        struct mmp_pdma_device *pdev = dev_id;
        struct mmp_pdma_phy *phy;
        u32 dint = readl(pdev->base + DINT);
        int i, ret;
        int irq_num = 0;

        while (dint) {
                i = __ffs(dint);
                dint &= (dint - 1);
                phy = &pdev->phy[i];
                ret = mmp_pdma_chan_handler(irq, phy);
                if (ret == IRQ_HANDLED)
                        irq_num++;
        }

        if (irq_num)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
        int prio, i;
        struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
        struct mmp_pdma_phy *phy;

        /*
         * dma channel priorities
         * ch 0 - 3,  16 - 19  <--> (0)
         * ch 4 - 7,  20 - 23  <--> (1)
         * ch 8 - 11, 24 - 27  <--> (2)
         * ch 12 - 15, 28 - 31  <--> (3)
         */
        for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
                for (i = 0; i < pdev->dma_channels; i++) {
                        if (prio != ((i & 0xf) >> 2))
                                continue;
                        phy = &pdev->phy[i];
                        if (!phy->vchan) {
                                phy->vchan = pchan;
                                return phy;
                        }
                }
        }

        return NULL;
}

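/*
 * Hardware descriptors are chained through ddadr; a chain ends with
 * DDADR_STOP and only its last descriptor carries DCMD_ENDIRQEN.  When a
 * new transaction is appended, the old tail is re-pointed at the new
 * chain and its end-interrupt bit is cleared, so the queue still raises
 * a single IRQ at the true end.
 */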
/* desc->tx_list ==> pending list */
static void append_pending_queue(struct mmp_pdma_chan *chan,
                                        struct mmp_pdma_desc_sw *desc)
{
        struct mmp_pdma_desc_sw *tail =
                                to_mmp_pdma_desc(chan->chain_pending.prev);

        if (list_empty(&chan->chain_pending))
                goto out_splice;

        /* one irq per queue, even appended */
        tail->desc.ddadr = desc->async_tx.phys;
        tail->desc.dcmd &= ~DCMD_ENDIRQEN;

        /* softly link to pending list */
out_splice:
        list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
        struct mmp_pdma_desc_sw *desc;

        /* still in running, irq will start the pending list */
        if (!chan->idle) {
                dev_dbg(chan->dev, "DMA controller still busy\n");
                return;
        }

        if (list_empty(&chan->chain_pending)) {
                /* chance to re-fetch phy channel with higher prio */
                if (chan->phy) {
                        chan->phy->vchan = NULL;
                        chan->phy = NULL;
                }
                dev_dbg(chan->dev, "no pending list\n");
                return;
        }

        if (!chan->phy) {
                chan->phy = lookup_phy(chan);
                if (!chan->phy) {
                        dev_dbg(chan->dev, "no free dma channel\n");
                        return;
                }
        }

        /*
         * pending -> running
         * reinitialize pending list
         */
        desc = list_first_entry(&chan->chain_pending,
                                struct mmp_pdma_desc_sw, node);
        list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

        /*
         * Program the descriptor's address into the DMA controller,
         * then start the DMA transaction
         */
        set_desc(chan->phy, desc->async_tx.phys);
        enable_chan(chan->phy);
        chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
        struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
        struct mmp_pdma_desc_sw *child;
        unsigned long flags;
        dma_cookie_t cookie = -EBUSY;

        spin_lock_irqsave(&chan->desc_lock, flags);

        list_for_each_entry(child, &desc->tx_list, node) {
                cookie = dma_cookie_assign(&child->async_tx);
        }

        append_pending_queue(chan, desc);

        spin_unlock_irqrestore(&chan->desc_lock, flags);

        return cookie;
}

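/*
 * Software descriptors come from a dma_pool, so the 32-byte aligned
 * hardware part at the start of each one has a bus address
 * (async_tx.phys) that the controller can fetch directly.
 */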
static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
        struct mmp_pdma_desc_sw *desc;
        dma_addr_t pdesc;

        desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
        if (!desc) {
                dev_err(chan->dev, "out of memory for link descriptor\n");
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        INIT_LIST_HEAD(&desc->tx_list);
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
        /* each desc has submit */
        desc->async_tx.tx_submit = mmp_pdma_tx_submit;
        desc->async_tx.phys = pdesc;

        return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

        if (chan->desc_pool)
                return 1;

        chan->desc_pool =
                dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
                                  sizeof(struct mmp_pdma_desc_sw),
                                  __alignof__(struct mmp_pdma_desc_sw), 0);
        if (!chan->desc_pool) {
                dev_err(chan->dev, "unable to allocate descriptor pool\n");
                return -ENOMEM;
        }
        if (chan->phy) {
                chan->phy->vchan = NULL;
                chan->phy = NULL;
        }
        chan->idle = true;
        chan->dev_addr = 0;
        return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
                                  struct list_head *list)
{
        struct mmp_pdma_desc_sw *desc, *_desc;

        list_for_each_entry_safe(desc, _desc, list, node) {
                list_del(&desc->node);
                dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        mmp_pdma_free_desc_list(chan, &chan->chain_pending);
        mmp_pdma_free_desc_list(chan, &chan->chain_running);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
        chan->idle = true;
        chan->dev_addr = 0;
        if (chan->phy) {
                chan->phy->vchan = NULL;
                chan->phy = NULL;
        }
}

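/*
 * Transfers are split into hardware descriptors of at most
 * PDMA_MAX_DESC_BYTES each, comfortably below the 8K - 1 limit of the
 * 13-bit DCMD length field.
 */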
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
        dma_addr_t dma_dst, dma_addr_t dma_src,
        size_t len, unsigned long flags)
{
        struct mmp_pdma_chan *chan;
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
        size_t copy = 0;

        if (!dchan)
                return NULL;

        if (!len)
                return NULL;

        chan = to_mmp_pdma_chan(dchan);

        if (!chan->dir) {
                chan->dir = DMA_MEM_TO_MEM;
                chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
                chan->dcmd |= DCMD_BURST32;
        }

        do {
                /* Allocate the link descriptor from DMA pool */
                new = mmp_pdma_alloc_descriptor(chan);
                if (!new) {
                        dev_err(chan->dev, "no memory for desc\n");
                        goto fail;
                }

                copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);

                new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
                new->desc.dsadr = dma_src;
                new->desc.dtadr = dma_dst;

                if (!first)
                        first = new;
                else
                        prev->desc.ddadr = new->async_tx.phys;

                new->async_tx.cookie = 0;
                async_tx_ack(&new->async_tx);

                prev = new;
                len -= copy;

                if (chan->dir == DMA_MEM_TO_DEV) {
                        dma_src += copy;
                } else if (chan->dir == DMA_DEV_TO_MEM) {
                        dma_dst += copy;
                } else if (chan->dir == DMA_MEM_TO_MEM) {
                        dma_src += copy;
                        dma_dst += copy;
                }

                /* Insert the link descriptor to the LD ring */
                list_add_tail(&new->node, &first->tx_list);
        } while (len);

        first->async_tx.flags = flags; /* client is in control of this ack */
        first->async_tx.cookie = -EBUSY;

        /* last desc and fire IRQ */
        new->desc.ddadr = DDADR_STOP;
        new->desc.dcmd |= DCMD_ENDIRQEN;

        return &first->async_tx;

fail:
        if (first)
                mmp_pdma_free_desc_list(chan, &first->tx_list);
        return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
                         unsigned int sg_len, enum dma_transfer_direction dir,
                         unsigned long flags, void *context)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
        size_t len, avail;
        struct scatterlist *sg;
        dma_addr_t addr;
        int i;

        if ((sgl == NULL) || (sg_len == 0))
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);

                do {
                        len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);

                        /* allocate and populate the descriptor */
                        new = mmp_pdma_alloc_descriptor(chan);
                        if (!new) {
                                dev_err(chan->dev, "no memory for desc\n");
                                goto fail;
                        }

                        new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
                        if (dir == DMA_MEM_TO_DEV) {
                                new->desc.dsadr = addr;
                                new->desc.dtadr = chan->dev_addr;
                        } else {
                                new->desc.dsadr = chan->dev_addr;
                                new->desc.dtadr = addr;
                        }

                        if (!first)
                                first = new;
                        else
                                prev->desc.ddadr = new->async_tx.phys;

                        new->async_tx.cookie = 0;
                        async_tx_ack(&new->async_tx);
                        prev = new;

                        /* Insert the link descriptor to the LD ring */
                        list_add_tail(&new->node, &first->tx_list);

                        /* update metadata */
                        addr += len;
                        avail -= len;
                } while (avail);
        }

        first->async_tx.cookie = -EBUSY;
        first->async_tx.flags = flags;

        /* last desc and fire IRQ */
        new->desc.ddadr = DDADR_STOP;
        new->desc.dcmd |= DCMD_ENDIRQEN;

        return &first->async_tx;

fail:
        if (first)
                mmp_pdma_free_desc_list(chan, &first->tx_list);
        return NULL;
}

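/*
 * Terminate / configure entry point.  A typical slave client sets the
 * channel up roughly like this before preparing a transfer (a sketch;
 * dev_fifo_phys and drcmr_req are illustrative names, and the DRCMR
 * request number is SoC specific):
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = dev_fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst   = 32,
 *		.slave_id       = drcmr_req,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * DMA_SLAVE_CONFIG below turns this into the DCMD template and the DRCMR
 * mapping used when the channel starts.
 */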
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
                unsigned long arg)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct dma_slave_config *cfg = (void *)arg;
        unsigned long flags;
        int ret = 0;
        u32 maxburst = 0, addr = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        if (!dchan)
                return -EINVAL;

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                disable_chan(chan->phy);
                if (chan->phy) {
                        chan->phy->vchan = NULL;
                        chan->phy = NULL;
                }
                spin_lock_irqsave(&chan->desc_lock, flags);
                mmp_pdma_free_desc_list(chan, &chan->chain_pending);
                mmp_pdma_free_desc_list(chan, &chan->chain_running);
                spin_unlock_irqrestore(&chan->desc_lock, flags);
                chan->idle = true;
                break;
        case DMA_SLAVE_CONFIG:
                if (cfg->direction == DMA_DEV_TO_MEM) {
                        chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
                        maxburst = cfg->src_maxburst;
                        width = cfg->src_addr_width;
                        addr = cfg->src_addr;
                } else if (cfg->direction == DMA_MEM_TO_DEV) {
                        chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
                        maxburst = cfg->dst_maxburst;
                        width = cfg->dst_addr_width;
                        addr = cfg->dst_addr;
                }

                if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
                        chan->dcmd |= DCMD_WIDTH1;
                else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
                        chan->dcmd |= DCMD_WIDTH2;
                else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
                        chan->dcmd |= DCMD_WIDTH4;

                if (maxburst == 8)
                        chan->dcmd |= DCMD_BURST8;
                else if (maxburst == 16)
                        chan->dcmd |= DCMD_BURST16;
                else if (maxburst == 32)
                        chan->dcmd |= DCMD_BURST32;

                if (cfg) {
                        chan->dir = cfg->direction;
                        chan->drcmr = cfg->slave_id;
                }
                chan->dev_addr = addr;
                break;
        default:
                return -ENOSYS;
        }

        return ret;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
                        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        enum dma_status ret;
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        ret = dma_cookie_status(dchan, cookie, txstate);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        return ret;
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Do call back
 * Start pending list
 */
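/*
 * Since only the last descriptor of a queued chain has DCMD_ENDIRQEN set,
 * one end-of-chain interrupt covers the whole running list: completing the
 * cookie of the final descriptor marks every earlier one complete as well.
 */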
static void dma_do_tasklet(unsigned long data)
{
        struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
        struct mmp_pdma_desc_sw *desc, *_desc;
        LIST_HEAD(chain_cleanup);
        unsigned long flags;

        /* submit pending list; callback for each desc; free desc */

        spin_lock_irqsave(&chan->desc_lock, flags);

        /* update the cookie if we have some descriptors to cleanup */
        if (!list_empty(&chan->chain_running)) {
                dma_cookie_t cookie;

                desc = to_mmp_pdma_desc(chan->chain_running.prev);
                cookie = desc->async_tx.cookie;
                dma_cookie_complete(&desc->async_tx);

                dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
        }

        /*
         * move the descriptors to a temporary list so we can drop the lock
         * during the entire cleanup operation
         */
        list_splice_tail_init(&chan->chain_running, &chain_cleanup);

        /* the hardware is now idle and ready for more */
        chan->idle = true;

        /* Start any pending transactions automatically */
        start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        /* Run the callback for each descriptor, in order */
        list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
                struct dma_async_tx_descriptor *txd = &desc->async_tx;

                /* Remove from the list of transactions */
                list_del(&desc->node);
                /* Run the link descriptor callback function */
                if (txd->callback)
                        txd->callback(txd->callback_param);

                dma_pool_free(chan->desc_pool, desc, txd->phys);
        }
}

static int __devexit mmp_pdma_remove(struct platform_device *op)
{
        struct mmp_pdma_device *pdev = platform_get_drvdata(op);

        dma_async_device_unregister(&pdev->device);
        return 0;
}

static int __devinit mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
                                                        int idx, int irq)
{
        struct mmp_pdma_phy *phy  = &pdev->phy[idx];
        struct mmp_pdma_chan *chan;
        int ret;

        chan = devm_kzalloc(pdev->dev,
                        sizeof(struct mmp_pdma_chan), GFP_KERNEL);
        if (chan == NULL)
                return -ENOMEM;

        phy->idx = idx;
        phy->base = pdev->base;

        if (irq) {
                ret = devm_request_irq(pdev->dev, irq,
                        mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
                if (ret) {
                        dev_err(pdev->dev, "channel request irq fail!\n");
                        return ret;
                }
        }

        spin_lock_init(&chan->desc_lock);
        chan->dev = pdev->dev;
        chan->chan.device = &pdev->device;
        tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
        INIT_LIST_HEAD(&chan->chain_pending);
        INIT_LIST_HEAD(&chan->chain_running);

        /* register virt channel to dma engine */
        list_add_tail(&chan->chan.device_node,
                        &pdev->device.channels);

        return 0;
}

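/*
 * Example of a matching device tree node (a sketch; addresses, interrupt
 * number and channel count are illustrative only):
 *
 *	pdma0: dma-controller@d4000000 {
 *		compatible = "marvell,pdma-1.0";
 *		reg = <0xd4000000 0x10000>;
 *		interrupts = <47>;
 *		#dma-channels = <16>;
 *	};
 */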
static struct of_device_id mmp_pdma_dt_ids[] = {
        { .compatible = "marvell,pdma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static int __devinit mmp_pdma_probe(struct platform_device *op)
{
        struct mmp_pdma_device *pdev;
        const struct of_device_id *of_id;
        struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
        struct resource *iores;
        int i, ret, irq = 0;
        int dma_channels = 0, irq_num = 0;

        pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
                return -ENOMEM;
        pdev->dev = &op->dev;

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        pdev->base = devm_request_and_ioremap(pdev->dev, iores);
        if (!pdev->base)
                return -EADDRNOTAVAIL;

        of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
        if (of_id)
                of_property_read_u32(pdev->dev->of_node,
                                "#dma-channels", &dma_channels);
        else if (pdata && pdata->dma_channels)
                dma_channels = pdata->dma_channels;
        else
                dma_channels = 32;      /* default 32 channel */
        pdev->dma_channels = dma_channels;

        for (i = 0; i < dma_channels; i++) {
                if (platform_get_irq(op, i) > 0)
                        irq_num++;
        }

        pdev->phy = devm_kzalloc(pdev->dev,
                dma_channels * sizeof(struct mmp_pdma_phy), GFP_KERNEL);
        if (pdev->phy == NULL)
                return -ENOMEM;

        INIT_LIST_HEAD(&pdev->device.channels);

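        /*
         * Platforms either wire one interrupt per channel or a single
         * shared interrupt for the whole controller.  With a shared IRQ,
         * mmp_pdma_int_handler() demultiplexes via DINT; otherwise each
         * channel registers its own handler in mmp_pdma_chan_init().
         */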
        if (irq_num != dma_channels) {
                /* all chan share one irq, demux inside */
                irq = platform_get_irq(op, 0);
                ret = devm_request_irq(pdev->dev, irq,
                        mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
                if (ret)
                        return ret;
        }

        for (i = 0; i < dma_channels; i++) {
                irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
                ret = mmp_pdma_chan_init(pdev, i, irq);
                if (ret)
                        return ret;
        }

        dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
        dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
        pdev->device.dev = &op->dev;
        pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
        pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
        pdev->device.device_tx_status = mmp_pdma_tx_status;
        pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
        pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
        pdev->device.device_issue_pending = mmp_pdma_issue_pending;
        pdev->device.device_control = mmp_pdma_control;
        pdev->device.copy_align = PDMA_ALIGNMENT;

        if (pdev->dev->coherent_dma_mask)
                dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
        else
                dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

        ret = dma_async_device_register(&pdev->device);
        if (ret) {
                dev_err(pdev->device.dev, "unable to register\n");
                return ret;
        }

        /* remove() relies on drvdata, so set it before returning */
        platform_set_drvdata(op, pdev);

        dev_info(pdev->device.dev, "initialized\n");
        return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
        { "mmp-pdma", },
        { },
};

static struct platform_driver mmp_pdma_driver = {
        .driver         = {
                .name   = "mmp-pdma",
                .owner  = THIS_MODULE,
                .of_match_table = mmp_pdma_dt_ids,
        },
        .id_table       = mmp_pdma_id_table,
        .probe          = mmp_pdma_probe,
        .remove         = __devexit_p(mmp_pdma_remove),
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");