linux/drivers/dma/mmp_pdma.c
/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of.h>

#include "dmaengine.h"

#define DCSR            0x0000
#define DALGN           0x00a0
#define DINT            0x00f0
#define DDADR           0x0200
#define DSADR           0x0204
#define DTADR           0x0208
#define DCMD            0x020c

#define DCSR_RUN        (1 << 31)       /* Run Bit (read / write) */
#define DCSR_NODESC     (1 << 30)       /* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN  (1 << 29)       /* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND    (1 << 8)        /* Request Pending (read-only) */
#define DCSR_STOPSTATE  (1 << 3)        /* Stop State (read-only) */
#define DCSR_ENDINTR    (1 << 2)        /* End Interrupt (read / write) */
#define DCSR_STARTINTR  (1 << 1)        /* Start Interrupt (read / write) */
#define DCSR_BUSERR     (1 << 0)        /* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN   (1 << 28)       /* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN   (1 << 27)       /* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN  (1 << 26)       /* STOP on an EOR */
#define DCSR_SETCMPST   (1 << 25)       /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST   (1 << 24)       /* Clear Descriptor Compare Status */
#define DCSR_CMPST      (1 << 10)       /* The Descriptor Compare Status */
#define DCSR_EORINTR    (1 << 9)        /* The end of Receive */

#define DRCMR_MAPVLD    (1 << 7)        /* Map Valid (read / write) */
#define DRCMR_CHLNUM    0x1f            /* mask for Channel Number (read / write) */

#define DDADR_DESCADDR  0xfffffff0      /* Address of next descriptor (mask) */
#define DDADR_STOP      (1 << 0)        /* Stop (read / write) */

#define DCMD_INCSRCADDR (1 << 31)       /* Source Address Increment Setting. */
#define DCMD_INCTRGADDR (1 << 30)       /* Target Address Increment Setting. */
#define DCMD_FLOWSRC    (1 << 29)       /* Flow Control by the source. */
#define DCMD_FLOWTRG    (1 << 28)       /* Flow Control by the target. */
#define DCMD_STARTIRQEN (1 << 22)       /* Start Interrupt Enable */
#define DCMD_ENDIRQEN   (1 << 21)       /* End Interrupt Enable */
#define DCMD_ENDIAN     (1 << 18)       /* Device Endian-ness. */
#define DCMD_BURST8     (1 << 16)       /* 8 byte burst */
#define DCMD_BURST16    (2 << 16)       /* 16 byte burst */
#define DCMD_BURST32    (3 << 16)       /* 32 byte burst */
#define DCMD_WIDTH1     (1 << 14)       /* 1 byte width */
#define DCMD_WIDTH2     (2 << 14)       /* 2 byte width (HalfWord) */
#define DCMD_WIDTH4     (3 << 14)       /* 4 byte width (Word) */
#define DCMD_LENGTH     0x01fff         /* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT          3
#define PDMA_MAX_DESC_BYTES     0x1000

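/*
 * A channel's DCMD word is built by OR-ing the flag and field macros above.
 * As an illustrative sketch (not lifted from this driver's flow), a
 * 4-byte-wide, 32-byte-burst, memory-to-device command for a 256-byte chunk
 * would look like:
 *
 *      u32 dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_WIDTH4 |
 *                 DCMD_BURST32 | (DCMD_LENGTH & 256);
 *
 * The hardware length field holds at most DCMD_LENGTH (8 KiB - 1) bytes;
 * this driver splits transfers into PDMA_MAX_DESC_BYTES (4 KiB) descriptors.
 */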
struct mmp_pdma_desc_hw {
        u32 ddadr;      /* Points to the next descriptor + flags */
        u32 dsadr;      /* DSADR value for the current transfer */
        u32 dtadr;      /* DTADR value for the current transfer */
        u32 dcmd;       /* DCMD value for the current transfer */
} __aligned(32);

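/*
 * Hardware descriptors are chained through ddadr, which holds the DMA
 * address of the next mmp_pdma_desc_hw. DDADR_DESCADDR masks the low four
 * bits, so descriptors must be at least 16-byte aligned; the 32-byte
 * alignment above satisfies this. Setting DDADR_STOP in ddadr terminates
 * the chain.
 */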
struct mmp_pdma_desc_sw {
        struct mmp_pdma_desc_hw desc;
        struct list_head node;
        struct list_head tx_list;
        struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
        struct device *dev;
        struct dma_chan chan;
        struct dma_async_tx_descriptor desc;
        struct mmp_pdma_phy *phy;
        enum dma_transfer_direction dir;

        /* channel's basic info */
        struct tasklet_struct tasklet;
        u32 dcmd;
        u32 drcmr;
        u32 dev_addr;

        /* list for desc */
        spinlock_t desc_lock;           /* Descriptor list lock */
        struct list_head chain_pending; /* Link descriptors queue for pending */
        struct list_head chain_running; /* Link descriptors queue for running */
        bool idle;                      /* channel state machine */

        struct dma_pool *desc_pool;     /* Descriptors pool */
};

struct mmp_pdma_phy {
        int idx;
        void __iomem *base;
        struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
        int                             dma_channels;
        void __iomem                    *base;
        struct device                   *dev;
        struct dma_device               device;
        struct mmp_pdma_phy             *phy;
};

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
        u32 reg = (phy->idx << 4) + DDADR;

        writel(addr, phy->base + reg);
}

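/*
 * enable_chan() first programs the DRCMR register for the channel's request
 * line: request lines below 64 live at offset 0x0100 + line * 4, higher
 * lines at 0x1100 + (line & 0x3f) * 4. The register is written with
 * DRCMR_MAPVLD plus the physical channel index, then DCSR_RUN is set to
 * start the channel.
 */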
static void enable_chan(struct mmp_pdma_phy *phy)
{
        u32 reg;

        if (!phy->vchan)
                return;

        reg = phy->vchan->drcmr;
        reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
        writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

        reg = (phy->idx << 2) + DCSR;
        writel(readl(phy->base + reg) | DCSR_RUN,
                                        phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
        u32 reg;

        if (phy) {
                reg = (phy->idx << 2) + DCSR;
                writel(readl(phy->base + reg) & ~DCSR_RUN,
                                                phy->base + reg);
        }
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
        u32 dcsr;
        u32 dint = readl(phy->base + DINT);
        u32 reg = (phy->idx << 2) + DCSR;

        if (dint & BIT(phy->idx)) {
                /* clear irq */
                dcsr = readl(phy->base + reg);
                writel(dcsr, phy->base + reg);
                if ((dcsr & DCSR_BUSERR) && (phy->vchan))
                        dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
                return 0;
        }
        return -EAGAIN;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
        struct mmp_pdma_phy *phy = dev_id;

        if (clear_chan_irq(phy) == 0) {
                tasklet_schedule(&phy->vchan->tasklet);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
        struct mmp_pdma_device *pdev = dev_id;
        struct mmp_pdma_phy *phy;
        u32 dint = readl(pdev->base + DINT);
        int i, ret;
        int irq_num = 0;

        while (dint) {
                i = __ffs(dint);
                dint &= (dint - 1);
                phy = &pdev->phy[i];
                ret = mmp_pdma_chan_handler(irq, phy);
                if (ret == IRQ_HANDLED)
                        irq_num++;
        }

        if (irq_num)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

/* lookup free phy channel as descending priority */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
        int prio, i;
        struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
        struct mmp_pdma_phy *phy;

        /*
         * dma channel priorities
         * ch 0 - 3,  16 - 19  <--> (0)
         * ch 4 - 7,  20 - 23  <--> (1)
         * ch 8 - 11, 24 - 27  <--> (2)
         * ch 12 - 15, 28 - 31  <--> (3)
         */
        for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
                for (i = 0; i < pdev->dma_channels; i++) {
                        if (prio != ((i & 0xf) >> 2))
                                continue;
                        phy = &pdev->phy[i];
                        if (!phy->vchan) {
                                phy->vchan = pchan;
                                return phy;
                        }
                }
        }

        return NULL;
}

/* desc->tx_list ==> pending list */
static void append_pending_queue(struct mmp_pdma_chan *chan,
                                        struct mmp_pdma_desc_sw *desc)
{
        struct mmp_pdma_desc_sw *tail =
                                to_mmp_pdma_desc(chan->chain_pending.prev);

        if (list_empty(&chan->chain_pending))
                goto out_splice;

        /* one irq per queue, even appended */
        tail->desc.ddadr = desc->async_tx.phys;
        tail->desc.dcmd &= ~DCMD_ENDIRQEN;

        /* softly link to pending list */
out_splice:
        list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
        struct mmp_pdma_desc_sw *desc;

        /* still in running, irq will start the pending list */
        if (!chan->idle) {
                dev_dbg(chan->dev, "DMA controller still busy\n");
                return;
        }

        if (list_empty(&chan->chain_pending)) {
                /* chance to re-fetch phy channel with higher prio */
                if (chan->phy) {
                        chan->phy->vchan = NULL;
                        chan->phy = NULL;
                }
                dev_dbg(chan->dev, "no pending list\n");
                return;
        }

        if (!chan->phy) {
                chan->phy = lookup_phy(chan);
                if (!chan->phy) {
                        dev_dbg(chan->dev, "no free dma channel\n");
                        return;
                }
        }

        /*
         * pending -> running
         * reinitialize pending list
         */
        desc = list_first_entry(&chan->chain_pending,
                                struct mmp_pdma_desc_sw, node);
        list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

        /*
         * Program the descriptor's address into the DMA controller,
         * then start the DMA transaction
         */
        set_desc(chan->phy, desc->async_tx.phys);
        enable_chan(chan->phy);
        chan->idle = false;
}

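/*
 * Descriptor life cycle, as implemented below: a prep_* call builds a
 * tx_list of chained hardware descriptors; tx_submit() splices that list
 * onto chain_pending and links it after the previous tail (clearing the
 * tail's DCMD_ENDIRQEN so only the final descriptor raises an interrupt);
 * issue_pending() moves chain_pending to chain_running and starts the
 * physical channel; the completion tasklet then completes cookies, runs
 * callbacks, frees descriptors and restarts the pending queue.
 */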
/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
        struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
        struct mmp_pdma_desc_sw *child;
        unsigned long flags;
        dma_cookie_t cookie = -EBUSY;

        spin_lock_irqsave(&chan->desc_lock, flags);

        list_for_each_entry(child, &desc->tx_list, node) {
                cookie = dma_cookie_assign(&child->async_tx);
        }

        append_pending_queue(chan, desc);

        spin_unlock_irqrestore(&chan->desc_lock, flags);

        return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
        struct mmp_pdma_desc_sw *desc;
        dma_addr_t pdesc;

        desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
        if (!desc) {
                dev_err(chan->dev, "out of memory for link descriptor\n");
                return NULL;
        }

        memset(desc, 0, sizeof(*desc));
        INIT_LIST_HEAD(&desc->tx_list);
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
        /* each desc has submit */
        desc->async_tx.tx_submit = mmp_pdma_tx_submit;
        desc->async_tx.phys = pdesc;

        return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for a DMA channel.
 *
 * This function creates a dma pool for descriptor allocation.
 *
 * Return: The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

        if (chan->desc_pool)
                return 1;

        chan->desc_pool =
                dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
                                  sizeof(struct mmp_pdma_desc_sw),
                                  __alignof__(struct mmp_pdma_desc_sw), 0);
        if (!chan->desc_pool) {
                dev_err(chan->dev, "unable to allocate descriptor pool\n");
                return -ENOMEM;
        }
        if (chan->phy) {
                chan->phy->vchan = NULL;
                chan->phy = NULL;
        }
        chan->idle = true;
        chan->dev_addr = 0;
        return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
                                  struct list_head *list)
{
        struct mmp_pdma_desc_sw *desc, *_desc;

        list_for_each_entry_safe(desc, _desc, list, node) {
                list_del(&desc->node);
                dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        mmp_pdma_free_desc_list(chan, &chan->chain_pending);
        mmp_pdma_free_desc_list(chan, &chan->chain_running);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        dma_pool_destroy(chan->desc_pool);
        chan->desc_pool = NULL;
        chan->idle = true;
        chan->dev_addr = 0;
        if (chan->phy) {
                chan->phy->vchan = NULL;
                chan->phy = NULL;
        }
}

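/*
 * mmp_pdma_prep_memcpy() below splits the copy into PDMA_MAX_DESC_BYTES
 * chunks, one hardware descriptor each, chained through ddadr. Only the
 * last descriptor gets DDADR_STOP and DCMD_ENDIRQEN, so a single interrupt
 * signals completion of the whole transaction.
 */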
static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
        dma_addr_t dma_dst, dma_addr_t dma_src,
        size_t len, unsigned long flags)
{
        struct mmp_pdma_chan *chan;
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
        size_t copy = 0;

        if (!dchan)
                return NULL;

        if (!len)
                return NULL;

        chan = to_mmp_pdma_chan(dchan);

        if (!chan->dir) {
                chan->dir = DMA_MEM_TO_MEM;
                chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
                chan->dcmd |= DCMD_BURST32;
        }

        do {
                /* Allocate the link descriptor from DMA pool */
                new = mmp_pdma_alloc_descriptor(chan);
                if (!new) {
                        dev_err(chan->dev, "no memory for desc\n");
                        goto fail;
                }

                copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);

                new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
                new->desc.dsadr = dma_src;
                new->desc.dtadr = dma_dst;

                if (!first)
                        first = new;
                else
                        prev->desc.ddadr = new->async_tx.phys;

                new->async_tx.cookie = 0;
                async_tx_ack(&new->async_tx);

                prev = new;
                len -= copy;

                if (chan->dir == DMA_MEM_TO_DEV) {
                        dma_src += copy;
                } else if (chan->dir == DMA_DEV_TO_MEM) {
                        dma_dst += copy;
                } else if (chan->dir == DMA_MEM_TO_MEM) {
                        dma_src += copy;
                        dma_dst += copy;
                }

                /* Insert the link descriptor to the LD ring */
                list_add_tail(&new->node, &first->tx_list);
        } while (len);

        first->async_tx.flags = flags; /* client is in control of this ack */
        first->async_tx.cookie = -EBUSY;

        /* last desc and fire IRQ */
        new->desc.ddadr = DDADR_STOP;
        new->desc.dcmd |= DCMD_ENDIRQEN;

        return &first->async_tx;

fail:
        if (first)
                mmp_pdma_free_desc_list(chan, &first->tx_list);
        return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
                         unsigned int sg_len, enum dma_transfer_direction dir,
                         unsigned long flags, void *context)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
        size_t len, avail;
        struct scatterlist *sg;
        dma_addr_t addr;
        int i;

        if ((sgl == NULL) || (sg_len == 0))
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                addr = sg_dma_address(sg);
                avail = sg_dma_len(sg);

                do {
                        len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);

                        /* allocate and populate the descriptor */
                        new = mmp_pdma_alloc_descriptor(chan);
                        if (!new) {
                                dev_err(chan->dev, "no memory for desc\n");
                                goto fail;
                        }

                        new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
                        if (dir == DMA_MEM_TO_DEV) {
                                new->desc.dsadr = addr;
                                new->desc.dtadr = chan->dev_addr;
                        } else {
                                new->desc.dsadr = chan->dev_addr;
                                new->desc.dtadr = addr;
                        }

                        if (!first)
                                first = new;
                        else
                                prev->desc.ddadr = new->async_tx.phys;

                        new->async_tx.cookie = 0;
                        async_tx_ack(&new->async_tx);
                        prev = new;

                        /* Insert the link descriptor to the LD ring */
                        list_add_tail(&new->node, &first->tx_list);

                        /* update metadata */
                        addr += len;
                        avail -= len;
                } while (avail);
        }

        first->async_tx.cookie = -EBUSY;
        first->async_tx.flags = flags;

        /* last desc and fire IRQ */
        new->desc.ddadr = DDADR_STOP;
        new->desc.dcmd |= DCMD_ENDIRQEN;

        return &first->async_tx;

fail:
        if (first)
                mmp_pdma_free_desc_list(chan, &first->tx_list);
        return NULL;
}

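/*
 * Illustrative client usage (a sketch, not part of this driver): a
 * peripheral driver obtains a channel through the generic dmaengine API,
 * configures it, and submits a scatter-gather transfer roughly as below.
 * The names dev_fifo_phys, drcmr_line, sgl, sg_len and xfer_done are
 * placeholders.
 *
 *      struct dma_chan *ch;
 *      struct dma_slave_config cfg = {
 *              .direction      = DMA_MEM_TO_DEV,
 *              .dst_addr       = dev_fifo_phys,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst   = 32,
 *              .slave_id       = drcmr_line,
 *      };
 *      struct dma_async_tx_descriptor *tx;
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_SLAVE, mask);
 *      ch = dma_request_channel(mask, NULL, NULL);
 *      dmaengine_slave_config(ch, &cfg);
 *      tx = dmaengine_prep_slave_sg(ch, sgl, sg_len, DMA_MEM_TO_DEV,
 *                                   DMA_PREP_INTERRUPT);
 *      tx->callback = xfer_done;
 *      dmaengine_submit(tx);
 *      dma_async_issue_pending(ch);
 */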
static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
                unsigned long arg)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct dma_slave_config *cfg = (void *)arg;
        unsigned long flags;
        int ret = 0;
        u32 maxburst = 0, addr = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

        if (!dchan)
                return -EINVAL;

        switch (cmd) {
        case DMA_TERMINATE_ALL:
                disable_chan(chan->phy);
                if (chan->phy) {
                        chan->phy->vchan = NULL;
                        chan->phy = NULL;
                }
                spin_lock_irqsave(&chan->desc_lock, flags);
                mmp_pdma_free_desc_list(chan, &chan->chain_pending);
                mmp_pdma_free_desc_list(chan, &chan->chain_running);
                spin_unlock_irqrestore(&chan->desc_lock, flags);
                chan->idle = true;
                break;
        case DMA_SLAVE_CONFIG:
                if (cfg->direction == DMA_DEV_TO_MEM) {
                        chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
                        maxburst = cfg->src_maxburst;
                        width = cfg->src_addr_width;
                        addr = cfg->src_addr;
                } else if (cfg->direction == DMA_MEM_TO_DEV) {
                        chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
                        maxburst = cfg->dst_maxburst;
                        width = cfg->dst_addr_width;
                        addr = cfg->dst_addr;
                }

                if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
                        chan->dcmd |= DCMD_WIDTH1;
                else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
                        chan->dcmd |= DCMD_WIDTH2;
                else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
                        chan->dcmd |= DCMD_WIDTH4;

                if (maxburst == 8)
                        chan->dcmd |= DCMD_BURST8;
                else if (maxburst == 16)
                        chan->dcmd |= DCMD_BURST16;
                else if (maxburst == 32)
                        chan->dcmd |= DCMD_BURST32;

                chan->dir = cfg->direction;
                chan->drcmr = cfg->slave_id;
                chan->dev_addr = addr;
                break;
        default:
                return -ENOSYS;
        }

        return ret;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
                        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        enum dma_status ret;
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        ret = dma_cookie_status(dchan, cookie, txstate);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        return ret;
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->desc_lock, flags);
        start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run the completion callbacks and restart the pending queue.
 */
static void dma_do_tasklet(unsigned long data)
{
        struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
        struct mmp_pdma_desc_sw *desc, *_desc;
        LIST_HEAD(chain_cleanup);
        unsigned long flags;

        /* submit pending list; callback for each desc; free desc */

        spin_lock_irqsave(&chan->desc_lock, flags);

        /* update the cookie if we have some descriptors to cleanup */
        if (!list_empty(&chan->chain_running)) {
                dma_cookie_t cookie;

                desc = to_mmp_pdma_desc(chan->chain_running.prev);
                cookie = desc->async_tx.cookie;
                dma_cookie_complete(&desc->async_tx);

                dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
        }

        /*
         * move the descriptors to a temporary list so we can drop the lock
         * during the entire cleanup operation
         */
        list_splice_tail_init(&chan->chain_running, &chain_cleanup);

        /* the hardware is now idle and ready for more */
        chan->idle = true;

        /* Start any pending transactions automatically */
        start_pending_queue(chan);
        spin_unlock_irqrestore(&chan->desc_lock, flags);

        /* Run the callback for each descriptor, in order */
        list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
                struct dma_async_tx_descriptor *txd = &desc->async_tx;

                /* Remove from the list of transactions */
                list_del(&desc->node);
                /* Run the link descriptor callback function */
                if (txd->callback)
                        txd->callback(txd->callback_param);

                dma_pool_free(chan->desc_pool, desc, txd->phys);
        }
}

static int mmp_pdma_remove(struct platform_device *op)
{
        struct mmp_pdma_device *pdev = platform_get_drvdata(op);

        dma_async_device_unregister(&pdev->device);
        return 0;
}

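/*
 * IRQ wiring: when the platform provides one interrupt per channel, each
 * physical channel gets its own mmp_pdma_chan_handler here; otherwise the
 * channels share a single interrupt and mmp_pdma_int_handler (requested in
 * probe) demultiplexes it by reading DINT.
 */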
static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
                                                        int idx, int irq)
{
        struct mmp_pdma_phy *phy = &pdev->phy[idx];
        struct mmp_pdma_chan *chan;
        int ret;

        chan = devm_kzalloc(pdev->dev,
                        sizeof(struct mmp_pdma_chan), GFP_KERNEL);
        if (chan == NULL)
                return -ENOMEM;

        phy->idx = idx;
        phy->base = pdev->base;

        if (irq) {
                ret = devm_request_irq(pdev->dev, irq,
                        mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
                if (ret) {
                        dev_err(pdev->dev, "channel request irq fail!\n");
                        return ret;
                }
        }

        spin_lock_init(&chan->desc_lock);
        chan->dev = pdev->dev;
        chan->chan.device = &pdev->device;
        tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
        INIT_LIST_HEAD(&chan->chain_pending);
        INIT_LIST_HEAD(&chan->chain_running);

        /* register virt channel to dma engine */
        list_add_tail(&chan->chan.device_node,
                        &pdev->device.channels);

        return 0;
}

static const struct of_device_id mmp_pdma_dt_ids[] = {
        { .compatible = "marvell,pdma-1.0", },
        {}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static int mmp_pdma_probe(struct platform_device *op)
{
        struct mmp_pdma_device *pdev;
        const struct of_device_id *of_id;
        struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
        struct resource *iores;
        int i, ret, irq = 0;
        int dma_channels = 0, irq_num = 0;

        pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
                return -ENOMEM;
        pdev->dev = &op->dev;

        iores = platform_get_resource(op, IORESOURCE_MEM, 0);
        if (!iores)
                return -EINVAL;

        pdev->base = devm_ioremap_resource(pdev->dev, iores);
        if (IS_ERR(pdev->base))
                return PTR_ERR(pdev->base);

        of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
        if (of_id)
                of_property_read_u32(pdev->dev->of_node,
                                "#dma-channels", &dma_channels);
        else if (pdata && pdata->dma_channels)
                dma_channels = pdata->dma_channels;
        else
                dma_channels = 32;      /* default 32 channels */
        pdev->dma_channels = dma_channels;

        for (i = 0; i < dma_channels; i++) {
                if (platform_get_irq(op, i) > 0)
                        irq_num++;
        }

        pdev->phy = devm_kzalloc(pdev->dev,
                dma_channels * sizeof(struct mmp_pdma_phy), GFP_KERNEL);
        if (pdev->phy == NULL)
                return -ENOMEM;

        INIT_LIST_HEAD(&pdev->device.channels);

        if (irq_num != dma_channels) {
                /* all chan share one irq, demux inside */
                irq = platform_get_irq(op, 0);
                ret = devm_request_irq(pdev->dev, irq,
                        mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
                if (ret)
                        return ret;
        }

        for (i = 0; i < dma_channels; i++) {
                irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
                ret = mmp_pdma_chan_init(pdev, i, irq);
                if (ret)
                        return ret;
        }

        dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
        dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
        pdev->device.dev = &op->dev;
        pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
        pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
        pdev->device.device_tx_status = mmp_pdma_tx_status;
        pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
        pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
        pdev->device.device_issue_pending = mmp_pdma_issue_pending;
        pdev->device.device_control = mmp_pdma_control;
        pdev->device.copy_align = PDMA_ALIGNMENT;

        if (pdev->dev->coherent_dma_mask)
                dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
        else
                dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

        ret = dma_async_device_register(&pdev->device);
        if (ret) {
                dev_err(pdev->device.dev, "unable to register\n");
                return ret;
        }

        /* needed by mmp_pdma_remove(), which calls platform_get_drvdata() */
        platform_set_drvdata(op, pdev);

        dev_info(pdev->device.dev, "initialized\n");
        return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
        { "mmp-pdma", },
        { },
};

static struct platform_driver mmp_pdma_driver = {
        .driver         = {
                .name   = "mmp-pdma",
                .owner  = THIS_MODULE,
                .of_match_table = mmp_pdma_dt_ids,
        },
        .id_table       = mmp_pdma_id_table,
        .probe          = mmp_pdma_probe,
        .remove         = mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");