linux/drivers/dma/dw_dmac.c
   1/*
   2 * Core driver for the Synopsys DesignWare DMA Controller
   3 *
   4 * Copyright (C) 2007-2008 Atmel Corporation
   5 * Copyright (C) 2010-2011 ST Microelectronics
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11
  12#include <linux/bitops.h>
  13#include <linux/clk.h>
  14#include <linux/delay.h>
  15#include <linux/dmaengine.h>
  16#include <linux/dma-mapping.h>
  17#include <linux/dmapool.h>
  18#include <linux/err.h>
  19#include <linux/init.h>
  20#include <linux/interrupt.h>
  21#include <linux/io.h>
  22#include <linux/of.h>
  23#include <linux/of_dma.h>
  24#include <linux/mm.h>
  25#include <linux/module.h>
  26#include <linux/platform_device.h>
  27#include <linux/slab.h>
  28
  29#include "dw_dmac_regs.h"
  30#include "dmaengine.h"
  31
  32/*
  33 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
  34 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
  35 * of which use ARM any more).  See the "Databook" from Synopsys for
  36 * information beyond what licensees probably provide.
  37 *
  38 * The driver has currently been tested only with the Atmel AT32AP7000,
  39 * which does not support descriptor writeback.
  40 */
  41
  42static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
  43{
  44        return slave ? slave->dst_master : 0;
  45}
  46
  47static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
  48{
  49        return slave ? slave->src_master : 1;
  50}
  51
  52#define SRC_MASTER      0
  53#define DST_MASTER      1
  54
  55static inline unsigned int dwc_get_master(struct dma_chan *chan, int master)
  56{
  57        struct dw_dma *dw = to_dw_dma(chan->device);
  58        struct dw_dma_slave *dws = chan->private;
  59        unsigned int m;
  60
  61        if (master == SRC_MASTER)
  62                m = dwc_get_sms(dws);
  63        else
  64                m = dwc_get_dms(dws);
  65
  66        return min_t(unsigned int, dw->nr_masters - 1, m);
  67}
  68
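    /*
     * Build the default CTL_LO value for a channel: burst sizes taken from
     * the slave config for slave transfers (MSIZE_16 otherwise), LLP block
     * chaining enabled on both ends, and the AHB master for each side.
     */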
  69#define DWC_DEFAULT_CTLLO(_chan) ({                             \
  70                struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
  71                struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
  72                bool _is_slave = is_slave_direction(_dwc->direction);   \
  73                int _dms = dwc_get_master(_chan, DST_MASTER);           \
  74                int _sms = dwc_get_master(_chan, SRC_MASTER);           \
  75                u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
  76                        DW_DMA_MSIZE_16;                        \
  77                u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
  78                        DW_DMA_MSIZE_16;                        \
  79                                                                \
  80                (DWC_CTLL_DST_MSIZE(_dmsize)                    \
  81                 | DWC_CTLL_SRC_MSIZE(_smsize)                  \
  82                 | DWC_CTLL_LLP_D_EN                            \
  83                 | DWC_CTLL_LLP_S_EN                            \
  84                 | DWC_CTLL_DMS(_dms)                           \
  85                 | DWC_CTLL_SMS(_sms));                         \
  86        })
  87
  88/*
  89 * Number of descriptors to allocate for each channel. This should be
  90 * made configurable somehow; preferably, the clients (at least the
  91 * ones using slave transfers) should be able to give us a hint.
  92 */
  93#define NR_DESCS_PER_CHANNEL    64
  94
  95static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master)
  96{
  97        struct dw_dma *dw = to_dw_dma(chan->device);
  98
  99        return dw->data_width[dwc_get_master(chan, master)];
 100}
 101
 102/*----------------------------------------------------------------------*/
 103
 104static struct device *chan2dev(struct dma_chan *chan)
 105{
 106        return &chan->dev->device;
 107}
 108static struct device *chan2parent(struct dma_chan *chan)
 109{
 110        return chan->dev->device.parent;
 111}
 112
 113static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
 114{
 115        return to_dw_desc(dwc->active_list.next);
 116}
 117
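    /* Pick the first descriptor on the free list that the client has ACKed */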
 118static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 119{
 120        struct dw_desc *desc, *_desc;
 121        struct dw_desc *ret = NULL;
 122        unsigned int i = 0;
 123        unsigned long flags;
 124
 125        spin_lock_irqsave(&dwc->lock, flags);
 126        list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
 127                i++;
 128                if (async_tx_test_ack(&desc->txd)) {
 129                        list_del(&desc->desc_node);
 130                        ret = desc;
 131                        break;
 132                }
 133                dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
 134        }
 135        spin_unlock_irqrestore(&dwc->lock, flags);
 136
 137        dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
 138
 139        return ret;
 140}
 141
 142/*
 143 * Move a descriptor, including any children, to the free list.
 144 * `desc' must not be on any lists.
 145 */
 146static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 147{
 148        unsigned long flags;
 149
 150        if (desc) {
 151                struct dw_desc *child;
 152
 153                spin_lock_irqsave(&dwc->lock, flags);
 154                list_for_each_entry(child, &desc->tx_list, desc_node)
 155                        dev_vdbg(chan2dev(&dwc->chan),
 156                                        "moving child desc %p to freelist\n",
 157                                        child);
 158                list_splice_init(&desc->tx_list, &dwc->free_list);
 159                dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
 160                list_add(&desc->desc_node, &dwc->free_list);
 161                spin_unlock_irqrestore(&dwc->lock, flags);
 162        }
 163}
 164
 165static void dwc_initialize(struct dw_dma_chan *dwc)
 166{
 167        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 168        struct dw_dma_slave *dws = dwc->chan.private;
 169        u32 cfghi = DWC_CFGH_FIFO_MODE;
 170        u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
 171
 172        if (dwc->initialized == true)
 173                return;
 174
 175        if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
 176                /* autoconfigure based on request line from DT */
 177                if (dwc->direction == DMA_MEM_TO_DEV)
 178                        cfghi = DWC_CFGH_DST_PER(dwc->request_line);
 179                else if (dwc->direction == DMA_DEV_TO_MEM)
 180                        cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
 181        } else if (dws) {
 182                /*
 183                 * We need controller-specific data to set up slave
 184                 * transfers.
 185                 */
 186                BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
 187
 188                cfghi = dws->cfg_hi;
 189                cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
 190        } else {
 191                if (dwc->direction == DMA_MEM_TO_DEV)
 192                        cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
 193                else if (dwc->direction == DMA_DEV_TO_MEM)
 194                        cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
 195        }
 196
 197        channel_writel(dwc, CFG_LO, cfglo);
 198        channel_writel(dwc, CFG_HI, cfghi);
 199
 200        /* Enable interrupts */
 201        channel_set_bit(dw, MASK.XFER, dwc->mask);
 202        channel_set_bit(dw, MASK.ERROR, dwc->mask);
 203
 204        dwc->initialized = true;
 205}
 206
 207/*----------------------------------------------------------------------*/
 208
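    /*
     * Return the log2 of the widest power-of-two alignment (up to 8 bytes)
     * of v: 0 = byte, 1 = halfword, 2 = word, 3 = double word.  Used to
     * choose the largest transfer width the addresses and length permit.
     */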
 209static inline unsigned int dwc_fast_fls(unsigned long long v)
 210{
 211        /*
 212         * We can be a lot more clever here, but this should take care
 213         * of the most common optimization.
 214         */
 215        if (!(v & 7))
 216                return 3;
 217        else if (!(v & 3))
 218                return 2;
 219        else if (!(v & 1))
 220                return 1;
 221        return 0;
 222}
 223
 224static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
 225{
 226        dev_err(chan2dev(&dwc->chan),
 227                "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
 228                channel_readl(dwc, SAR),
 229                channel_readl(dwc, DAR),
 230                channel_readl(dwc, LLP),
 231                channel_readl(dwc, CTL_HI),
 232                channel_readl(dwc, CTL_LO));
 233}
 234
 235static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
 236{
 237        channel_clear_bit(dw, CH_EN, dwc->mask);
 238        while (dma_readl(dw, CH_EN) & dwc->mask)
 239                cpu_relax();
 240}
 241
 242/*----------------------------------------------------------------------*/
 243
 244/* Perform single block transfer */
 245static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
 246                                       struct dw_desc *desc)
 247{
 248        struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
 249        u32             ctllo;
 250
 251        /* Software emulation of LLP mode relies on interrupts to continue
 252         * multi block transfer. */
 253        ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
 254
 255        channel_writel(dwc, SAR, desc->lli.sar);
 256        channel_writel(dwc, DAR, desc->lli.dar);
 257        channel_writel(dwc, CTL_LO, ctllo);
 258        channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
 259        channel_set_bit(dw, CH_EN, dwc->mask);
 260
 261        /* Move pointer to next descriptor */
 262        dwc->tx_node_active = dwc->tx_node_active->next;
 263}
 264
 265/* Called with dwc->lock held and bh disabled */
 266static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 267{
 268        struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
 269        unsigned long   was_soft_llp;
 270
 271        /* ASSERT:  channel is idle */
 272        if (dma_readl(dw, CH_EN) & dwc->mask) {
 273                dev_err(chan2dev(&dwc->chan),
 274                        "BUG: Attempted to start non-idle channel\n");
 275                dwc_dump_chan_regs(dwc);
 276
 277                /* The tasklet will hopefully advance the queue... */
 278                return;
 279        }
 280
 281        if (dwc->nollp) {
 282                was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
 283                                                &dwc->flags);
 284                if (was_soft_llp) {
 285                        dev_err(chan2dev(&dwc->chan),
 286                                "BUG: Attempted to start new LLP transfer "
 287                                "inside ongoing one\n");
 288                        return;
 289                }
 290
 291                dwc_initialize(dwc);
 292
 293                dwc->residue = first->total_len;
 294                dwc->tx_node_active = &first->tx_list;
 295
 296                /* Submit first block */
 297                dwc_do_single_block(dwc, first);
 298
 299                return;
 300        }
 301
 302        dwc_initialize(dwc);
 303
 304        channel_writel(dwc, LLP, first->txd.phys);
 305        channel_writel(dwc, CTL_LO,
 306                        DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
 307        channel_writel(dwc, CTL_HI, 0);
 308        channel_set_bit(dw, CH_EN, dwc->mask);
 309}
 310
 311/*----------------------------------------------------------------------*/
 312
 313static void
 314dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
 315                bool callback_required)
 316{
 317        dma_async_tx_callback           callback = NULL;
 318        void                            *param = NULL;
 319        struct dma_async_tx_descriptor  *txd = &desc->txd;
 320        struct dw_desc                  *child;
 321        unsigned long                   flags;
 322
 323        dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 324
 325        spin_lock_irqsave(&dwc->lock, flags);
 326        dma_cookie_complete(txd);
 327        if (callback_required) {
 328                callback = txd->callback;
 329                param = txd->callback_param;
 330        }
 331
 332        /* async_tx_ack */
 333        list_for_each_entry(child, &desc->tx_list, desc_node)
 334                async_tx_ack(&child->txd);
 335        async_tx_ack(&desc->txd);
 336
 337        list_splice_init(&desc->tx_list, &dwc->free_list);
 338        list_move(&desc->desc_node, &dwc->free_list);
 339
 340        if (!is_slave_direction(dwc->direction)) {
 341                struct device *parent = chan2parent(&dwc->chan);
 342                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
 343                        if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
 344                                dma_unmap_single(parent, desc->lli.dar,
 345                                        desc->total_len, DMA_FROM_DEVICE);
 346                        else
 347                                dma_unmap_page(parent, desc->lli.dar,
 348                                        desc->total_len, DMA_FROM_DEVICE);
 349                }
 350                if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
 351                        if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
 352                                dma_unmap_single(parent, desc->lli.sar,
 353                                        desc->total_len, DMA_TO_DEVICE);
 354                        else
 355                                dma_unmap_page(parent, desc->lli.sar,
 356                                        desc->total_len, DMA_TO_DEVICE);
 357                }
 358        }
 359
 360        spin_unlock_irqrestore(&dwc->lock, flags);
 361
 362        if (callback)
 363                callback(param);
 364}
 365
 366static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 367{
 368        struct dw_desc *desc, *_desc;
 369        LIST_HEAD(list);
 370        unsigned long flags;
 371
 372        spin_lock_irqsave(&dwc->lock, flags);
 373        if (dma_readl(dw, CH_EN) & dwc->mask) {
 374                dev_err(chan2dev(&dwc->chan),
 375                        "BUG: XFER bit set, but channel not idle!\n");
 376
 377                /* Try to continue after resetting the channel... */
 378                dwc_chan_disable(dw, dwc);
 379        }
 380
 381        /*
 382         * Submit queued descriptors ASAP, i.e. before we go through
 383         * the completed ones.
 384         */
 385        list_splice_init(&dwc->active_list, &list);
 386        if (!list_empty(&dwc->queue)) {
 387                list_move(dwc->queue.next, &dwc->active_list);
 388                dwc_dostart(dwc, dwc_first_active(dwc));
 389        }
 390
 391        spin_unlock_irqrestore(&dwc->lock, flags);
 392
 393        list_for_each_entry_safe(desc, _desc, &list, desc_node)
 394                dwc_descriptor_complete(dwc, desc, true);
 395}
 396
 397/* Returns how many bytes were already received from source */
 398static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
 399{
 400        u32 ctlhi = channel_readl(dwc, CTL_HI);
 401        u32 ctllo = channel_readl(dwc, CTL_LO);
 402
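            /*
             * BLOCK_TS in CTL_HI counts completed source transfers; bits 6:4
             * of CTL_LO hold SRC_TR_WIDTH (log2 of the source transfer width
             * in bytes), so their product is the number of bytes read so far.
             */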
 403        return (ctlhi & DWC_CTLH_BLOCK_TS_MASK) * (1 << (ctllo >> 4 & 7));
 404}
 405
 406static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 407{
 408        dma_addr_t llp;
 409        struct dw_desc *desc, *_desc;
 410        struct dw_desc *child;
 411        u32 status_xfer;
 412        unsigned long flags;
 413
 414        spin_lock_irqsave(&dwc->lock, flags);
 415        llp = channel_readl(dwc, LLP);
 416        status_xfer = dma_readl(dw, RAW.XFER);
 417
 418        if (status_xfer & dwc->mask) {
 419                /* Everything we've submitted is done */
 420                dma_writel(dw, CLEAR.XFER, dwc->mask);
 421
 422                if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
 423                        struct list_head *head, *active = dwc->tx_node_active;
 424
 425                        /*
 426                         * We must be inside the first active descriptor;
 427                         * otherwise something is really wrong.
 428                         */
 429                        desc = dwc_first_active(dwc);
 430
 431                        head = &desc->tx_list;
 432                        if (active != head) {
 433                                /* Update desc to reflect last sent one */
 434                                if (active != head->next)
 435                                        desc = to_dw_desc(active->prev);
 436
 437                                dwc->residue -= desc->len;
 438
 439                                child = to_dw_desc(active);
 440
 441                                /* Submit next block */
 442                                dwc_do_single_block(dwc, child);
 443
 444                                spin_unlock_irqrestore(&dwc->lock, flags);
 445                                return;
 446                        }
 447
 448                        /* We are done here */
 449                        clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
 450                }
 451
 452                dwc->residue = 0;
 453
 454                spin_unlock_irqrestore(&dwc->lock, flags);
 455
 456                dwc_complete_all(dw, dwc);
 457                return;
 458        }
 459
 460        if (list_empty(&dwc->active_list)) {
 461                dwc->residue = 0;
 462                spin_unlock_irqrestore(&dwc->lock, flags);
 463                return;
 464        }
 465
 466        if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
 467                dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
 468                spin_unlock_irqrestore(&dwc->lock, flags);
 469                return;
 470        }
 471
 472        dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
 473                        (unsigned long long)llp);
 474
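            /*
             * Walk the active list: the LLP register holds the address of the
             * lli the hardware will fetch next, so the descriptor whose llp
             * field matches it is the one currently in flight, and everything
             * before it has already completed.
             */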
 475        list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
 476                /* initial residue value */
 477                dwc->residue = desc->total_len;
 478
 479                /* Check the first descriptor's address */
 480                if (desc->txd.phys == llp) {
 481                        spin_unlock_irqrestore(&dwc->lock, flags);
 482                        return;
 483                }
 484
 485                /* Check the first descriptor's llp */
 486                if (desc->lli.llp == llp) {
 487                        /* This one is currently in progress */
 488                        dwc->residue -= dwc_get_sent(dwc);
 489                        spin_unlock_irqrestore(&dwc->lock, flags);
 490                        return;
 491                }
 492
 493                dwc->residue -= desc->len;
 494                list_for_each_entry(child, &desc->tx_list, desc_node) {
 495                        if (child->lli.llp == llp) {
 496                                /* Currently in progress */
 497                                dwc->residue -= dwc_get_sent(dwc);
 498                                spin_unlock_irqrestore(&dwc->lock, flags);
 499                                return;
 500                        }
 501                        dwc->residue -= child->len;
 502                }
 503
 504                /*
 505                 * No descriptors so far seem to be in progress, i.e.
 506                 * this one must be done.
 507                 */
 508                spin_unlock_irqrestore(&dwc->lock, flags);
 509                dwc_descriptor_complete(dwc, desc, true);
 510                spin_lock_irqsave(&dwc->lock, flags);
 511        }
 512
 513        dev_err(chan2dev(&dwc->chan),
 514                "BUG: All descriptors done, but channel not idle!\n");
 515
 516        /* Try to continue after resetting the channel... */
 517        dwc_chan_disable(dw, dwc);
 518
 519        if (!list_empty(&dwc->queue)) {
 520                list_move(dwc->queue.next, &dwc->active_list);
 521                dwc_dostart(dwc, dwc_first_active(dwc));
 522        }
 523        spin_unlock_irqrestore(&dwc->lock, flags);
 524}
 525
 526static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
 527{
 528        dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
 529                 lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
 530}
 531
 532static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 533{
 534        struct dw_desc *bad_desc;
 535        struct dw_desc *child;
 536        unsigned long flags;
 537
 538        dwc_scan_descriptors(dw, dwc);
 539
 540        spin_lock_irqsave(&dwc->lock, flags);
 541
 542        /*
 543         * The descriptor currently at the head of the active list is
 544         * borked. Since we don't have any way to report errors, we'll
 545         * just have to scream loudly and try to carry on.
 546         */
 547        bad_desc = dwc_first_active(dwc);
 548        list_del_init(&bad_desc->desc_node);
 549        list_move(dwc->queue.next, dwc->active_list.prev);
 550
 551        /* Clear the error flag and try to restart the controller */
 552        dma_writel(dw, CLEAR.ERROR, dwc->mask);
 553        if (!list_empty(&dwc->active_list))
 554                dwc_dostart(dwc, dwc_first_active(dwc));
 555
 556        /*
 557         * WARN may seem harsh, but since this only happens
 558         * when someone submits a bad physical address in a
 559         * descriptor, we should consider ourselves lucky that the
 560         * controller flagged an error instead of scribbling over
 561         * random memory locations.
 562         */
 563        dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
 564                                       "  cookie: %d\n", bad_desc->txd.cookie);
 565        dwc_dump_lli(dwc, &bad_desc->lli);
 566        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 567                dwc_dump_lli(dwc, &child->lli);
 568
 569        spin_unlock_irqrestore(&dwc->lock, flags);
 570
 571        /* Pretend the descriptor completed successfully */
 572        dwc_descriptor_complete(dwc, bad_desc, true);
 573}
 574
 575/* --------------------- Cyclic DMA API extensions -------------------- */
 576
 577inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
 578{
 579        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 580        return channel_readl(dwc, SAR);
 581}
 582EXPORT_SYMBOL(dw_dma_get_src_addr);
 583
 584inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
 585{
 586        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 587        return channel_readl(dwc, DAR);
 588}
 589EXPORT_SYMBOL(dw_dma_get_dst_addr);
 590
 591/* called with dwc->lock held and all DMAC interrupts disabled */
 592static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 593                u32 status_err, u32 status_xfer)
 594{
 595        unsigned long flags;
 596
 597        if (dwc->mask) {
 598                void (*callback)(void *param);
 599                void *callback_param;
 600
 601                dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
 602                                channel_readl(dwc, LLP));
 603
 604                callback = dwc->cdesc->period_callback;
 605                callback_param = dwc->cdesc->period_callback_param;
 606
 607                if (callback)
 608                        callback(callback_param);
 609        }
 610
 611        /*
 612         * Error and transfer complete are highly unlikely, and will most
 613         * likely be due to a configuration error by the user.
 614         */
 615        if (unlikely(status_err & dwc->mask) ||
 616                        unlikely(status_xfer & dwc->mask)) {
 617                int i;
 618
 619                dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
 620                                "interrupt, stopping DMA transfer\n",
 621                                status_xfer ? "xfer" : "error");
 622
 623                spin_lock_irqsave(&dwc->lock, flags);
 624
 625                dwc_dump_chan_regs(dwc);
 626
 627                dwc_chan_disable(dw, dwc);
 628
 629                /* make sure DMA does not restart by loading a new list */
 630                channel_writel(dwc, LLP, 0);
 631                channel_writel(dwc, CTL_LO, 0);
 632                channel_writel(dwc, CTL_HI, 0);
 633
 634                dma_writel(dw, CLEAR.ERROR, dwc->mask);
 635                dma_writel(dw, CLEAR.XFER, dwc->mask);
 636
 637                for (i = 0; i < dwc->cdesc->periods; i++)
 638                        dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
 639
 640                spin_unlock_irqrestore(&dwc->lock, flags);
 641        }
 642}
 643
 644/* ------------------------------------------------------------------------- */
 645
 646static void dw_dma_tasklet(unsigned long data)
 647{
 648        struct dw_dma *dw = (struct dw_dma *)data;
 649        struct dw_dma_chan *dwc;
 650        u32 status_xfer;
 651        u32 status_err;
 652        int i;
 653
 654        status_xfer = dma_readl(dw, RAW.XFER);
 655        status_err = dma_readl(dw, RAW.ERROR);
 656
 657        dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
 658
 659        for (i = 0; i < dw->dma.chancnt; i++) {
 660                dwc = &dw->chan[i];
 661                if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
 662                        dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
 663                else if (status_err & (1 << i))
 664                        dwc_handle_error(dw, dwc);
 665                else if (status_xfer & (1 << i))
 666                        dwc_scan_descriptors(dw, dwc);
 667        }
 668
 669        /*
 670         * Re-enable interrupts.
 671         */
 672        channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
 673        channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 674}
 675
 676static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 677{
 678        struct dw_dma *dw = dev_id;
 679        u32 status;
 680
 681        dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
 682                        dma_readl(dw, STATUS_INT));
 683
 684        /*
 685         * Just disable the interrupts. We'll turn them back on in the
 686         * softirq handler.
 687         */
 688        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
 689        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
 690
 691        status = dma_readl(dw, STATUS_INT);
 692        if (status) {
 693                dev_err(dw->dma.dev,
 694                        "BUG: Unexpected interrupts pending: 0x%x\n",
 695                        status);
 696
 697                /* Try to recover: mask interrupts on all (up to 8) channels */
 698                channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
 699                channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
 700                channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
 701                channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
 702        }
 703
 704        tasklet_schedule(&dw->tasklet);
 705
 706        return IRQ_HANDLED;
 707}
 708
 709/*----------------------------------------------------------------------*/
 710
 711static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 712{
 713        struct dw_desc          *desc = txd_to_dw_desc(tx);
 714        struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
 715        dma_cookie_t            cookie;
 716        unsigned long           flags;
 717
 718        spin_lock_irqsave(&dwc->lock, flags);
 719        cookie = dma_cookie_assign(tx);
 720
 721        /*
 722         * REVISIT: We should attempt to chain as many descriptors as
 723         * possible, perhaps even appending to those already submitted
 724         * for DMA. But this is hard to do in a race-free manner.
 725         */
 726        if (list_empty(&dwc->active_list)) {
 727                dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
 728                                desc->txd.cookie);
 729                list_add_tail(&desc->desc_node, &dwc->active_list);
 730                dwc_dostart(dwc, dwc_first_active(dwc));
 731        } else {
 732                dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
 733                                desc->txd.cookie);
 734
 735                list_add_tail(&desc->desc_node, &dwc->queue);
 736        }
 737
 738        spin_unlock_irqrestore(&dwc->lock, flags);
 739
 740        return cookie;
 741}
 742
 743static struct dma_async_tx_descriptor *
 744dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 745                size_t len, unsigned long flags)
 746{
 747        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
 748        struct dw_desc          *desc;
 749        struct dw_desc          *first;
 750        struct dw_desc          *prev;
 751        size_t                  xfer_count;
 752        size_t                  offset;
 753        unsigned int            src_width;
 754        unsigned int            dst_width;
 755        unsigned int            data_width;
 756        u32                     ctllo;
 757
 758        dev_vdbg(chan2dev(chan),
 759                        "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
 760                        (unsigned long long)dest, (unsigned long long)src,
 761                        len, flags);
 762
 763        if (unlikely(!len)) {
 764                dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
 765                return NULL;
 766        }
 767
 768        dwc->direction = DMA_MEM_TO_MEM;
 769
 770        data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER),
 771                           dwc_get_data_width(chan, DST_MASTER));
 772
 773        src_width = dst_width = min_t(unsigned int, data_width,
 774                                      dwc_fast_fls(src | dest | len));
 775
 776        ctllo = DWC_DEFAULT_CTLLO(chan)
 777                        | DWC_CTLL_DST_WIDTH(dst_width)
 778                        | DWC_CTLL_SRC_WIDTH(src_width)
 779                        | DWC_CTLL_DST_INC
 780                        | DWC_CTLL_SRC_INC
 781                        | DWC_CTLL_FC_M2M;
 782        prev = first = NULL;
 783
 784        for (offset = 0; offset < len; offset += xfer_count << src_width) {
 785                xfer_count = min_t(size_t, (len - offset) >> src_width,
 786                                           dwc->block_size);
 787
 788                desc = dwc_desc_get(dwc);
 789                if (!desc)
 790                        goto err_desc_get;
 791
 792                desc->lli.sar = src + offset;
 793                desc->lli.dar = dest + offset;
 794                desc->lli.ctllo = ctllo;
 795                desc->lli.ctlhi = xfer_count;
 796                desc->len = xfer_count << src_width;
 797
 798                if (!first) {
 799                        first = desc;
 800                } else {
 801                        prev->lli.llp = desc->txd.phys;
 802                        list_add_tail(&desc->desc_node,
 803                                        &first->tx_list);
 804                }
 805                prev = desc;
 806        }
 807
 808        if (flags & DMA_PREP_INTERRUPT)
 809                /* Trigger interrupt after last block */
 810                prev->lli.ctllo |= DWC_CTLL_INT_EN;
 811
 812        prev->lli.llp = 0;
 813        first->txd.flags = flags;
 814        first->total_len = len;
 815
 816        return &first->txd;
 817
 818err_desc_get:
 819        dwc_desc_put(dwc, first);
 820        return NULL;
 821}
 822
 823static struct dma_async_tx_descriptor *
 824dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 825                unsigned int sg_len, enum dma_transfer_direction direction,
 826                unsigned long flags, void *context)
 827{
 828        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
 829        struct dma_slave_config *sconfig = &dwc->dma_sconfig;
 830        struct dw_desc          *prev;
 831        struct dw_desc          *first;
 832        u32                     ctllo;
 833        dma_addr_t              reg;
 834        unsigned int            reg_width;
 835        unsigned int            mem_width;
 836        unsigned int            data_width;
 837        unsigned int            i;
 838        struct scatterlist      *sg;
 839        size_t                  total_len = 0;
 840
 841        dev_vdbg(chan2dev(chan), "%s\n", __func__);
 842
 843        if (unlikely(!is_slave_direction(direction) || !sg_len))
 844                return NULL;
 845
 846        dwc->direction = direction;
 847
 848        prev = first = NULL;
 849
 850        switch (direction) {
 851        case DMA_MEM_TO_DEV:
 852                reg_width = __fls(sconfig->dst_addr_width);
 853                reg = sconfig->dst_addr;
 854                ctllo = (DWC_DEFAULT_CTLLO(chan)
 855                                | DWC_CTLL_DST_WIDTH(reg_width)
 856                                | DWC_CTLL_DST_FIX
 857                                | DWC_CTLL_SRC_INC);
 858
 859                ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
 860                        DWC_CTLL_FC(DW_DMA_FC_D_M2P);
 861
 862                data_width = dwc_get_data_width(chan, SRC_MASTER);
 863
 864                for_each_sg(sgl, sg, sg_len, i) {
 865                        struct dw_desc  *desc;
 866                        u32             len, dlen, mem;
 867
 868                        mem = sg_dma_address(sg);
 869                        len = sg_dma_len(sg);
 870
 871                        mem_width = min_t(unsigned int,
 872                                          data_width, dwc_fast_fls(mem | len));
 873
 874slave_sg_todev_fill_desc:
 875                        desc = dwc_desc_get(dwc);
 876                        if (!desc) {
 877                                dev_err(chan2dev(chan),
 878                                        "not enough descriptors available\n");
 879                                goto err_desc_get;
 880                        }
 881
 882                        desc->lli.sar = mem;
 883                        desc->lli.dar = reg;
 884                        desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
 885                        if ((len >> mem_width) > dwc->block_size) {
 886                                dlen = dwc->block_size << mem_width;
 887                                mem += dlen;
 888                                len -= dlen;
 889                        } else {
 890                                dlen = len;
 891                                len = 0;
 892                        }
 893
 894                        desc->lli.ctlhi = dlen >> mem_width;
 895                        desc->len = dlen;
 896
 897                        if (!first) {
 898                                first = desc;
 899                        } else {
 900                                prev->lli.llp = desc->txd.phys;
 901                                list_add_tail(&desc->desc_node,
 902                                                &first->tx_list);
 903                        }
 904                        prev = desc;
 905                        total_len += dlen;
 906
 907                        if (len)
 908                                goto slave_sg_todev_fill_desc;
 909                }
 910                break;
 911        case DMA_DEV_TO_MEM:
 912                reg_width = __fls(sconfig->src_addr_width);
 913                reg = sconfig->src_addr;
 914                ctllo = (DWC_DEFAULT_CTLLO(chan)
 915                                | DWC_CTLL_SRC_WIDTH(reg_width)
 916                                | DWC_CTLL_DST_INC
 917                                | DWC_CTLL_SRC_FIX);
 918
 919                ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
 920                        DWC_CTLL_FC(DW_DMA_FC_D_P2M);
 921
 922                data_width = dwc_get_data_width(chan, DST_MASTER);
 923
 924                for_each_sg(sgl, sg, sg_len, i) {
 925                        struct dw_desc  *desc;
 926                        u32             len, dlen, mem;
 927
 928                        mem = sg_dma_address(sg);
 929                        len = sg_dma_len(sg);
 930
 931                        mem_width = min_t(unsigned int,
 932                                          data_width, dwc_fast_fls(mem | len));
 933
 934slave_sg_fromdev_fill_desc:
 935                        desc = dwc_desc_get(dwc);
 936                        if (!desc) {
 937                                dev_err(chan2dev(chan),
 938                                                "not enough descriptors available\n");
 939                                goto err_desc_get;
 940                        }
 941
 942                        desc->lli.sar = reg;
 943                        desc->lli.dar = mem;
 944                        desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
 945                        if ((len >> reg_width) > dwc->block_size) {
 946                                dlen = dwc->block_size << reg_width;
 947                                mem += dlen;
 948                                len -= dlen;
 949                        } else {
 950                                dlen = len;
 951                                len = 0;
 952                        }
 953                        desc->lli.ctlhi = dlen >> reg_width;
 954                        desc->len = dlen;
 955
 956                        if (!first) {
 957                                first = desc;
 958                        } else {
 959                                prev->lli.llp = desc->txd.phys;
 960                                list_add_tail(&desc->desc_node,
 961                                                &first->tx_list);
 962                        }
 963                        prev = desc;
 964                        total_len += dlen;
 965
 966                        if (len)
 967                                goto slave_sg_fromdev_fill_desc;
 968                }
 969                break;
 970        default:
 971                return NULL;
 972        }
 973
 974        if (flags & DMA_PREP_INTERRUPT)
 975                /* Trigger interrupt after last block */
 976                prev->lli.ctllo |= DWC_CTLL_INT_EN;
 977
 978        prev->lli.llp = 0;
 979        first->total_len = total_len;
 980
 981        return &first->txd;
 982
 983err_desc_get:
 984        dwc_desc_put(dwc, first);
 985        return NULL;
 986}
 987
 988/*
 989 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 990 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 991 *
 992 * NOTE: burst size 2 is not supported by controller.
 993 *
 994 * This is done by taking the position of the most significant set bit: fls(n) - 2.
 995 */
 996static inline void convert_burst(u32 *maxburst)
 997{
 998        if (*maxburst > 1)
 999                *maxburst = fls(*maxburst) - 2;
1000        else
1001                *maxburst = 0;
1002}
1003
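    /*
     * slave_id from platform data may be offset by the controller's first
     * request line; subtract request_line_base so CFG_HI is programmed with
     * a zero-based hardware request number.
     */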
1004static inline void convert_slave_id(struct dw_dma_chan *dwc)
1005{
1006        struct dw_dma *dw = to_dw_dma(dwc->chan.device);
1007
1008        dwc->dma_sconfig.slave_id -= dw->request_line_base;
1009}
1010
1011static int
1012set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
1013{
1014        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1015
1016        /* Check if chan will be configured for slave transfers */
1017        if (!is_slave_direction(sconfig->direction))
1018                return -EINVAL;
1019
1020        memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
1021        dwc->direction = sconfig->direction;
1022
1023        convert_burst(&dwc->dma_sconfig.src_maxburst);
1024        convert_burst(&dwc->dma_sconfig.dst_maxburst);
1025        convert_slave_id(dwc);
1026
1027        return 0;
1028}
1029
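    /* Suspend the channel and wait for its FIFO to drain before reporting it paused */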
1030static inline void dwc_chan_pause(struct dw_dma_chan *dwc)
1031{
1032        u32 cfglo = channel_readl(dwc, CFG_LO);
1033
1034        channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
1035        while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
1036                cpu_relax();
1037
1038        dwc->paused = true;
1039}
1040
1041static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
1042{
1043        u32 cfglo = channel_readl(dwc, CFG_LO);
1044
1045        channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
1046
1047        dwc->paused = false;
1048}
1049
1050static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1051                       unsigned long arg)
1052{
1053        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1054        struct dw_dma           *dw = to_dw_dma(chan->device);
1055        struct dw_desc          *desc, *_desc;
1056        unsigned long           flags;
1057        LIST_HEAD(list);
1058
1059        if (cmd == DMA_PAUSE) {
1060                spin_lock_irqsave(&dwc->lock, flags);
1061
1062                dwc_chan_pause(dwc);
1063
1064                spin_unlock_irqrestore(&dwc->lock, flags);
1065        } else if (cmd == DMA_RESUME) {
1066                if (!dwc->paused)
1067                        return 0;
1068
1069                spin_lock_irqsave(&dwc->lock, flags);
1070
1071                dwc_chan_resume(dwc);
1072
1073                spin_unlock_irqrestore(&dwc->lock, flags);
1074        } else if (cmd == DMA_TERMINATE_ALL) {
1075                spin_lock_irqsave(&dwc->lock, flags);
1076
1077                clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
1078
1079                dwc_chan_disable(dw, dwc);
1080
1081                dwc_chan_resume(dwc);
1082
1083                /* active_list entries will end up before queued entries */
1084                list_splice_init(&dwc->queue, &list);
1085                list_splice_init(&dwc->active_list, &list);
1086
1087                spin_unlock_irqrestore(&dwc->lock, flags);
1088
1089                /* Flush all pending and queued descriptors */
1090                list_for_each_entry_safe(desc, _desc, &list, desc_node)
1091                        dwc_descriptor_complete(dwc, desc, false);
1092        } else if (cmd == DMA_SLAVE_CONFIG) {
1093                return set_runtime_config(chan, (struct dma_slave_config *)arg);
1094        } else {
1095                return -ENXIO;
1096        }
1097
1098        return 0;
1099}
1100
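    /*
     * dwc->residue is updated as blocks complete; for a soft-LLP transfer
     * still in flight, also subtract what the hardware has moved so far.
     */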
1101static inline u32 dwc_get_residue(struct dw_dma_chan *dwc)
1102{
1103        unsigned long flags;
1104        u32 residue;
1105
1106        spin_lock_irqsave(&dwc->lock, flags);
1107
1108        residue = dwc->residue;
1109        if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1110                residue -= dwc_get_sent(dwc);
1111
1112        spin_unlock_irqrestore(&dwc->lock, flags);
1113        return residue;
1114}
1115
1116static enum dma_status
1117dwc_tx_status(struct dma_chan *chan,
1118              dma_cookie_t cookie,
1119              struct dma_tx_state *txstate)
1120{
1121        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1122        enum dma_status         ret;
1123
1124        ret = dma_cookie_status(chan, cookie, txstate);
1125        if (ret != DMA_SUCCESS) {
1126                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1127
1128                ret = dma_cookie_status(chan, cookie, txstate);
1129        }
1130
1131        if (ret != DMA_SUCCESS)
1132                dma_set_residue(txstate, dwc_get_residue(dwc));
1133
1134        if (dwc->paused)
1135                return DMA_PAUSED;
1136
1137        return ret;
1138}
1139
1140static void dwc_issue_pending(struct dma_chan *chan)
1141{
1142        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1143
1144        if (!list_empty(&dwc->queue))
1145                dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1146}
1147
1148static int dwc_alloc_chan_resources(struct dma_chan *chan)
1149{
1150        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1151        struct dw_dma           *dw = to_dw_dma(chan->device);
1152        struct dw_desc          *desc;
1153        int                     i;
1154        unsigned long           flags;
1155
1156        dev_vdbg(chan2dev(chan), "%s\n", __func__);
1157
1158        /* ASSERT:  channel is idle */
1159        if (dma_readl(dw, CH_EN) & dwc->mask) {
1160                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1161                return -EIO;
1162        }
1163
1164        dma_cookie_init(chan);
1165
1166        /*
1167         * NOTE: some controllers may have additional features that we
1168         * need to initialize here, like "scatter-gather" (which
1169         * doesn't mean what you think it means), and status writeback.
1170         */
1171
1172        spin_lock_irqsave(&dwc->lock, flags);
1173        i = dwc->descs_allocated;
1174        while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
1175                dma_addr_t phys;
1176
1177                spin_unlock_irqrestore(&dwc->lock, flags);
1178
1179                desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys);
1180                if (!desc)
1181                        goto err_desc_alloc;
1182
1183                memset(desc, 0, sizeof(struct dw_desc));
1184
1185                INIT_LIST_HEAD(&desc->tx_list);
1186                dma_async_tx_descriptor_init(&desc->txd, chan);
1187                desc->txd.tx_submit = dwc_tx_submit;
1188                desc->txd.flags = DMA_CTRL_ACK;
1189                desc->txd.phys = phys;
1190
1191                dwc_desc_put(dwc, desc);
1192
1193                spin_lock_irqsave(&dwc->lock, flags);
1194                i = ++dwc->descs_allocated;
1195        }
1196
1197        spin_unlock_irqrestore(&dwc->lock, flags);
1198
1199        dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1200
1201        return i;
1202
1203err_desc_alloc:
1204        dev_info(chan2dev(chan), "only allocated %d descriptors\n", i);
1205
1206        return i;
1207}
1208
1209static void dwc_free_chan_resources(struct dma_chan *chan)
1210{
1211        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1212        struct dw_dma           *dw = to_dw_dma(chan->device);
1213        struct dw_desc          *desc, *_desc;
1214        unsigned long           flags;
1215        LIST_HEAD(list);
1216
1217        dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1218                        dwc->descs_allocated);
1219
1220        /* ASSERT:  channel is idle */
1221        BUG_ON(!list_empty(&dwc->active_list));
1222        BUG_ON(!list_empty(&dwc->queue));
1223        BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1224
1225        spin_lock_irqsave(&dwc->lock, flags);
1226        list_splice_init(&dwc->free_list, &list);
1227        dwc->descs_allocated = 0;
1228        dwc->initialized = false;
1229
1230        /* Disable interrupts */
1231        channel_clear_bit(dw, MASK.XFER, dwc->mask);
1232        channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1233
1234        spin_unlock_irqrestore(&dwc->lock, flags);
1235
1236        list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1237                dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1238                dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
1239        }
1240
1241        dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1242}
1243
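    /* Matching data handed to dw_dma_generic_filter() via dma_request_channel() */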
1244struct dw_dma_filter_args {
1245        struct dw_dma *dw;
1246        unsigned int req;
1247        unsigned int src;
1248        unsigned int dst;
1249};
1250
1251static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
1252{
1253        struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1254        struct dw_dma *dw = to_dw_dma(chan->device);
1255        struct dw_dma_filter_args *fargs = param;
1256        struct dw_dma_slave *dws = &dwc->slave;
1257
1258        /* Ensure this channel belongs to the controller we were asked for */
1259        if (chan->device != &fargs->dw->dma)
1260                return false;
1261
1262        dws->dma_dev    = dw->dma.dev;
1263        dws->cfg_hi     = ~0;
1264        dws->cfg_lo     = ~0;
1265        dws->src_master = fargs->src;
1266        dws->dst_master = fargs->dst;
1267
1268        dwc->request_line = fargs->req;
1269
1270        chan->private = dws;
1271
1272        return true;
1273}
1274
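    /*
     * The DT dma-spec for this controller carries three cells: request line,
     * source master and destination master for the requested channel.
     */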
1275static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
1276                                         struct of_dma *ofdma)
1277{
1278        struct dw_dma *dw = ofdma->of_dma_data;
1279        struct dw_dma_filter_args fargs = {
1280                .dw = dw,
1281        };
1282        dma_cap_mask_t cap;
1283
1284        if (dma_spec->args_count != 3)
1285                return NULL;
1286
1287        fargs.req = dma_spec->args[0];
1288        fargs.src = dma_spec->args[1];
1289        fargs.dst = dma_spec->args[2];
1290
1291        if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
1292                    fargs.src >= dw->nr_masters ||
1293                    fargs.dst >= dw->nr_masters))
1294                return NULL;
1295
1296        dma_cap_zero(cap);
1297        dma_cap_set(DMA_SLAVE, cap);
1298
1299        /* TODO: there should be a simpler way to do this */
1300        return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
1301}
1302
1303/* --------------------- Cyclic DMA API extensions -------------------- */
1304
1305/**
1306 * dw_dma_cyclic_start - start the cyclic DMA transfer
1307 * @chan: the DMA channel to start
1308 *
1309 * Must be called with soft interrupts disabled. Returns zero on success or
1310 * -errno on failure.
1311 */
1312int dw_dma_cyclic_start(struct dma_chan *chan)
1313{
1314        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1315        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1316        unsigned long           flags;
1317
1318        if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1319                dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1320                return -ENODEV;
1321        }
1322
1323        spin_lock_irqsave(&dwc->lock, flags);
1324
1325        /* assert channel is idle */
1326        if (dma_readl(dw, CH_EN) & dwc->mask) {
1327                dev_err(chan2dev(&dwc->chan),
1328                        "BUG: Attempted to start non-idle channel\n");
1329                dwc_dump_chan_regs(dwc);
1330                spin_unlock_irqrestore(&dwc->lock, flags);
1331                return -EBUSY;
1332        }
1333
1334        dma_writel(dw, CLEAR.ERROR, dwc->mask);
1335        dma_writel(dw, CLEAR.XFER, dwc->mask);
1336
1337        /* setup DMAC channel registers */
1338        channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1339        channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1340        channel_writel(dwc, CTL_HI, 0);
1341
1342        channel_set_bit(dw, CH_EN, dwc->mask);
1343
1344        spin_unlock_irqrestore(&dwc->lock, flags);
1345
1346        return 0;
1347}
1348EXPORT_SYMBOL(dw_dma_cyclic_start);
1349
1350/**
1351 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1352 * @chan: the DMA channel to stop
1353 *
1354 * Must be called with soft interrupts disabled.
1355 */
1356void dw_dma_cyclic_stop(struct dma_chan *chan)
1357{
1358        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1359        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1360        unsigned long           flags;
1361
1362        spin_lock_irqsave(&dwc->lock, flags);
1363
1364        dwc_chan_disable(dw, dwc);
1365
1366        spin_unlock_irqrestore(&dwc->lock, flags);
1367}
1368EXPORT_SYMBOL(dw_dma_cyclic_stop);
1369
1370/**
1371 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1372 * @chan: the DMA channel to prepare
1373 * @buf_addr: physical DMA address where the buffer starts
1374 * @buf_len: total number of bytes for the entire buffer
1375 * @period_len: number of bytes for each period
1376 * @direction: transfer direction, to or from device
1377 *
1378 * Must be called before trying to start the transfer. Returns a valid struct
1379 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1380 */
1381struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1382                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1383                enum dma_transfer_direction direction)
1384{
1385        struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
1386        struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
1387        struct dw_cyclic_desc           *cdesc;
1388        struct dw_cyclic_desc           *retval = NULL;
1389        struct dw_desc                  *desc;
1390        struct dw_desc                  *last = NULL;
1391        unsigned long                   was_cyclic;
1392        unsigned int                    reg_width;
1393        unsigned int                    periods;
1394        unsigned int                    i;
1395        unsigned long                   flags;
1396
1397        spin_lock_irqsave(&dwc->lock, flags);
1398        if (dwc->nollp) {
1399                spin_unlock_irqrestore(&dwc->lock, flags);
1400                dev_dbg(chan2dev(&dwc->chan),
1401                                "channel doesn't support LLP transfers\n");
1402                return ERR_PTR(-EINVAL);
1403        }
1404
1405        if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1406                spin_unlock_irqrestore(&dwc->lock, flags);
1407                dev_dbg(chan2dev(&dwc->chan),
1408                                "queue and/or active list are not empty\n");
1409                return ERR_PTR(-EBUSY);
1410        }
1411
1412        was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1413        spin_unlock_irqrestore(&dwc->lock, flags);
1414        if (was_cyclic) {
1415                dev_dbg(chan2dev(&dwc->chan),
1416                                "channel already prepared for cyclic DMA\n");
1417                return ERR_PTR(-EBUSY);
1418        }
1419
1420        retval = ERR_PTR(-EINVAL);
1421
1422        if (unlikely(!is_slave_direction(direction)))
1423                goto out_err;
1424
1425        dwc->direction = direction;
1426
1427        if (direction == DMA_MEM_TO_DEV)
1428                reg_width = __ffs(sconfig->dst_addr_width);
1429        else
1430                reg_width = __ffs(sconfig->src_addr_width);
1431
1432        periods = buf_len / period_len;
1433
1434        /* Check for too big/unaligned periods and unaligned DMA buffer. */
1435        if (period_len > (dwc->block_size << reg_width))
1436                goto out_err;
1437        if (unlikely(period_len & ((1 << reg_width) - 1)))
1438                goto out_err;
1439        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1440                goto out_err;
1441
1442        retval = ERR_PTR(-ENOMEM);
1443
1444        if (periods > NR_DESCS_PER_CHANNEL)
1445                goto out_err;
1446
1447        cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1448        if (!cdesc)
1449                goto out_err;
1450
1451        cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1452        if (!cdesc->desc)
1453                goto out_err_alloc;
1454
1455        for (i = 0; i < periods; i++) {
1456                desc = dwc_desc_get(dwc);
1457                if (!desc)
1458                        goto out_err_desc_get;
1459
1460                switch (direction) {
1461                case DMA_MEM_TO_DEV:
1462                        desc->lli.dar = sconfig->dst_addr;
1463                        desc->lli.sar = buf_addr + (period_len * i);
1464                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1465                                        | DWC_CTLL_DST_WIDTH(reg_width)
1466                                        | DWC_CTLL_SRC_WIDTH(reg_width)
1467                                        | DWC_CTLL_DST_FIX
1468                                        | DWC_CTLL_SRC_INC
1469                                        | DWC_CTLL_INT_EN);
1470
1471                        desc->lli.ctllo |= sconfig->device_fc ?
1472                                DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1473                                DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1474
1475                        break;
1476                case DMA_DEV_TO_MEM:
1477                        desc->lli.dar = buf_addr + (period_len * i);
1478                        desc->lli.sar = sconfig->src_addr;
1479                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1480                                        | DWC_CTLL_SRC_WIDTH(reg_width)
1481                                        | DWC_CTLL_DST_WIDTH(reg_width)
1482                                        | DWC_CTLL_DST_INC
1483                                        | DWC_CTLL_SRC_FIX
1484                                        | DWC_CTLL_INT_EN);
1485
1486                        desc->lli.ctllo |= sconfig->device_fc ?
1487                                DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1488                                DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1489
1490                        break;
1491                default:
1492                        break;
1493                }
1494
1495                desc->lli.ctlhi = (period_len >> reg_width);
1496                cdesc->desc[i] = desc;
1497
1498                if (last)
1499                        last->lli.llp = desc->txd.phys;
1500
1501                last = desc;
1502        }
1503
1504        /* Let's make a cyclic list */
1505        last->lli.llp = cdesc->desc[0]->txd.phys;
1506
1507        dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
1508                        "period %zu periods %d\n", (unsigned long long)buf_addr,
1509                        buf_len, period_len, periods);
1510
1511        cdesc->periods = periods;
1512        dwc->cdesc = cdesc;
1513
1514        return cdesc;
1515
1516out_err_desc_get:
1517        while (i--)
1518                dwc_desc_put(dwc, cdesc->desc[i]);
1519out_err_alloc:
1520        kfree(cdesc);
1521out_err:
1522        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1523        return (struct dw_cyclic_desc *)retval;
1524}
1525EXPORT_SYMBOL(dw_dma_cyclic_prep);
1526
1527/**
1528 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1529 * @chan: the DMA channel to free
1530 */
1531void dw_dma_cyclic_free(struct dma_chan *chan)
1532{
1533        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1534        struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1535        struct dw_cyclic_desc   *cdesc = dwc->cdesc;
1536        int                     i;
1537        unsigned long           flags;
1538
1539        dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
1540
1541        if (!cdesc)
1542                return;
1543
1544        spin_lock_irqsave(&dwc->lock, flags);
1545
1546        dwc_chan_disable(dw, dwc);
1547
1548        dma_writel(dw, CLEAR.ERROR, dwc->mask);
1549        dma_writel(dw, CLEAR.XFER, dwc->mask);
1550
1551        spin_unlock_irqrestore(&dwc->lock, flags);
1552
1553        for (i = 0; i < cdesc->periods; i++)
1554                dwc_desc_put(dwc, cdesc->desc[i]);
1555
1556        kfree(cdesc->desc);
1557        kfree(cdesc);
1558
1559        clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1560}
1561EXPORT_SYMBOL(dw_dma_cyclic_free);
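
/*
 * Minimal usage sketch for the cyclic helpers above.  It belongs in a client
 * driver (a sound or serial driver, for instance), not in this file, and is
 * kept out of the build.  The function name, FIFO address, burst sizes and
 * buffer geometry are hypothetical placeholders; teardown is the mirror
 * image: dw_dma_cyclic_stop() followed by dw_dma_cyclic_free().
 */
#if 0
static int example_start_cyclic_tx(struct dma_chan *chan, dma_addr_t buf,
                                   size_t buf_len, size_t period_len,
                                   dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 4,
                .dst_maxburst   = 4,
        };
        struct dw_cyclic_desc *cdesc;
        int ret;

        /* Program the slave side before preparing the descriptor ring */
        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        /* Build the ring of one descriptor per period */
        cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
                                   DMA_MEM_TO_DEV);
        if (IS_ERR(cdesc))
                return PTR_ERR(cdesc);

        /* A per-period callback can be hooked up via the returned cdesc */
        return dw_dma_cyclic_start(chan);
}
#endif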
1562
1563/*----------------------------------------------------------------------*/
1564
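/*
 * Turn the whole controller off: clear the global enable bit, mask all
 * per-channel interrupt sources, wait until the hardware reports the DMA
 * engine as disabled, then mark every channel as uninitialized.
 */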
1565static void dw_dma_off(struct dw_dma *dw)
1566{
1567        int i;
1568
1569        dma_writel(dw, CFG, 0);
1570
1571        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1572        channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1573        channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1574        channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1575
1576        while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1577                cpu_relax();
1578
1579        for (i = 0; i < dw->dma.chancnt; i++)
1580                dw->chan[i].initialized = false;
1581}
1582
1583#ifdef CONFIG_OF
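/*
 * Build platform data from the device tree node.  Only "dma-channels" is
 * mandatory; the remaining properties fall back to zero-initialized
 * defaults, and more than four masters is rejected.
 */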
1584static struct dw_dma_platform_data *
1585dw_dma_parse_dt(struct platform_device *pdev)
1586{
1587        struct device_node *np = pdev->dev.of_node;
1588        struct dw_dma_platform_data *pdata;
1589        u32 tmp, arr[4];
1590
1591        if (!np) {
1592                dev_err(&pdev->dev, "Missing DT data\n");
1593                return NULL;
1594        }
1595
1596        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1597        if (!pdata)
1598                return NULL;
1599
1600        if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
1601                return NULL;
1602
1603        if (of_property_read_bool(np, "is_private"))
1604                pdata->is_private = true;
1605
1606        if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
1607                pdata->chan_allocation_order = (unsigned char)tmp;
1608
1609        if (!of_property_read_u32(np, "chan_priority", &tmp))
1610                pdata->chan_priority = tmp;
1611
1612        if (!of_property_read_u32(np, "block_size", &tmp))
1613                pdata->block_size = tmp;
1614
1615        if (!of_property_read_u32(np, "dma-masters", &tmp)) {
1616                if (tmp > 4)
1617                        return NULL;
1618
1619                pdata->nr_masters = tmp;
1620        }
1621
1622        if (!of_property_read_u32_array(np, "data_width", arr,
1623                                pdata->nr_masters))
1624                for (tmp = 0; tmp < pdata->nr_masters; tmp++)
1625                        pdata->data_width[tmp] = arr[tmp];
1626
1627        return pdata;
1628}
1629#else
1630static inline struct dw_dma_platform_data *
1631dw_dma_parse_dt(struct platform_device *pdev)
1632{
1633        return NULL;
1634}
1635#endif
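
/*
 * Minimal sketch of platform data for the legacy non-DT, non-autoconfigured
 * case handled below: a board file would register a "dw_dmac" platform
 * device carrying a structure like this.  Every value is a hypothetical
 * example, not a requirement of any particular SoC, and the sketch is kept
 * out of the build.
 */
#if 0
static struct dw_dma_platform_data example_dw_pdata = {
        .nr_channels            = 8,
        .is_private             = true,
        .chan_allocation_order  = CHAN_ALLOCATION_ASCENDING,
        .chan_priority          = CHAN_PRIORITY_ASCENDING,
        .block_size             = 4095,
        .nr_masters             = 2,
        .data_width             = { 3, 3 },     /* e.g. two 64-bit AHB masters */
};
#endif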
1636
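/*
 * Probe path: map the controller registers, read the hardware parameter
 * word (DW_PARAMS) to see whether the block can describe itself, obtain
 * platform data (board file, DT or autodetected defaults), set up every
 * channel and finally register with the dmaengine core.
 */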
1637static int dw_probe(struct platform_device *pdev)
1638{
1639        const struct platform_device_id *match;
1640        struct dw_dma_platform_data *pdata;
1641        struct resource         *io;
1642        struct dw_dma           *dw;
1643        size_t                  size;
1644        void __iomem            *regs;
1645        bool                    autocfg;
1646        unsigned int            dw_params;
1647        unsigned int            nr_channels;
1648        unsigned int            max_blk_size = 0;
1649        int                     irq;
1650        int                     err;
1651        int                     i;
1652
1653        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1654        if (!io)
1655                return -EINVAL;
1656
1657        irq = platform_get_irq(pdev, 0);
1658        if (irq < 0)
1659                return irq;
1660
1661        regs = devm_ioremap_resource(&pdev->dev, io);
1662        if (IS_ERR(regs))
1663                return PTR_ERR(regs);
1664
1665        /* Apply default dma_mask if needed */
1666        if (!pdev->dev.dma_mask) {
1667                pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
1668                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
1669        }
1670
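        /*
         * The hardware parameter word is only meaningful when the controller
         * was synthesized with its configuration encoded into DW_PARAMS;
         * DW_PARAMS_EN tells us whether the autodetection below can be used.
         */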
1671        dw_params = dma_read_byaddr(regs, DW_PARAMS);
1672        autocfg = dw_params >> DW_PARAMS_EN & 0x1;
1673
1674        dev_dbg(&pdev->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1675
1676        pdata = dev_get_platdata(&pdev->dev);
1677        if (!pdata)
1678                pdata = dw_dma_parse_dt(pdev);
1679
1680        if (!pdata && autocfg) {
1681                pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1682                if (!pdata)
1683                        return -ENOMEM;
1684
1685                /* Fill platform data with the default values */
1686                pdata->is_private = true;
1687                pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1688                pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1689        } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
1690                return -EINVAL;
1691
1692        if (autocfg)
1693                nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
1694        else
1695                nr_channels = pdata->nr_channels;
1696
1697        size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
1698        dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1699        if (!dw)
1700                return -ENOMEM;
1701
1702        dw->clk = devm_clk_get(&pdev->dev, "hclk");
1703        if (IS_ERR(dw->clk))
1704                return PTR_ERR(dw->clk);
1705        clk_prepare_enable(dw->clk);
1706
1707        dw->regs = regs;
1708
1709        /* get hardware configuration parameters */
1710        if (autocfg) {
1711                max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
1712
1713                dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1714                for (i = 0; i < dw->nr_masters; i++) {
1715                        dw->data_width[i] =
1716                                (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
1717                }
1718        } else {
1719                dw->nr_masters = pdata->nr_masters;
1720                memcpy(dw->data_width, pdata->data_width, 4);
1721        }
1722
1723        /* Get the base request line if set */
1724        match = platform_get_device_id(pdev);
1725        if (match)
1726                dw->request_line_base = (unsigned int)match->driver_data;
1727
1728        /* Calculate all channel mask before DMA setup */
1729        dw->all_chan_mask = (1 << nr_channels) - 1;
1730
1731        /* force dma off, just in case */
1732        dw_dma_off(dw);
1733
1734        /* disable BLOCK interrupts as well */
1735        channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1736
1737        err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
1738                               "dw_dmac", dw);
1739        if (err)
1740                return err;
1741
1742        platform_set_drvdata(pdev, dw);
1743
1744        /* create a pool of consistent memory blocks for hardware descriptors */
1745        dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev,
1746                                         sizeof(struct dw_desc), 4, 0);
1747        if (!dw->desc_pool) {
1748                dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1749                return -ENOMEM;
1750        }
1751
1752        tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1753
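        /*
         * Set up per-channel state: cookie, position in the channel list
         * (which encodes the allocation order), priority, register block,
         * interrupt mask, and the channel's maximum block size and
         * multi-block (LLP) capability.
         */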
1754        INIT_LIST_HEAD(&dw->dma.channels);
1755        for (i = 0; i < nr_channels; i++) {
1756                struct dw_dma_chan      *dwc = &dw->chan[i];
1757                int                     r = nr_channels - i - 1;
1758
1759                dwc->chan.device = &dw->dma;
1760                dma_cookie_init(&dwc->chan);
1761                if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1762                        list_add_tail(&dwc->chan.device_node,
1763                                        &dw->dma.channels);
1764                else
1765                        list_add(&dwc->chan.device_node, &dw->dma.channels);
1766
1767                /* 7 is highest priority & 0 is lowest. */
1768                if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1769                        dwc->priority = r;
1770                else
1771                        dwc->priority = i;
1772
1773                dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1774                spin_lock_init(&dwc->lock);
1775                dwc->mask = 1 << i;
1776
1777                INIT_LIST_HEAD(&dwc->active_list);
1778                INIT_LIST_HEAD(&dwc->queue);
1779                INIT_LIST_HEAD(&dwc->free_list);
1780
1781                channel_clear_bit(dw, CH_EN, dwc->mask);
1782
1783                dwc->direction = DMA_TRANS_NONE;
1784
1785                /* hardware configuration */
1786                if (autocfg) {
1787                        unsigned int dwc_params;
1788
1789                        dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
1790                                                     DWC_PARAMS);
1791
1792                        dev_dbg(&pdev->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1793                                            dwc_params);
1794
1795                        /* Decode the maximum block size for this channel. The
1796                         * stored 4-bit value represents blocks from 0x00 for 3
1797                         * up to 0x0a for 4095. */
1798                        dwc->block_size =
1799                                (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
1800                        dwc->nollp =
1801                                (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1802                } else {
1803                        dwc->block_size = pdata->block_size;
1804
1805                        /* Check if the channel supports multi-block transfers */
1806                        channel_writel(dwc, LLP, 0xfffffffc);
1807                        dwc->nollp =
1808                                (channel_readl(dwc, LLP) & 0xfffffffc) == 0;
1809                        channel_writel(dwc, LLP, 0);
1810                }
1811        }
1812
1813        /* Clear all interrupts on all channels. */
1814        dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1815        dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1816        dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1817        dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1818        dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1819
1820        dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1821        dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1822        if (pdata->is_private)
1823                dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1824        dw->dma.dev = &pdev->dev;
1825        dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1826        dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1827
1828        dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1829
1830        dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1831        dw->dma.device_control = dwc_control;
1832
1833        dw->dma.device_tx_status = dwc_tx_status;
1834        dw->dma.device_issue_pending = dwc_issue_pending;
1835
1836        dma_writel(dw, CFG, DW_CFG_DMA_EN);
1837
1838        dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
1839                 nr_channels);
1840
1841        dma_async_device_register(&dw->dma);
1842
1843        if (pdev->dev.of_node) {
1844                err = of_dma_controller_register(pdev->dev.of_node,
1845                                                 dw_dma_xlate, dw);
1846                if (err && err != -ENODEV)
1847                        dev_err(&pdev->dev,
1848                                "could not register of_dma_controller\n");
1849        }
1850
1851        return 0;
1852}
1853
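/*
 * Undo what probe set up: drop the OF controller registration, switch the
 * hardware off, unregister from the dmaengine core, kill the tasklet and
 * detach every channel.
 */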
1854static int dw_remove(struct platform_device *pdev)
1855{
1856        struct dw_dma           *dw = platform_get_drvdata(pdev);
1857        struct dw_dma_chan      *dwc, *_dwc;
1858
1859        if (pdev->dev.of_node)
1860                of_dma_controller_free(pdev->dev.of_node);
1861        dw_dma_off(dw);
1862        dma_async_device_unregister(&dw->dma);
1863
1864        tasklet_kill(&dw->tasklet);
1865
1866        list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1867                        chan.device_node) {
1868                list_del(&dwc->chan.device_node);
1869                channel_clear_bit(dw, CH_EN, dwc->mask);
1870        }
1871
1872        return 0;
1873}
1874
1875static void dw_shutdown(struct platform_device *pdev)
1876{
1877        struct dw_dma   *dw = platform_get_drvdata(pdev);
1878
1879        dw_dma_off(dw);
1880        clk_disable_unprepare(dw->clk);
1881}
1882
1883static int dw_suspend_noirq(struct device *dev)
1884{
1885        struct platform_device *pdev = to_platform_device(dev);
1886        struct dw_dma   *dw = platform_get_drvdata(pdev);
1887
1888        dw_dma_off(dw);
1889        clk_disable_unprepare(dw->clk);
1890
1891        return 0;
1892}
1893
1894static int dw_resume_noirq(struct device *dev)
1895{
1896        struct platform_device *pdev = to_platform_device(dev);
1897        struct dw_dma   *dw = platform_get_drvdata(pdev);
1898
1899        clk_prepare_enable(dw->clk);
1900        dma_writel(dw, CFG, DW_CFG_DMA_EN);
1901
1902        return 0;
1903}
1904
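/*
 * Every system sleep transition reuses the same pair of noirq handlers:
 * suspend turns the DMA engine off and gates the clock, resume ungates the
 * clock and re-enables the controller.
 */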
1905static const struct dev_pm_ops dw_dev_pm_ops = {
1906        .suspend_noirq = dw_suspend_noirq,
1907        .resume_noirq = dw_resume_noirq,
1908        .freeze_noirq = dw_suspend_noirq,
1909        .thaw_noirq = dw_resume_noirq,
1910        .restore_noirq = dw_resume_noirq,
1911        .poweroff_noirq = dw_suspend_noirq,
1912};
1913
1914#ifdef CONFIG_OF
1915static const struct of_device_id dw_dma_id_table[] = {
1916        { .compatible = "snps,dma-spear1340" },
1917        {}
1918};
1919MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1920#endif
1921
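/*
 * Platform device ID table; driver_data carries the base request line,
 * consumed in dw_probe() via platform_get_device_id().
 */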
1922static const struct platform_device_id dw_dma_ids[] = {
1923        /* Name,        Request Line Base */
1924        { "INTL9C60",   (kernel_ulong_t)16 },
1925        { }
1926};
1927
1928static struct platform_driver dw_driver = {
1929        .probe          = dw_probe,
1930        .remove         = dw_remove,
1931        .shutdown       = dw_shutdown,
1932        .driver = {
1933                .name   = "dw_dmac",
1934                .pm     = &dw_dev_pm_ops,
1935                .of_match_table = of_match_ptr(dw_dma_id_table),
1936        },
1937        .id_table       = dw_dma_ids,
1938};
1939
1940static int __init dw_init(void)
1941{
1942        return platform_driver_register(&dw_driver);
1943}
1944subsys_initcall(dw_init);
1945
1946static void __exit dw_exit(void)
1947{
1948        platform_driver_unregister(&dw_driver);
1949}
1950module_exit(dw_exit);
1951
1952MODULE_LICENSE("GPL v2");
1953MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1954MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1955MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
1956