linux/drivers/dma/tegra20-apb-dma.c
   1/*
   2 * DMA driver for Nvidia's Tegra20 APB DMA controller.
   3 *
   4 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms and conditions of the GNU General Public License,
   8 * version 2, as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope it will be useful, but WITHOUT
  11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  13 * more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 */
  18
  19#include <linux/bitops.h>
  20#include <linux/clk.h>
  21#include <linux/delay.h>
  22#include <linux/dmaengine.h>
  23#include <linux/dma-mapping.h>
  24#include <linux/err.h>
  25#include <linux/init.h>
  26#include <linux/interrupt.h>
  27#include <linux/io.h>
  28#include <linux/mm.h>
  29#include <linux/module.h>
  30#include <linux/of.h>
  31#include <linux/of_device.h>
  32#include <linux/platform_device.h>
  33#include <linux/pm_runtime.h>
  34#include <linux/slab.h>
  35#include <linux/clk/tegra.h>
  36
  37#include "dmaengine.h"
  38
  39#define TEGRA_APBDMA_GENERAL                    0x0
  40#define TEGRA_APBDMA_GENERAL_ENABLE             BIT(31)
  41
  42#define TEGRA_APBDMA_CONTROL                    0x010
  43#define TEGRA_APBDMA_IRQ_MASK                   0x01c
  44#define TEGRA_APBDMA_IRQ_MASK_SET               0x020
  45
  46/* CSR register */
  47#define TEGRA_APBDMA_CHAN_CSR                   0x00
  48#define TEGRA_APBDMA_CSR_ENB                    BIT(31)
  49#define TEGRA_APBDMA_CSR_IE_EOC                 BIT(30)
  50#define TEGRA_APBDMA_CSR_HOLD                   BIT(29)
  51#define TEGRA_APBDMA_CSR_DIR                    BIT(28)
  52#define TEGRA_APBDMA_CSR_ONCE                   BIT(27)
  53#define TEGRA_APBDMA_CSR_FLOW                   BIT(21)
  54#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT          16
  55#define TEGRA_APBDMA_CSR_WCOUNT_MASK            0xFFFC
  56
  57/* STATUS register */
  58#define TEGRA_APBDMA_CHAN_STATUS                0x004
  59#define TEGRA_APBDMA_STATUS_BUSY                BIT(31)
  60#define TEGRA_APBDMA_STATUS_ISE_EOC             BIT(30)
  61#define TEGRA_APBDMA_STATUS_HALT                BIT(29)
  62#define TEGRA_APBDMA_STATUS_PING_PONG           BIT(28)
  63#define TEGRA_APBDMA_STATUS_COUNT_SHIFT         2
  64#define TEGRA_APBDMA_STATUS_COUNT_MASK          0xFFFC
  65
  66#define TEGRA_APBDMA_CHAN_CSRE                  0x00C
   67#define TEGRA_APBDMA_CHAN_CSRE_PAUSE            BIT(31)
  68
  69/* AHB memory address */
  70#define TEGRA_APBDMA_CHAN_AHBPTR                0x010
  71
  72/* AHB sequence register */
  73#define TEGRA_APBDMA_CHAN_AHBSEQ                0x14
  74#define TEGRA_APBDMA_AHBSEQ_INTR_ENB            BIT(31)
  75#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8         (0 << 28)
  76#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16        (1 << 28)
  77#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32        (2 << 28)
  78#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64        (3 << 28)
  79#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128       (4 << 28)
  80#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP           BIT(27)
  81#define TEGRA_APBDMA_AHBSEQ_BURST_1             (4 << 24)
  82#define TEGRA_APBDMA_AHBSEQ_BURST_4             (5 << 24)
  83#define TEGRA_APBDMA_AHBSEQ_BURST_8             (6 << 24)
  84#define TEGRA_APBDMA_AHBSEQ_DBL_BUF             BIT(19)
  85#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT          16
  86#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE           0
  87
  88/* APB address */
  89#define TEGRA_APBDMA_CHAN_APBPTR                0x018
  90
  91/* APB sequence register */
  92#define TEGRA_APBDMA_CHAN_APBSEQ                0x01c
  93#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8         (0 << 28)
  94#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16        (1 << 28)
  95#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32        (2 << 28)
  96#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64        (3 << 28)
  97#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128       (4 << 28)
  98#define TEGRA_APBDMA_APBSEQ_DATA_SWAP           BIT(27)
  99#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1         (1 << 16)
 100
  101/*
  102 * If any burst is in flight and the DMA is paused, this is the time (in
  103 * microseconds) needed for the in-flight burst to complete and the DMA status register to update.
  104 */
 105#define TEGRA_APBDMA_BURST_COMPLETE_TIME        20
 106
 107/* Channel base address offset from APBDMA base address */
 108#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET    0x1000
 109
 110/* DMA channel register space size */
 111#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE      0x20
 112
 113struct tegra_dma;
 114
  115/*
  116 * tegra_dma_chip_data: Tegra chip-specific DMA data
  117 * @nr_channels: Number of channels available in the controller.
  118 * @max_dma_count: Maximum DMA transfer count supported by the controller.
  119 * @support_channel_pause: Whether per-channel pause of DMA is supported.
  120 */
 121struct tegra_dma_chip_data {
 122        int nr_channels;
 123        int max_dma_count;
 124        bool support_channel_pause;
 125};
 126
 127/* DMA channel registers */
 128struct tegra_dma_channel_regs {
 129        unsigned long   csr;
 130        unsigned long   ahb_ptr;
 131        unsigned long   apb_ptr;
 132        unsigned long   ahb_seq;
 133        unsigned long   apb_seq;
 134};
 135
  136/*
  137 * tegra_dma_sg_req: DMA request details used to configure the hardware.
  138 * This contains the details of one sub-transfer.
  139 * A client's data transfer request can be broken into multiple
  140 * sub-transfers according to the requester details and hardware support.
  141 * Each sub-transfer is added to the transfer list and points to the Tegra
  142 * DMA descriptor which manages the transfer details.
  143 */
 144struct tegra_dma_sg_req {
 145        struct tegra_dma_channel_regs   ch_regs;
 146        int                             req_len;
 147        bool                            configured;
 148        bool                            last_sg;
 149        bool                            half_done;
 150        struct list_head                node;
 151        struct tegra_dma_desc           *dma_desc;
 152};
 153
  154/*
  155 * tegra_dma_desc: Tegra DMA descriptor which manages a client request.
  156 * This descriptor keeps track of transfer status, callbacks, request
  157 * counts, etc.
  158 */
 159struct tegra_dma_desc {
 160        struct dma_async_tx_descriptor  txd;
 161        int                             bytes_requested;
 162        int                             bytes_transferred;
 163        enum dma_status                 dma_status;
 164        struct list_head                node;
 165        struct list_head                tx_list;
 166        struct list_head                cb_node;
 167        int                             cb_count;
 168};
 169
 170struct tegra_dma_channel;
 171
 172typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
 173                                bool to_terminate);
 174
 175/* tegra_dma_channel: Channel specific information */
 176struct tegra_dma_channel {
 177        struct dma_chan         dma_chan;
 178        char                    name[30];
 179        bool                    config_init;
 180        int                     id;
 181        int                     irq;
 182        unsigned long           chan_base_offset;
 183        spinlock_t              lock;
 184        bool                    busy;
 185        struct tegra_dma        *tdma;
 186        bool                    cyclic;
 187
 188        /* Different lists for managing the requests */
 189        struct list_head        free_sg_req;
 190        struct list_head        pending_sg_req;
 191        struct list_head        free_dma_desc;
 192        struct list_head        cb_desc;
 193
  194        /* ISR handler and tasklet for the bottom half of interrupt handling */
 195        dma_isr_handler         isr_handler;
 196        struct tasklet_struct   tasklet;
 197        dma_async_tx_callback   callback;
 198        void                    *callback_param;
 199
 200        /* Channel-slave specific configuration */
 201        struct dma_slave_config dma_sconfig;
 202};
 203
 204/* tegra_dma: Tegra DMA specific information */
 205struct tegra_dma {
 206        struct dma_device               dma_dev;
 207        struct device                   *dev;
 208        struct clk                      *dma_clk;
 209        spinlock_t                      global_lock;
 210        void __iomem                    *base_addr;
 211        const struct tegra_dma_chip_data *chip_data;
 212
  213        /* Some registers need to be cached before suspend */
 214        u32                             reg_gen;
 215
 216        /* Last member of the structure */
 217        struct tegra_dma_channel channels[0];
 218};
 219
 220static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
 221{
 222        writel(val, tdma->base_addr + reg);
 223}
 224
 225static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
 226{
 227        return readl(tdma->base_addr + reg);
 228}
 229
 230static inline void tdc_write(struct tegra_dma_channel *tdc,
 231                u32 reg, u32 val)
 232{
 233        writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
 234}
 235
 236static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
 237{
 238        return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
 239}
 240
 241static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
 242{
 243        return container_of(dc, struct tegra_dma_channel, dma_chan);
 244}
 245
 246static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
 247                struct dma_async_tx_descriptor *td)
 248{
 249        return container_of(td, struct tegra_dma_desc, txd);
 250}
 251
 252static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
 253{
 254        return &tdc->dma_chan.dev->device;
 255}
 256
 257static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
 258static int tegra_dma_runtime_suspend(struct device *dev);
 259static int tegra_dma_runtime_resume(struct device *dev);
 260
  261/* Get a DMA descriptor from the free list; if none is available, allocate one. */
 262static struct tegra_dma_desc *tegra_dma_desc_get(
 263                struct tegra_dma_channel *tdc)
 264{
 265        struct tegra_dma_desc *dma_desc;
 266        unsigned long flags;
 267
 268        spin_lock_irqsave(&tdc->lock, flags);
 269
  270        /* Do not reuse a descriptor that is still waiting for its ack */
 271        list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
 272                if (async_tx_test_ack(&dma_desc->txd)) {
 273                        list_del(&dma_desc->node);
 274                        spin_unlock_irqrestore(&tdc->lock, flags);
 275                        dma_desc->txd.flags = 0;
 276                        return dma_desc;
 277                }
 278        }
 279
 280        spin_unlock_irqrestore(&tdc->lock, flags);
 281
 282        /* Allocate DMA desc */
 283        dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
 284        if (!dma_desc) {
 285                dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
 286                return NULL;
 287        }
 288
 289        dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
 290        dma_desc->txd.tx_submit = tegra_dma_tx_submit;
 291        dma_desc->txd.flags = 0;
 292        return dma_desc;
 293}
 294
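     /* Return a descriptor and its sub-transfer requests to the channel's free lists. */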
 295static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
 296                struct tegra_dma_desc *dma_desc)
 297{
 298        unsigned long flags;
 299
 300        spin_lock_irqsave(&tdc->lock, flags);
 301        if (!list_empty(&dma_desc->tx_list))
 302                list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
 303        list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
 304        spin_unlock_irqrestore(&tdc->lock, flags);
 305}
 306
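     /* Get an sg request from the free list, or allocate a new one if the list is empty. */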
 307static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
 308                struct tegra_dma_channel *tdc)
 309{
 310        struct tegra_dma_sg_req *sg_req = NULL;
 311        unsigned long flags;
 312
 313        spin_lock_irqsave(&tdc->lock, flags);
 314        if (!list_empty(&tdc->free_sg_req)) {
 315                sg_req = list_first_entry(&tdc->free_sg_req,
 316                                        typeof(*sg_req), node);
 317                list_del(&sg_req->node);
 318                spin_unlock_irqrestore(&tdc->lock, flags);
 319                return sg_req;
 320        }
 321        spin_unlock_irqrestore(&tdc->lock, flags);
 322
 323        sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
 324        if (!sg_req)
 325                dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
 326        return sg_req;
 327}
 328
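     /* Store the client's slave configuration; not allowed while requests are pending. */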
 329static int tegra_dma_slave_config(struct dma_chan *dc,
 330                struct dma_slave_config *sconfig)
 331{
 332        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 333
 334        if (!list_empty(&tdc->pending_sg_req)) {
 335                dev_err(tdc2dev(tdc), "Configuration not allowed\n");
 336                return -EBUSY;
 337        }
 338
 339        memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
 340        tdc->config_init = true;
 341        return 0;
 342}
 343
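     /*
      * Pause the whole controller by clearing the global enable bit.
      * The global lock stays held until tegra_dma_global_resume().
      */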
 344static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
 345        bool wait_for_burst_complete)
 346{
 347        struct tegra_dma *tdma = tdc->tdma;
 348
 349        spin_lock(&tdma->global_lock);
 350        tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
 351        if (wait_for_burst_complete)
 352                udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
 353}
 354
 355static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
 356{
 357        struct tegra_dma *tdma = tdc->tdma;
 358
 359        tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
 360        spin_unlock(&tdma->global_lock);
 361}
 362
 363static void tegra_dma_pause(struct tegra_dma_channel *tdc,
 364        bool wait_for_burst_complete)
 365{
 366        struct tegra_dma *tdma = tdc->tdma;
 367
 368        if (tdma->chip_data->support_channel_pause) {
 369                tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
 370                                TEGRA_APBDMA_CHAN_CSRE_PAUSE);
 371                if (wait_for_burst_complete)
 372                        udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
 373        } else {
 374                tegra_dma_global_pause(tdc, wait_for_burst_complete);
 375        }
 376}
 377
 378static void tegra_dma_resume(struct tegra_dma_channel *tdc)
 379{
 380        struct tegra_dma *tdma = tdc->tdma;
 381
 382        if (tdma->chip_data->support_channel_pause) {
 383                tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
 384        } else {
 385                tegra_dma_global_resume(tdc);
 386        }
 387}
 388
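     /* Disable the channel and its EOC interrupt, and clear any pending interrupt status. */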
 389static void tegra_dma_stop(struct tegra_dma_channel *tdc)
 390{
 391        u32 csr;
 392        u32 status;
 393
 394        /* Disable interrupts */
 395        csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
 396        csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
 397        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
 398
 399        /* Disable DMA */
 400        csr &= ~TEGRA_APBDMA_CSR_ENB;
 401        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
 402
 403        /* Clear interrupt status if it is there */
 404        status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 405        if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
 406                dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
 407                tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
 408        }
 409        tdc->busy = false;
 410}
 411
 412static void tegra_dma_start(struct tegra_dma_channel *tdc,
 413                struct tegra_dma_sg_req *sg_req)
 414{
 415        struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
 416
 417        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
 418        tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
 419        tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
 420        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
 421        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
 422
 423        /* Start DMA */
 424        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
 425                                ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
 426}
 427
 428static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
 429                struct tegra_dma_sg_req *nsg_req)
 430{
 431        unsigned long status;
 432
  433        /*
  434         * The DMA controller reloads the new configuration for the next
  435         * transfer after the last burst of the current transfer completes.
  436         * If there is no IEC status then the last burst has not completed
  437         * yet. The last burst may be in flight and complete while the DMA
  438         * is paused, but in that case it will neither raise an interrupt
  439         * nor reload the new configuration, so it is still safe to program
  440         * the registers here.
  441         * If there is already IEC status then the interrupt handler needs
  442         * to load the new configuration instead.
  443         */
 444        tegra_dma_pause(tdc, false);
 445        status  = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 446
  447        /*
  448         * If an interrupt is pending then do nothing, as the ISR will
  449         * handle the programming of the new request.
  450         */
 451        if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
 452                dev_err(tdc2dev(tdc),
 453                        "Skipping new configuration as interrupt is pending\n");
 454                tegra_dma_resume(tdc);
 455                return;
 456        }
 457
 458        /* Safe to program new configuration */
 459        tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
 460        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
 461        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
 462                                nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
 463        nsg_req->configured = true;
 464
 465        tegra_dma_resume(tdc);
 466}
 467
 468static void tdc_start_head_req(struct tegra_dma_channel *tdc)
 469{
 470        struct tegra_dma_sg_req *sg_req;
 471
 472        if (list_empty(&tdc->pending_sg_req))
 473                return;
 474
 475        sg_req = list_first_entry(&tdc->pending_sg_req,
 476                                        typeof(*sg_req), node);
 477        tegra_dma_start(tdc, sg_req);
 478        sg_req->configured = true;
 479        tdc->busy = true;
 480}
 481
 482static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
 483{
 484        struct tegra_dma_sg_req *hsgreq;
 485        struct tegra_dma_sg_req *hnsgreq;
 486
 487        if (list_empty(&tdc->pending_sg_req))
 488                return;
 489
 490        hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
 491        if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
 492                hnsgreq = list_first_entry(&hsgreq->node,
 493                                        typeof(*hnsgreq), node);
 494                tegra_dma_configure_for_next(tdc, hnsgreq);
 495        }
 496}
 497
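     /* Derive the bytes already transferred for the in-flight request from the STATUS word count. */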
 498static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
 499        struct tegra_dma_sg_req *sg_req, unsigned long status)
 500{
 501        return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
 502}
 503
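     /*
      * Move all pending requests to the free list, mark their descriptors
      * as DMA_ERROR and queue the client callbacks.
      */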
 504static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
 505{
 506        struct tegra_dma_sg_req *sgreq;
 507        struct tegra_dma_desc *dma_desc;
 508
 509        while (!list_empty(&tdc->pending_sg_req)) {
 510                sgreq = list_first_entry(&tdc->pending_sg_req,
 511                                                typeof(*sgreq), node);
 512                list_move_tail(&sgreq->node, &tdc->free_sg_req);
 513                if (sgreq->last_sg) {
 514                        dma_desc = sgreq->dma_desc;
 515                        dma_desc->dma_status = DMA_ERROR;
 516                        list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
 517
  518                        /* Add to the cb list if it is not already there. */
 519                        if (!dma_desc->cb_count)
 520                                list_add_tail(&dma_desc->cb_node,
 521                                                        &tdc->cb_desc);
 522                        dma_desc->cb_count++;
 523                }
 524        }
 525        tdc->isr_handler = NULL;
 526}
 527
 528static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
 529                struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
 530{
 531        struct tegra_dma_sg_req *hsgreq = NULL;
 532
 533        if (list_empty(&tdc->pending_sg_req)) {
 534                dev_err(tdc2dev(tdc), "Dma is running without req\n");
 535                tegra_dma_stop(tdc);
 536                return false;
 537        }
 538
  539        /*
  540         * The head request on the list should be in flight.
  541         * If it is not in flight then abort the transfer, as
  542         * the transfer loop cannot continue.
  543         */
 544        hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
 545        if (!hsgreq->configured) {
 546                tegra_dma_stop(tdc);
 547                dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
 548                tegra_dma_abort_all(tdc);
 549                return false;
 550        }
 551
 552        /* Configure next request */
 553        if (!to_terminate)
 554                tdc_configure_next_head_desc(tdc);
 555        return true;
 556}
 557
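     /*
      * Completion handling for one-shot transfers: retire the finished
      * request (queueing the callback on the last sg) and start the next
      * head request unless the channel is being terminated.
      */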
 558static void handle_once_dma_done(struct tegra_dma_channel *tdc,
 559        bool to_terminate)
 560{
 561        struct tegra_dma_sg_req *sgreq;
 562        struct tegra_dma_desc *dma_desc;
 563
 564        tdc->busy = false;
 565        sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
 566        dma_desc = sgreq->dma_desc;
 567        dma_desc->bytes_transferred += sgreq->req_len;
 568
 569        list_del(&sgreq->node);
 570        if (sgreq->last_sg) {
 571                dma_desc->dma_status = DMA_SUCCESS;
 572                dma_cookie_complete(&dma_desc->txd);
 573                if (!dma_desc->cb_count)
 574                        list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
 575                dma_desc->cb_count++;
 576                list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
 577        }
 578        list_add_tail(&sgreq->node, &tdc->free_sg_req);
 579
  580        /* Do not start the DMA if it is going to be terminated */
 581        if (to_terminate || list_empty(&tdc->pending_sg_req))
 582                return;
 583
 584        tdc_start_head_req(tdc);
 585        return;
 586}
 587
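     /*
      * Completion handling for cyclic transfers: queue the period callback
      * and rotate the finished request to the end of the pending list so
      * the transfer loop continues.
      */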
 588static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
 589                bool to_terminate)
 590{
 591        struct tegra_dma_sg_req *sgreq;
 592        struct tegra_dma_desc *dma_desc;
 593        bool st;
 594
 595        sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
 596        dma_desc = sgreq->dma_desc;
 597        dma_desc->bytes_transferred += sgreq->req_len;
 598
  599        /* Callback needs to be called */
 600        if (!dma_desc->cb_count)
 601                list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
 602        dma_desc->cb_count++;
 603
  604        /* If not the last req then move it to the end of the pending list */
 605        if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
 606                list_move_tail(&sgreq->node, &tdc->pending_sg_req);
 607                sgreq->configured = false;
 608                st = handle_continuous_head_request(tdc, sgreq, to_terminate);
 609                if (!st)
 610                        dma_desc->dma_status = DMA_ERROR;
 611        }
 612        return;
 613}
 614
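     /* Tasklet: drain the callback list and invoke client callbacks with the channel lock dropped. */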
 615static void tegra_dma_tasklet(unsigned long data)
 616{
 617        struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
 618        dma_async_tx_callback callback = NULL;
 619        void *callback_param = NULL;
 620        struct tegra_dma_desc *dma_desc;
 621        unsigned long flags;
 622        int cb_count;
 623
 624        spin_lock_irqsave(&tdc->lock, flags);
 625        while (!list_empty(&tdc->cb_desc)) {
 626                dma_desc  = list_first_entry(&tdc->cb_desc,
 627                                        typeof(*dma_desc), cb_node);
 628                list_del(&dma_desc->cb_node);
 629                callback = dma_desc->txd.callback;
 630                callback_param = dma_desc->txd.callback_param;
 631                cb_count = dma_desc->cb_count;
 632                dma_desc->cb_count = 0;
 633                spin_unlock_irqrestore(&tdc->lock, flags);
 634                while (cb_count-- && callback)
 635                        callback(callback_param);
 636                spin_lock_irqsave(&tdc->lock, flags);
 637        }
 638        spin_unlock_irqrestore(&tdc->lock, flags);
 639}
 640
 641static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
 642{
 643        struct tegra_dma_channel *tdc = dev_id;
 644        unsigned long status;
 645        unsigned long flags;
 646
 647        spin_lock_irqsave(&tdc->lock, flags);
 648
 649        status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 650        if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
 651                tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
 652                tdc->isr_handler(tdc, false);
 653                tasklet_schedule(&tdc->tasklet);
 654                spin_unlock_irqrestore(&tdc->lock, flags);
 655                return IRQ_HANDLED;
 656        }
 657
 658        spin_unlock_irqrestore(&tdc->lock, flags);
 659        dev_info(tdc2dev(tdc),
 660                "Interrupt already served status 0x%08lx\n", status);
 661        return IRQ_NONE;
 662}
 663
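     /* tx_submit: assign a cookie and move the descriptor's sub-transfers to the pending list. */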
 664static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
 665{
 666        struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
 667        struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
 668        unsigned long flags;
 669        dma_cookie_t cookie;
 670
 671        spin_lock_irqsave(&tdc->lock, flags);
 672        dma_desc->dma_status = DMA_IN_PROGRESS;
 673        cookie = dma_cookie_assign(&dma_desc->txd);
 674        list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
 675        spin_unlock_irqrestore(&tdc->lock, flags);
 676        return cookie;
 677}
 678
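     /* Start the head request if the channel is idle; in cyclic mode also pre-configure the next request. */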
 679static void tegra_dma_issue_pending(struct dma_chan *dc)
 680{
 681        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 682        unsigned long flags;
 683
 684        spin_lock_irqsave(&tdc->lock, flags);
 685        if (list_empty(&tdc->pending_sg_req)) {
 686                dev_err(tdc2dev(tdc), "No DMA request\n");
 687                goto end;
 688        }
 689        if (!tdc->busy) {
 690                tdc_start_head_req(tdc);
 691
  692                /* Continuous single mode: configure the next req */
  693                if (tdc->cyclic) {
  694                        /*
  695                         * Wait for one burst time so that the DMA can be
  696                         * configured for the next transfer.
  697                         */
 698                        udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
 699                        tdc_configure_next_head_desc(tdc);
 700                }
 701        }
 702end:
 703        spin_unlock_irqrestore(&tdc->lock, flags);
 704        return;
 705}
 706
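     /*
      * Stop the channel, account for the bytes already transferred by the
      * in-flight request, and abort everything still queued.
      */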
 707static void tegra_dma_terminate_all(struct dma_chan *dc)
 708{
 709        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 710        struct tegra_dma_sg_req *sgreq;
 711        struct tegra_dma_desc *dma_desc;
 712        unsigned long flags;
 713        unsigned long status;
 714        bool was_busy;
 715
 716        spin_lock_irqsave(&tdc->lock, flags);
 717        if (list_empty(&tdc->pending_sg_req)) {
 718                spin_unlock_irqrestore(&tdc->lock, flags);
 719                return;
 720        }
 721
 722        if (!tdc->busy)
 723                goto skip_dma_stop;
 724
 725        /* Pause DMA before checking the queue status */
 726        tegra_dma_pause(tdc, true);
 727
 728        status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 729        if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
 730                dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
 731                tdc->isr_handler(tdc, true);
 732                status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
 733        }
 734
 735        was_busy = tdc->busy;
 736        tegra_dma_stop(tdc);
 737
 738        if (!list_empty(&tdc->pending_sg_req) && was_busy) {
 739                sgreq = list_first_entry(&tdc->pending_sg_req,
 740                                        typeof(*sgreq), node);
 741                sgreq->dma_desc->bytes_transferred +=
 742                                get_current_xferred_count(tdc, sgreq, status);
 743        }
 744        tegra_dma_resume(tdc);
 745
 746skip_dma_stop:
 747        tegra_dma_abort_all(tdc);
 748
 749        while (!list_empty(&tdc->cb_desc)) {
 750                dma_desc  = list_first_entry(&tdc->cb_desc,
 751                                        typeof(*dma_desc), cb_node);
 752                list_del(&dma_desc->cb_node);
 753                dma_desc->cb_count = 0;
 754        }
 755        spin_unlock_irqrestore(&tdc->lock, flags);
 756}
 757
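     /*
      * Report transfer status and residue by looking up the cookie in the
      * ack-pending (free) and pending descriptor lists.
      */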
 758static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
 759        dma_cookie_t cookie, struct dma_tx_state *txstate)
 760{
 761        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 762        struct tegra_dma_desc *dma_desc;
 763        struct tegra_dma_sg_req *sg_req;
 764        enum dma_status ret;
 765        unsigned long flags;
 766        unsigned int residual;
 767
 768        spin_lock_irqsave(&tdc->lock, flags);
 769
 770        ret = dma_cookie_status(dc, cookie, txstate);
 771        if (ret == DMA_SUCCESS) {
 772                spin_unlock_irqrestore(&tdc->lock, flags);
 773                return ret;
 774        }
 775
  776        /* Check the status of descriptors waiting for an ack */
 777        list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
 778                if (dma_desc->txd.cookie == cookie) {
 779                        residual =  dma_desc->bytes_requested -
 780                                        (dma_desc->bytes_transferred %
 781                                                dma_desc->bytes_requested);
 782                        dma_set_residue(txstate, residual);
 783                        ret = dma_desc->dma_status;
 784                        spin_unlock_irqrestore(&tdc->lock, flags);
 785                        return ret;
 786                }
 787        }
 788
 789        /* Check in pending list */
 790        list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
 791                dma_desc = sg_req->dma_desc;
 792                if (dma_desc->txd.cookie == cookie) {
 793                        residual =  dma_desc->bytes_requested -
 794                                        (dma_desc->bytes_transferred %
 795                                                dma_desc->bytes_requested);
 796                        dma_set_residue(txstate, residual);
 797                        ret = dma_desc->dma_status;
 798                        spin_unlock_irqrestore(&tdc->lock, flags);
 799                        return ret;
 800                }
 801        }
 802
  803        dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
 804        spin_unlock_irqrestore(&tdc->lock, flags);
 805        return ret;
 806}
 807
 808static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
 809                        unsigned long arg)
 810{
 811        switch (cmd) {
 812        case DMA_SLAVE_CONFIG:
 813                return tegra_dma_slave_config(dc,
 814                                (struct dma_slave_config *)arg);
 815
 816        case DMA_TERMINATE_ALL:
 817                tegra_dma_terminate_all(dc);
 818                return 0;
 819
 820        default:
 821                break;
 822        }
 823
 824        return -ENXIO;
 825}
 826
 827static inline int get_bus_width(struct tegra_dma_channel *tdc,
 828                enum dma_slave_buswidth slave_bw)
 829{
 830        switch (slave_bw) {
 831        case DMA_SLAVE_BUSWIDTH_1_BYTE:
 832                return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
 833        case DMA_SLAVE_BUSWIDTH_2_BYTES:
 834                return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
 835        case DMA_SLAVE_BUSWIDTH_4_BYTES:
 836                return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
 837        case DMA_SLAVE_BUSWIDTH_8_BYTES:
 838                return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
 839        default:
 840                dev_warn(tdc2dev(tdc),
  841                        "slave bus width is not supported, using 32 bits\n");
 842                return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
 843        }
 844}
 845
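     /*
      * Pick the AHB burst size from the client burst size (converted to
      * 4-byte AHB words) or, when that is zero, from the transfer length.
      */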
 846static inline int get_burst_size(struct tegra_dma_channel *tdc,
 847        u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
 848{
 849        int burst_byte;
 850        int burst_ahb_width;
 851
  852        /*
  853         * burst_size from the client is in terms of the bus_width.
  854         * Convert it into the AHB memory width, which is 4 bytes.
  855         */
 856        burst_byte = burst_size * slave_bw;
 857        burst_ahb_width = burst_byte / 4;
 858
 859        /* If burst size is 0 then calculate the burst size based on length */
 860        if (!burst_ahb_width) {
 861                if (len & 0xF)
 862                        return TEGRA_APBDMA_AHBSEQ_BURST_1;
 863                else if ((len >> 4) & 0x1)
 864                        return TEGRA_APBDMA_AHBSEQ_BURST_4;
 865                else
 866                        return TEGRA_APBDMA_AHBSEQ_BURST_8;
 867        }
 868        if (burst_ahb_width < 4)
 869                return TEGRA_APBDMA_AHBSEQ_BURST_1;
 870        else if (burst_ahb_width < 8)
 871                return TEGRA_APBDMA_AHBSEQ_BURST_4;
 872        else
 873                return TEGRA_APBDMA_AHBSEQ_BURST_8;
 874}
 875
 876static int get_transfer_param(struct tegra_dma_channel *tdc,
 877        enum dma_transfer_direction direction, unsigned long *apb_addr,
 878        unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
 879        enum dma_slave_buswidth *slave_bw)
 880{
 881
 882        switch (direction) {
 883        case DMA_MEM_TO_DEV:
 884                *apb_addr = tdc->dma_sconfig.dst_addr;
 885                *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
 886                *burst_size = tdc->dma_sconfig.dst_maxburst;
 887                *slave_bw = tdc->dma_sconfig.dst_addr_width;
 888                *csr = TEGRA_APBDMA_CSR_DIR;
 889                return 0;
 890
 891        case DMA_DEV_TO_MEM:
 892                *apb_addr = tdc->dma_sconfig.src_addr;
 893                *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
 894                *burst_size = tdc->dma_sconfig.src_maxburst;
 895                *slave_bw = tdc->dma_sconfig.src_addr_width;
 896                *csr = 0;
 897                return 0;
 898
 899        default:
 900                dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
 901                return -EINVAL;
 902        }
 903        return -EINVAL;
 904}
 905
 906static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 907        struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
 908        enum dma_transfer_direction direction, unsigned long flags,
 909        void *context)
 910{
 911        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 912        struct tegra_dma_desc *dma_desc;
 913        unsigned int        i;
 914        struct scatterlist      *sg;
 915        unsigned long csr, ahb_seq, apb_ptr, apb_seq;
 916        struct list_head req_list;
 917        struct tegra_dma_sg_req  *sg_req = NULL;
 918        u32 burst_size;
 919        enum dma_slave_buswidth slave_bw;
 920        int ret;
 921
 922        if (!tdc->config_init) {
 923                dev_err(tdc2dev(tdc), "dma channel is not configured\n");
 924                return NULL;
 925        }
 926        if (sg_len < 1) {
 927                dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
 928                return NULL;
 929        }
 930
 931        ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
 932                                &burst_size, &slave_bw);
 933        if (ret < 0)
 934                return NULL;
 935
 936        INIT_LIST_HEAD(&req_list);
 937
 938        ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
 939        ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
 940                                        TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
 941        ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
 942
 943        csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
 944        csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
 945        if (flags & DMA_PREP_INTERRUPT)
 946                csr |= TEGRA_APBDMA_CSR_IE_EOC;
 947
 948        apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
 949
 950        dma_desc = tegra_dma_desc_get(tdc);
 951        if (!dma_desc) {
 952                dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
 953                return NULL;
 954        }
 955        INIT_LIST_HEAD(&dma_desc->tx_list);
 956        INIT_LIST_HEAD(&dma_desc->cb_node);
 957        dma_desc->cb_count = 0;
 958        dma_desc->bytes_requested = 0;
 959        dma_desc->bytes_transferred = 0;
 960        dma_desc->dma_status = DMA_IN_PROGRESS;
 961
 962        /* Make transfer requests */
 963        for_each_sg(sgl, sg, sg_len, i) {
 964                u32 len, mem;
 965
 966                mem = sg_dma_address(sg);
 967                len = sg_dma_len(sg);
 968
 969                if ((len & 3) || (mem & 3) ||
 970                                (len > tdc->tdma->chip_data->max_dma_count)) {
 971                        dev_err(tdc2dev(tdc),
 972                                "Dma length/memory address is not supported\n");
 973                        tegra_dma_desc_put(tdc, dma_desc);
 974                        return NULL;
 975                }
 976
 977                sg_req = tegra_dma_sg_req_get(tdc);
 978                if (!sg_req) {
 979                        dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
 980                        tegra_dma_desc_put(tdc, dma_desc);
 981                        return NULL;
 982                }
 983
 984                ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
 985                dma_desc->bytes_requested += len;
 986
 987                sg_req->ch_regs.apb_ptr = apb_ptr;
 988                sg_req->ch_regs.ahb_ptr = mem;
 989                sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
 990                sg_req->ch_regs.apb_seq = apb_seq;
 991                sg_req->ch_regs.ahb_seq = ahb_seq;
 992                sg_req->configured = false;
 993                sg_req->last_sg = false;
 994                sg_req->dma_desc = dma_desc;
 995                sg_req->req_len = len;
 996
 997                list_add_tail(&sg_req->node, &dma_desc->tx_list);
 998        }
 999        sg_req->last_sg = true;
1000        if (flags & DMA_CTRL_ACK)
1001                dma_desc->txd.flags = DMA_CTRL_ACK;
1002
 1003        /*
 1004         * Make sure that this mode does not conflict with the currently
 1005         * configured mode.
 1006         */
1007        if (!tdc->isr_handler) {
1008                tdc->isr_handler = handle_once_dma_done;
1009                tdc->cyclic = false;
1010        } else {
1011                if (tdc->cyclic) {
1012                        dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
1013                        tegra_dma_desc_put(tdc, dma_desc);
1014                        return NULL;
1015                }
1016        }
1017
1018        return &dma_desc->txd;
1019}
1020
1021struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
1022        struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
1023        size_t period_len, enum dma_transfer_direction direction,
1024        unsigned long flags, void *context)
1025{
1026        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1027        struct tegra_dma_desc *dma_desc = NULL;
1028        struct tegra_dma_sg_req  *sg_req = NULL;
1029        unsigned long csr, ahb_seq, apb_ptr, apb_seq;
1030        int len;
1031        size_t remain_len;
1032        dma_addr_t mem = buf_addr;
1033        u32 burst_size;
1034        enum dma_slave_buswidth slave_bw;
1035        int ret;
1036
1037        if (!buf_len || !period_len) {
1038                dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
1039                return NULL;
1040        }
1041
1042        if (!tdc->config_init) {
1043                dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
1044                return NULL;
1045        }
1046
 1047        /*
 1048         * We allow more requests to be queued as long as the DMA has not
 1049         * been started. The driver will loop over all requests.
 1050         * Once the DMA is started, new requests can be queued only after
 1051         * terminating the DMA.
 1052         */
1053        if (tdc->busy) {
1054                dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
1055                return NULL;
1056        }
1057
 1058        /*
 1059         * We only support cyclic transfers when buf_len is a multiple of
 1060         * period_len.
 1061         */
1062        if (buf_len % period_len) {
1063                dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
1064                return NULL;
1065        }
1066
1067        len = period_len;
1068        if ((len & 3) || (buf_addr & 3) ||
1069                        (len > tdc->tdma->chip_data->max_dma_count)) {
1070                dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
1071                return NULL;
1072        }
1073
1074        ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
1075                                &burst_size, &slave_bw);
1076        if (ret < 0)
1077                return NULL;
1078
1079
1080        ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
1081        ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
1082                                        TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
1083        ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
1084
1085        csr |= TEGRA_APBDMA_CSR_FLOW;
1086        if (flags & DMA_PREP_INTERRUPT)
1087                csr |= TEGRA_APBDMA_CSR_IE_EOC;
1088        csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
1089
1090        apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
1091
1092        dma_desc = tegra_dma_desc_get(tdc);
1093        if (!dma_desc) {
1094                dev_err(tdc2dev(tdc), "not enough descriptors available\n");
1095                return NULL;
1096        }
1097
1098        INIT_LIST_HEAD(&dma_desc->tx_list);
1099        INIT_LIST_HEAD(&dma_desc->cb_node);
1100        dma_desc->cb_count = 0;
1101
1102        dma_desc->bytes_transferred = 0;
1103        dma_desc->bytes_requested = buf_len;
1104        remain_len = buf_len;
1105
 1106        /* Split the transfer into period-sized chunks */
1107        while (remain_len) {
1108                sg_req = tegra_dma_sg_req_get(tdc);
1109                if (!sg_req) {
1110                        dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
1111                        tegra_dma_desc_put(tdc, dma_desc);
1112                        return NULL;
1113                }
1114
1115                ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
1116                sg_req->ch_regs.apb_ptr = apb_ptr;
1117                sg_req->ch_regs.ahb_ptr = mem;
1118                sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
1119                sg_req->ch_regs.apb_seq = apb_seq;
1120                sg_req->ch_regs.ahb_seq = ahb_seq;
1121                sg_req->configured = false;
1122                sg_req->half_done = false;
1123                sg_req->last_sg = false;
1124                sg_req->dma_desc = dma_desc;
1125                sg_req->req_len = len;
1126
1127                list_add_tail(&sg_req->node, &dma_desc->tx_list);
1128                remain_len -= len;
1129                mem += len;
1130        }
1131        sg_req->last_sg = true;
1132        if (flags & DMA_CTRL_ACK)
1133                dma_desc->txd.flags = DMA_CTRL_ACK;
1134
 1135        /*
 1136         * Make sure that this mode does not conflict with the currently
 1137         * configured mode.
 1138         */
1139        if (!tdc->isr_handler) {
1140                tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
1141                tdc->cyclic = true;
1142        } else {
1143                if (!tdc->cyclic) {
1144                        dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
1145                        tegra_dma_desc_put(tdc, dma_desc);
1146                        return NULL;
1147                }
1148        }
1149
1150        return &dma_desc->txd;
1151}
1152
1153static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
1154{
1155        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1156        struct tegra_dma *tdma = tdc->tdma;
1157        int ret;
1158
1159        dma_cookie_init(&tdc->dma_chan);
1160        tdc->config_init = false;
1161        ret = clk_prepare_enable(tdma->dma_clk);
1162        if (ret < 0)
1163                dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret);
1164        return ret;
1165}
1166
1167static void tegra_dma_free_chan_resources(struct dma_chan *dc)
1168{
1169        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
1170        struct tegra_dma *tdma = tdc->tdma;
1171
1172        struct tegra_dma_desc *dma_desc;
1173        struct tegra_dma_sg_req *sg_req;
1174        struct list_head dma_desc_list;
1175        struct list_head sg_req_list;
1176        unsigned long flags;
1177
1178        INIT_LIST_HEAD(&dma_desc_list);
1179        INIT_LIST_HEAD(&sg_req_list);
1180
1181        dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
1182
1183        if (tdc->busy)
1184                tegra_dma_terminate_all(dc);
1185
1186        spin_lock_irqsave(&tdc->lock, flags);
1187        list_splice_init(&tdc->pending_sg_req, &sg_req_list);
1188        list_splice_init(&tdc->free_sg_req, &sg_req_list);
1189        list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
1190        INIT_LIST_HEAD(&tdc->cb_desc);
1191        tdc->config_init = false;
1192        spin_unlock_irqrestore(&tdc->lock, flags);
1193
1194        while (!list_empty(&dma_desc_list)) {
1195                dma_desc = list_first_entry(&dma_desc_list,
1196                                        typeof(*dma_desc), node);
1197                list_del(&dma_desc->node);
1198                kfree(dma_desc);
1199        }
1200
1201        while (!list_empty(&sg_req_list)) {
1202                sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
1203                list_del(&sg_req->node);
1204                kfree(sg_req);
1205        }
1206        clk_disable_unprepare(tdma->dma_clk);
1207}
1208
1209/* Tegra20 specific DMA controller information */
1210static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
1211        .nr_channels            = 16,
1212        .max_dma_count          = 1024UL * 64,
1213        .support_channel_pause  = false,
1214};
1215
1216#if defined(CONFIG_OF)
1217/* Tegra30 specific DMA controller information */
1218static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
1219        .nr_channels            = 32,
1220        .max_dma_count          = 1024UL * 64,
1221        .support_channel_pause  = false,
1222};
1223
1224/* Tegra114 specific DMA controller information */
1225static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
1226        .nr_channels            = 32,
1227        .max_dma_count          = 1024UL * 64,
1228        .support_channel_pause  = true,
1229};
1230
1231
1232static const struct of_device_id tegra_dma_of_match[] = {
1233        {
1234                .compatible = "nvidia,tegra114-apbdma",
1235                .data = &tegra114_dma_chip_data,
1236        }, {
1237                .compatible = "nvidia,tegra30-apbdma",
1238                .data = &tegra30_dma_chip_data,
1239        }, {
1240                .compatible = "nvidia,tegra20-apbdma",
1241                .data = &tegra20_dma_chip_data,
1242        }, {
1243        },
1244};
1245MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
1246#endif
1247
1248static int tegra_dma_probe(struct platform_device *pdev)
1249{
1250        struct resource *res;
1251        struct tegra_dma *tdma;
1252        int ret;
1253        int i;
1254        const struct tegra_dma_chip_data *cdata = NULL;
1255
1256        if (pdev->dev.of_node) {
1257                const struct of_device_id *match;
1258                match = of_match_device(of_match_ptr(tegra_dma_of_match),
1259                                        &pdev->dev);
1260                if (!match) {
1261                        dev_err(&pdev->dev, "Error: No device match found\n");
1262                        return -ENODEV;
1263                }
1264                cdata = match->data;
1265        } else {
 1266                /* If there is no device tree then fall back to Tegra20 */
1267                cdata = &tegra20_dma_chip_data;
1268        }
1269
1270        tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
1271                        sizeof(struct tegra_dma_channel), GFP_KERNEL);
1272        if (!tdma) {
1273                dev_err(&pdev->dev, "Error: memory allocation failed\n");
1274                return -ENOMEM;
1275        }
1276
1277        tdma->dev = &pdev->dev;
1278        tdma->chip_data = cdata;
1279        platform_set_drvdata(pdev, tdma);
1280
1281        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1282        if (!res) {
1283                dev_err(&pdev->dev, "No mem resource for DMA\n");
1284                return -EINVAL;
1285        }
1286
1287        tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
1288        if (IS_ERR(tdma->base_addr))
1289                return PTR_ERR(tdma->base_addr);
1290
1291        tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
1292        if (IS_ERR(tdma->dma_clk)) {
1293                dev_err(&pdev->dev, "Error: Missing controller clock\n");
1294                return PTR_ERR(tdma->dma_clk);
1295        }
1296
1297        spin_lock_init(&tdma->global_lock);
1298
1299        pm_runtime_enable(&pdev->dev);
1300        if (!pm_runtime_enabled(&pdev->dev)) {
1301                ret = tegra_dma_runtime_resume(&pdev->dev);
1302                if (ret) {
1303                        dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
1304                                ret);
1305                        goto err_pm_disable;
1306                }
1307        }
1308
1309        /* Enable clock before accessing registers */
1310        ret = clk_prepare_enable(tdma->dma_clk);
1311        if (ret < 0) {
1312                dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
1313                goto err_pm_disable;
1314        }
1315
1316        /* Reset DMA controller */
1317        tegra_periph_reset_assert(tdma->dma_clk);
1318        udelay(2);
1319        tegra_periph_reset_deassert(tdma->dma_clk);
1320
1321        /* Enable global DMA registers */
1322        tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
1323        tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
1324        tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
1325
1326        clk_disable_unprepare(tdma->dma_clk);
1327
1328        INIT_LIST_HEAD(&tdma->dma_dev.channels);
1329        for (i = 0; i < cdata->nr_channels; i++) {
1330                struct tegra_dma_channel *tdc = &tdma->channels[i];
1331
1332                tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
1333                                        i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
1334
1335                res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
1336                if (!res) {
1337                        ret = -EINVAL;
1338                        dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
1339                        goto err_irq;
1340                }
1341                tdc->irq = res->start;
1342                snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
1343                ret = devm_request_irq(&pdev->dev, tdc->irq,
1344                                tegra_dma_isr, 0, tdc->name, tdc);
1345                if (ret) {
1346                        dev_err(&pdev->dev,
1347                                "request_irq failed with err %d channel %d\n",
 1348                                ret, i);
1349                        goto err_irq;
1350                }
1351
1352                tdc->dma_chan.device = &tdma->dma_dev;
1353                dma_cookie_init(&tdc->dma_chan);
1354                list_add_tail(&tdc->dma_chan.device_node,
1355                                &tdma->dma_dev.channels);
1356                tdc->tdma = tdma;
1357                tdc->id = i;
1358
1359                tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
1360                                (unsigned long)tdc);
1361                spin_lock_init(&tdc->lock);
1362
1363                INIT_LIST_HEAD(&tdc->pending_sg_req);
1364                INIT_LIST_HEAD(&tdc->free_sg_req);
1365                INIT_LIST_HEAD(&tdc->free_dma_desc);
1366                INIT_LIST_HEAD(&tdc->cb_desc);
1367        }
1368
1369        dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
1370        dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
1371        dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
1372
1373        tdma->dma_dev.dev = &pdev->dev;
1374        tdma->dma_dev.device_alloc_chan_resources =
1375                                        tegra_dma_alloc_chan_resources;
1376        tdma->dma_dev.device_free_chan_resources =
1377                                        tegra_dma_free_chan_resources;
1378        tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
1379        tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
1380        tdma->dma_dev.device_control = tegra_dma_device_control;
1381        tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
1382        tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
1383
1384        ret = dma_async_device_register(&tdma->dma_dev);
1385        if (ret < 0) {
1386                dev_err(&pdev->dev,
1387                        "Tegra20 APB DMA driver registration failed %d\n", ret);
1388                goto err_irq;
1389        }
1390
 1391        dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
1392                        cdata->nr_channels);
1393        return 0;
1394
1395err_irq:
1396        while (--i >= 0) {
1397                struct tegra_dma_channel *tdc = &tdma->channels[i];
1398                tasklet_kill(&tdc->tasklet);
1399        }
1400
1401err_pm_disable:
1402        pm_runtime_disable(&pdev->dev);
1403        if (!pm_runtime_status_suspended(&pdev->dev))
1404                tegra_dma_runtime_suspend(&pdev->dev);
1405        return ret;
1406}
1407
1408static int tegra_dma_remove(struct platform_device *pdev)
1409{
1410        struct tegra_dma *tdma = platform_get_drvdata(pdev);
1411        int i;
1412        struct tegra_dma_channel *tdc;
1413
1414        dma_async_device_unregister(&tdma->dma_dev);
1415
1416        for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
1417                tdc = &tdma->channels[i];
1418                tasklet_kill(&tdc->tasklet);
1419        }
1420
1421        pm_runtime_disable(&pdev->dev);
1422        if (!pm_runtime_status_suspended(&pdev->dev))
1423                tegra_dma_runtime_suspend(&pdev->dev);
1424
1425        return 0;
1426}
1427
1428static int tegra_dma_runtime_suspend(struct device *dev)
1429{
1430        struct platform_device *pdev = to_platform_device(dev);
1431        struct tegra_dma *tdma = platform_get_drvdata(pdev);
1432
1433        clk_disable_unprepare(tdma->dma_clk);
1434        return 0;
1435}
1436
1437static int tegra_dma_runtime_resume(struct device *dev)
1438{
1439        struct platform_device *pdev = to_platform_device(dev);
1440        struct tegra_dma *tdma = platform_get_drvdata(pdev);
1441        int ret;
1442
1443        ret = clk_prepare_enable(tdma->dma_clk);
1444        if (ret < 0) {
1445                dev_err(dev, "clk_enable failed: %d\n", ret);
1446                return ret;
1447        }
1448        return 0;
1449}
1450
1451static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
1452#ifdef CONFIG_PM_RUNTIME
1453        .runtime_suspend = tegra_dma_runtime_suspend,
1454        .runtime_resume = tegra_dma_runtime_resume,
1455#endif
1456};
1457
1458static struct platform_driver tegra_dmac_driver = {
1459        .driver = {
1460                .name   = "tegra-apbdma",
1461                .owner = THIS_MODULE,
1462                .pm     = &tegra_dma_dev_pm_ops,
1463                .of_match_table = of_match_ptr(tegra_dma_of_match),
1464        },
1465        .probe          = tegra_dma_probe,
1466        .remove         = tegra_dma_remove,
1467};
1468
1469module_platform_driver(tegra_dmac_driver);
1470
1471MODULE_ALIAS("platform:tegra20-apbdma");
1472MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
1473MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1474MODULE_LICENSE("GPL v2");
1475