linux/drivers/dma/xilinx/xilinx_vdma.c
   1/*
   2 * DMA driver for Xilinx Video DMA Engine
   3 *
   4 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
   5 *
   6 * Based on the Freescale DMA driver.
   7 *
   8 * Description:
   9 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
  10 * core that provides high-bandwidth direct memory access between memory
  11 * and AXI4-Stream type video target peripherals. The core provides efficient
  12 * two dimensional DMA operations with independent asynchronous read (S2MM)
  13 * and write (MM2S) channel operation. It can be configured to have either
  14 * one channel or two channels. If configured as two channels, one is to
  15 * transmit to the video device (MM2S) and another is to receive from the
  16 * video device (S2MM). Initialization, status, interrupt and management
  17 * registers are accessed through an AXI4-Lite slave interface.
  18 *
  19 * This program is free software: you can redistribute it and/or modify
  20 * it under the terms of the GNU General Public License as published by
  21 * the Free Software Foundation, either version 2 of the License, or
  22 * (at your option) any later version.
  23 */
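/*
 * Illustrative client-side sketch (not part of this driver): a consumer
 * obtains a channel through the dmaengine slave API and submits one
 * interleaved transfer per video frame. The device pointer, buffer
 * address and geometry names below are assumptions made for the example.
 *
 *	struct dma_chan *chan;
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	chan = dma_request_slave_channel(dev, "vdma0");
 *	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = frame_phys;
 *	xt->numf = height;                       // lines per frame
 *	xt->frame_size = 1;                      // one chunk per frame
 *	xt->sgl[0].size = width * bytes_pp;      // bytes per line
 *	xt->sgl[0].icg = pitch - width * bytes_pp;
 *	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	tx->callback = frame_done_handler;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */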
  24
  25#include <linux/amba/xilinx_dma.h>
  26#include <linux/bitops.h>
  27#include <linux/dmapool.h>
  28#include <linux/init.h>
  29#include <linux/interrupt.h>
  30#include <linux/io.h>
  31#include <linux/module.h>
  32#include <linux/of_address.h>
  33#include <linux/of_dma.h>
  34#include <linux/of_platform.h>
  35#include <linux/of_irq.h>
  36#include <linux/slab.h>
  37
  38#include "../dmaengine.h"
  39
  40/* Register/Descriptor Offsets */
  41#define XILINX_VDMA_MM2S_CTRL_OFFSET            0x0000
  42#define XILINX_VDMA_S2MM_CTRL_OFFSET            0x0030
  43#define XILINX_VDMA_MM2S_DESC_OFFSET            0x0050
  44#define XILINX_VDMA_S2MM_DESC_OFFSET            0x00a0
  45
  46/* Control Registers */
  47#define XILINX_VDMA_REG_DMACR                   0x0000
  48#define XILINX_VDMA_DMACR_DELAY_MAX             0xff
  49#define XILINX_VDMA_DMACR_DELAY_SHIFT           24
  50#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX       0xff
  51#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT     16
  52#define XILINX_VDMA_DMACR_ERR_IRQ               BIT(14)
  53#define XILINX_VDMA_DMACR_DLY_CNT_IRQ           BIT(13)
  54#define XILINX_VDMA_DMACR_FRM_CNT_IRQ           BIT(12)
  55#define XILINX_VDMA_DMACR_MASTER_SHIFT          8
  56#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT        5
  57#define XILINX_VDMA_DMACR_FRAMECNT_EN           BIT(4)
  58#define XILINX_VDMA_DMACR_GENLOCK_EN            BIT(3)
  59#define XILINX_VDMA_DMACR_RESET                 BIT(2)
  60#define XILINX_VDMA_DMACR_CIRC_EN               BIT(1)
  61#define XILINX_VDMA_DMACR_RUNSTOP               BIT(0)
  62#define XILINX_VDMA_DMACR_FSYNCSRC_MASK         GENMASK(6, 5)
  63
  64#define XILINX_VDMA_REG_DMASR                   0x0004
  65#define XILINX_VDMA_DMASR_EOL_LATE_ERR          BIT(15)
  66#define XILINX_VDMA_DMASR_ERR_IRQ               BIT(14)
  67#define XILINX_VDMA_DMASR_DLY_CNT_IRQ           BIT(13)
  68#define XILINX_VDMA_DMASR_FRM_CNT_IRQ           BIT(12)
  69#define XILINX_VDMA_DMASR_SOF_LATE_ERR          BIT(11)
  70#define XILINX_VDMA_DMASR_SG_DEC_ERR            BIT(10)
  71#define XILINX_VDMA_DMASR_SG_SLV_ERR            BIT(9)
  72#define XILINX_VDMA_DMASR_EOF_EARLY_ERR         BIT(8)
  73#define XILINX_VDMA_DMASR_SOF_EARLY_ERR         BIT(7)
  74#define XILINX_VDMA_DMASR_DMA_DEC_ERR           BIT(6)
  75#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR         BIT(5)
  76#define XILINX_VDMA_DMASR_DMA_INT_ERR           BIT(4)
  77#define XILINX_VDMA_DMASR_IDLE                  BIT(1)
  78#define XILINX_VDMA_DMASR_HALTED                BIT(0)
  79#define XILINX_VDMA_DMASR_DELAY_MASK            GENMASK(31, 24)
  80#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK      GENMASK(23, 16)
  81
  82#define XILINX_VDMA_REG_CURDESC                 0x0008
  83#define XILINX_VDMA_REG_TAILDESC                0x0010
  84#define XILINX_VDMA_REG_REG_INDEX               0x0014
  85#define XILINX_VDMA_REG_FRMSTORE                0x0018
  86#define XILINX_VDMA_REG_THRESHOLD               0x001c
  87#define XILINX_VDMA_REG_FRMPTR_STS              0x0024
  88#define XILINX_VDMA_REG_PARK_PTR                0x0028
  89#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT       8
  90#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT       0
  91#define XILINX_VDMA_REG_VDMA_VERSION            0x002c
  92
  93/* Register Direct Mode Registers */
  94#define XILINX_VDMA_REG_VSIZE                   0x0000
  95#define XILINX_VDMA_REG_HSIZE                   0x0004
  96
  97#define XILINX_VDMA_REG_FRMDLY_STRIDE           0x0008
  98#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT  24
  99#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT  0
 100
 101#define XILINX_VDMA_REG_START_ADDRESS(n)        (0x000c + 4 * (n))
 102
 103/* HW specific definitions */
 104#define XILINX_VDMA_MAX_CHANS_PER_DEVICE        0x2
 105
 106#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK  \
 107                (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \
 108                 XILINX_VDMA_DMASR_DLY_CNT_IRQ | \
 109                 XILINX_VDMA_DMASR_ERR_IRQ)
 110
 111#define XILINX_VDMA_DMASR_ALL_ERR_MASK  \
 112                (XILINX_VDMA_DMASR_EOL_LATE_ERR | \
 113                 XILINX_VDMA_DMASR_SOF_LATE_ERR | \
 114                 XILINX_VDMA_DMASR_SG_DEC_ERR | \
 115                 XILINX_VDMA_DMASR_SG_SLV_ERR | \
 116                 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
 117                 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
 118                 XILINX_VDMA_DMASR_DMA_DEC_ERR | \
 119                 XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \
 120                 XILINX_VDMA_DMASR_DMA_INT_ERR)
 121
 122/*
 123 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 124 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 125 * is enabled in the h/w system.
 126 */
 127#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK      \
 128                (XILINX_VDMA_DMASR_SOF_LATE_ERR | \
 129                 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \
 130                 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \
 131                 XILINX_VDMA_DMASR_DMA_INT_ERR)
 132
 133/* Axi VDMA Flush on Fsync bits */
 134#define XILINX_VDMA_FLUSH_S2MM          3
 135#define XILINX_VDMA_FLUSH_MM2S          2
 136#define XILINX_VDMA_FLUSH_BOTH          1
 137
  138/* Delay loop counter to bound polling in case of hardware failure */
 139#define XILINX_VDMA_LOOP_COUNT          1000000
 140
 141/**
 142 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 143 * @next_desc: Next Descriptor Pointer @0x00
 144 * @pad1: Reserved @0x04
 145 * @buf_addr: Buffer address @0x08
 146 * @pad2: Reserved @0x0C
 147 * @vsize: Vertical Size @0x10
 148 * @hsize: Horizontal Size @0x14
 149 * @stride: Number of bytes between the first
 150 *          pixels of each horizontal line @0x18
 151 */
 152struct xilinx_vdma_desc_hw {
 153        u32 next_desc;
 154        u32 pad1;
 155        u32 buf_addr;
 156        u32 pad2;
 157        u32 vsize;
 158        u32 hsize;
 159        u32 stride;
 160} __aligned(64);
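/*
 * Worked example for the fields above (values are assumptions): a
 * 1920x1080 frame of 32-bit pixels stored with an 8192-byte pitch
 * would be described as
 *
 *	hsize  = 1920 * 4 = 7680       bytes per line
 *	vsize  = 1080                  lines per frame
 *	stride = 8192                  line-to-line increment, with the frame
 *	                               delay OR'ed in above
 *	                               XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT
 */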
 161
 162/**
 163 * struct xilinx_vdma_tx_segment - Descriptor segment
 164 * @hw: Hardware descriptor
 165 * @node: Node in the descriptor segments list
 166 * @phys: Physical address of segment
 167 */
 168struct xilinx_vdma_tx_segment {
 169        struct xilinx_vdma_desc_hw hw;
 170        struct list_head node;
 171        dma_addr_t phys;
 172} __aligned(64);
 173
 174/**
 175 * struct xilinx_vdma_tx_descriptor - Per Transaction structure
 176 * @async_tx: Async transaction descriptor
 177 * @segments: TX segments list
 178 * @node: Node in the channel descriptors list
 179 */
 180struct xilinx_vdma_tx_descriptor {
 181        struct dma_async_tx_descriptor async_tx;
 182        struct list_head segments;
 183        struct list_head node;
 184};
 185
 186/**
 187 * struct xilinx_vdma_chan - Driver specific VDMA channel structure
 188 * @xdev: Driver specific device structure
 189 * @ctrl_offset: Control registers offset
 190 * @desc_offset: TX descriptor registers offset
 191 * @lock: Descriptor operation lock
 192 * @pending_list: Descriptors waiting
 193 * @active_desc: Active descriptor
 194 * @allocated_desc: Allocated descriptor
 195 * @done_list: Complete descriptors
 196 * @common: DMA common channel
 197 * @desc_pool: Descriptors pool
 198 * @dev: The dma device
 199 * @irq: Channel IRQ
 200 * @id: Channel ID
 201 * @direction: Transfer direction
 202 * @num_frms: Number of frames
 203 * @has_sg: Support scatter transfers
 204 * @genlock: Support genlock mode
 205 * @err: Channel has errors
 206 * @tasklet: Cleanup work after irq
 207 * @config: Device configuration info
 208 * @flush_on_fsync: Flush on Frame sync
 209 */
 210struct xilinx_vdma_chan {
 211        struct xilinx_vdma_device *xdev;
 212        u32 ctrl_offset;
 213        u32 desc_offset;
 214        spinlock_t lock;
 215        struct list_head pending_list;
 216        struct xilinx_vdma_tx_descriptor *active_desc;
 217        struct xilinx_vdma_tx_descriptor *allocated_desc;
 218        struct list_head done_list;
 219        struct dma_chan common;
 220        struct dma_pool *desc_pool;
 221        struct device *dev;
 222        int irq;
 223        int id;
 224        enum dma_transfer_direction direction;
 225        int num_frms;
 226        bool has_sg;
 227        bool genlock;
 228        bool err;
 229        struct tasklet_struct tasklet;
 230        struct xilinx_vdma_config config;
 231        bool flush_on_fsync;
 232};
 233
 234/**
 235 * struct xilinx_vdma_device - VDMA device structure
 236 * @regs: I/O mapped base address
 237 * @dev: Device Structure
 238 * @common: DMA device structure
 239 * @chan: Driver specific VDMA channel
 240 * @has_sg: Specifies whether Scatter-Gather is present or not
 241 * @flush_on_fsync: Flush on frame sync
 242 */
 243struct xilinx_vdma_device {
 244        void __iomem *regs;
 245        struct device *dev;
 246        struct dma_device common;
 247        struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE];
 248        bool has_sg;
 249        u32 flush_on_fsync;
 250};
 251
 252/* Macros */
 253#define to_xilinx_chan(chan) \
 254        container_of(chan, struct xilinx_vdma_chan, common)
 255#define to_vdma_tx_descriptor(tx) \
 256        container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
 257
 258/* IO accessors */
 259static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
 260{
 261        return ioread32(chan->xdev->regs + reg);
 262}
 263
 264static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value)
 265{
 266        iowrite32(value, chan->xdev->regs + reg);
 267}
 268
 269static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg,
 270                                   u32 value)
 271{
 272        vdma_write(chan, chan->desc_offset + reg, value);
 273}
 274
 275static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg)
 276{
 277        return vdma_read(chan, chan->ctrl_offset + reg);
 278}
 279
 280static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg,
 281                                   u32 value)
 282{
 283        vdma_write(chan, chan->ctrl_offset + reg, value);
 284}
 285
 286static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg,
 287                                 u32 clr)
 288{
 289        vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr);
 290}
 291
 292static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
 293                                 u32 set)
 294{
 295        vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set);
 296}
 297
 298/* -----------------------------------------------------------------------------
 299 * Descriptors and segments alloc and free
 300 */
 301
 302/**
 303 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 304 * @chan: Driver specific VDMA channel
 305 *
 306 * Return: The allocated segment on success and NULL on failure.
 307 */
 308static struct xilinx_vdma_tx_segment *
 309xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
 310{
 311        struct xilinx_vdma_tx_segment *segment;
 312        dma_addr_t phys;
 313
 314        segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
 315        if (!segment)
 316                return NULL;
 317
 318        memset(segment, 0, sizeof(*segment));
 319        segment->phys = phys;
 320
 321        return segment;
 322}
 323
 324/**
 325 * xilinx_vdma_free_tx_segment - Free transaction segment
 326 * @chan: Driver specific VDMA channel
 327 * @segment: VDMA transaction segment
 328 */
 329static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan,
 330                                        struct xilinx_vdma_tx_segment *segment)
 331{
 332        dma_pool_free(chan->desc_pool, segment, segment->phys);
 333}
 334
 335/**
  336 * xilinx_vdma_alloc_tx_descriptor - Allocate transaction descriptor
 337 * @chan: Driver specific VDMA channel
 338 *
 339 * Return: The allocated descriptor on success and NULL on failure.
 340 */
 341static struct xilinx_vdma_tx_descriptor *
 342xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
 343{
 344        struct xilinx_vdma_tx_descriptor *desc;
 345        unsigned long flags;
 346
 347        if (chan->allocated_desc)
 348                return chan->allocated_desc;
 349
 350        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 351        if (!desc)
 352                return NULL;
 353
 354        spin_lock_irqsave(&chan->lock, flags);
 355        chan->allocated_desc = desc;
 356        spin_unlock_irqrestore(&chan->lock, flags);
 357
 358        INIT_LIST_HEAD(&desc->segments);
 359
 360        return desc;
 361}
 362
 363/**
 364 * xilinx_vdma_free_tx_descriptor - Free transaction descriptor
 365 * @chan: Driver specific VDMA channel
 366 * @desc: VDMA transaction descriptor
 367 */
 368static void
 369xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
 370                               struct xilinx_vdma_tx_descriptor *desc)
 371{
 372        struct xilinx_vdma_tx_segment *segment, *next;
 373
 374        if (!desc)
 375                return;
 376
 377        list_for_each_entry_safe(segment, next, &desc->segments, node) {
 378                list_del(&segment->node);
 379                xilinx_vdma_free_tx_segment(chan, segment);
 380        }
 381
 382        kfree(desc);
 383}
 384
 385/* Required functions */
 386
 387/**
 388 * xilinx_vdma_free_desc_list - Free descriptors list
 389 * @chan: Driver specific VDMA channel
 390 * @list: List to parse and delete the descriptor
 391 */
 392static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan,
 393                                        struct list_head *list)
 394{
 395        struct xilinx_vdma_tx_descriptor *desc, *next;
 396
 397        list_for_each_entry_safe(desc, next, list, node) {
 398                list_del(&desc->node);
 399                xilinx_vdma_free_tx_descriptor(chan, desc);
 400        }
 401}
 402
 403/**
 404 * xilinx_vdma_free_descriptors - Free channel descriptors
 405 * @chan: Driver specific VDMA channel
 406 */
 407static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
 408{
 409        unsigned long flags;
 410
 411        spin_lock_irqsave(&chan->lock, flags);
 412
 413        xilinx_vdma_free_desc_list(chan, &chan->pending_list);
 414        xilinx_vdma_free_desc_list(chan, &chan->done_list);
 415
 416        xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
 417        chan->active_desc = NULL;
 418
 419        spin_unlock_irqrestore(&chan->lock, flags);
 420}
 421
 422/**
 423 * xilinx_vdma_free_chan_resources - Free channel resources
 424 * @dchan: DMA channel
 425 */
 426static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan)
 427{
 428        struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
 429
 430        dev_dbg(chan->dev, "Free all channel resources.\n");
 431
 432        xilinx_vdma_free_descriptors(chan);
 433        dma_pool_destroy(chan->desc_pool);
 434        chan->desc_pool = NULL;
 435}
 436
 437/**
 438 * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors
 439 * @chan: Driver specific VDMA channel
 440 */
 441static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
 442{
 443        struct xilinx_vdma_tx_descriptor *desc, *next;
 444        unsigned long flags;
 445
 446        spin_lock_irqsave(&chan->lock, flags);
 447
 448        list_for_each_entry_safe(desc, next, &chan->done_list, node) {
 449                dma_async_tx_callback callback;
 450                void *callback_param;
 451
  452                /* Remove from the list of completed transactions */
 453                list_del(&desc->node);
 454
 455                /* Run the link descriptor callback function */
 456                callback = desc->async_tx.callback;
 457                callback_param = desc->async_tx.callback_param;
 458                if (callback) {
 459                        spin_unlock_irqrestore(&chan->lock, flags);
 460                        callback(callback_param);
 461                        spin_lock_irqsave(&chan->lock, flags);
 462                }
 463
 464                /* Run any dependencies, then free the descriptor */
 465                dma_run_dependencies(&desc->async_tx);
 466                xilinx_vdma_free_tx_descriptor(chan, desc);
 467        }
 468
 469        spin_unlock_irqrestore(&chan->lock, flags);
 470}
 471
 472/**
 473 * xilinx_vdma_do_tasklet - Schedule completion tasklet
 474 * @data: Pointer to the Xilinx VDMA channel structure
 475 */
 476static void xilinx_vdma_do_tasklet(unsigned long data)
 477{
 478        struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data;
 479
 480        xilinx_vdma_chan_desc_cleanup(chan);
 481}
 482
 483/**
 484 * xilinx_vdma_alloc_chan_resources - Allocate channel resources
 485 * @dchan: DMA channel
 486 *
 487 * Return: '0' on success and failure value on error
 488 */
 489static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
 490{
 491        struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
 492
 493        /* Has this channel already been allocated? */
 494        if (chan->desc_pool)
 495                return 0;
 496
 497        /*
  498         * The descriptor must be aligned to 64 bytes to meet the
  499         * Xilinx VDMA specification requirement.
 500         */
 501        chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
 502                                chan->dev,
 503                                sizeof(struct xilinx_vdma_tx_segment),
 504                                __alignof__(struct xilinx_vdma_tx_segment), 0);
 505        if (!chan->desc_pool) {
 506                dev_err(chan->dev,
 507                        "unable to allocate channel %d descriptor pool\n",
 508                        chan->id);
 509                return -ENOMEM;
 510        }
 511
 512        dma_cookie_init(dchan);
 513        return 0;
 514}
 515
 516/**
 517 * xilinx_vdma_tx_status - Get VDMA transaction status
 518 * @dchan: DMA channel
 519 * @cookie: Transaction identifier
 520 * @txstate: Transaction state
 521 *
 522 * Return: DMA transaction status
 523 */
 524static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
 525                                        dma_cookie_t cookie,
 526                                        struct dma_tx_state *txstate)
 527{
 528        return dma_cookie_status(dchan, cookie, txstate);
 529}
 530
 531/**
 532 * xilinx_vdma_is_running - Check if VDMA channel is running
 533 * @chan: Driver specific VDMA channel
 534 *
  535 * Return: 'true' if running, 'false' if not.
 536 */
 537static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan)
 538{
 539        return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
 540                 XILINX_VDMA_DMASR_HALTED) &&
 541                (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
 542                 XILINX_VDMA_DMACR_RUNSTOP);
 543}
 544
 545/**
 546 * xilinx_vdma_is_idle - Check if VDMA channel is idle
 547 * @chan: Driver specific VDMA channel
 548 *
  549 * Return: 'true' if idle, 'false' if not.
 550 */
 551static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
 552{
 553        return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
 554                XILINX_VDMA_DMASR_IDLE;
 555}
 556
 557/**
 558 * xilinx_vdma_halt - Halt VDMA channel
 559 * @chan: Driver specific VDMA channel
 560 */
 561static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
 562{
 563        int loop = XILINX_VDMA_LOOP_COUNT;
 564
 565        vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 566
 567        /* Wait for the hardware to halt */
 568        do {
 569                if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
 570                    XILINX_VDMA_DMASR_HALTED)
 571                        break;
 572        } while (loop--);
 573
 574        if (!loop) {
 575                dev_err(chan->dev, "Cannot stop channel %p: %x\n",
 576                        chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 577                chan->err = true;
 578        }
 579
 580        return;
 581}
 582
 583/**
 584 * xilinx_vdma_start - Start VDMA channel
 585 * @chan: Driver specific VDMA channel
 586 */
 587static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 588{
 589        int loop = XILINX_VDMA_LOOP_COUNT;
 590
 591        vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 592
 593        /* Wait for the hardware to start */
 594        do {
 595                if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
 596                      XILINX_VDMA_DMASR_HALTED))
 597                        break;
 598        } while (loop--);
 599
 600        if (!loop) {
 601                dev_err(chan->dev, "Cannot start channel %p: %x\n",
 602                        chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 603
 604                chan->err = true;
 605        }
 606
 607        return;
 608}
 609
 610/**
 611 * xilinx_vdma_start_transfer - Starts VDMA transfer
 612 * @chan: Driver specific channel struct pointer
 613 */
 614static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 615{
 616        struct xilinx_vdma_config *config = &chan->config;
 617        struct xilinx_vdma_tx_descriptor *desc;
 618        unsigned long flags;
 619        u32 reg;
 620        struct xilinx_vdma_tx_segment *head, *tail = NULL;
 621
 622        if (chan->err)
 623                return;
 624
 625        spin_lock_irqsave(&chan->lock, flags);
 626
 627        /* There's already an active descriptor, bail out. */
 628        if (chan->active_desc)
 629                goto out_unlock;
 630
 631        if (list_empty(&chan->pending_list))
 632                goto out_unlock;
 633
 634        desc = list_first_entry(&chan->pending_list,
 635                                struct xilinx_vdma_tx_descriptor, node);
 636
 637        /* If it is SG mode and hardware is busy, cannot submit */
 638        if (chan->has_sg && xilinx_vdma_is_running(chan) &&
 639            !xilinx_vdma_is_idle(chan)) {
 640                dev_dbg(chan->dev, "DMA controller still busy\n");
 641                goto out_unlock;
 642        }
 643
 644        /*
 645         * If hardware is idle, then all descriptors on the running lists are
 646         * done, start new transfers
 647         */
 648        if (chan->has_sg) {
 649                head = list_first_entry(&desc->segments,
 650                                        struct xilinx_vdma_tx_segment, node);
 651                tail = list_entry(desc->segments.prev,
 652                                  struct xilinx_vdma_tx_segment, node);
 653
 654                vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
 655        }
 656
 657        /* Configure the hardware using info in the config structure */
 658        reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
 659
 660        if (config->frm_cnt_en)
 661                reg |= XILINX_VDMA_DMACR_FRAMECNT_EN;
 662        else
 663                reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
 664
 665        /*
 666         * With SG, start with circular mode, so that BDs can be fetched.
 667         * In direct register mode, if not parking, enable circular mode
 668         */
 669        if (chan->has_sg || !config->park)
 670                reg |= XILINX_VDMA_DMACR_CIRC_EN;
 671
 672        if (config->park)
 673                reg &= ~XILINX_VDMA_DMACR_CIRC_EN;
 674
 675        vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg);
 676
 677        if (config->park && (config->park_frm >= 0) &&
 678                        (config->park_frm < chan->num_frms)) {
 679                if (chan->direction == DMA_MEM_TO_DEV)
 680                        vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
 681                                config->park_frm <<
 682                                        XILINX_VDMA_PARK_PTR_RD_REF_SHIFT);
 683                else
 684                        vdma_write(chan, XILINX_VDMA_REG_PARK_PTR,
 685                                config->park_frm <<
 686                                        XILINX_VDMA_PARK_PTR_WR_REF_SHIFT);
 687        }
 688
 689        /* Start the hardware */
 690        xilinx_vdma_start(chan);
 691
 692        if (chan->err)
 693                goto out_unlock;
 694
 695        /* Start the transfer */
 696        if (chan->has_sg) {
 697                vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
 698        } else {
 699                struct xilinx_vdma_tx_segment *segment, *last = NULL;
 700                int i = 0;
 701
 702                list_for_each_entry(segment, &desc->segments, node) {
 703                        vdma_desc_write(chan,
 704                                        XILINX_VDMA_REG_START_ADDRESS(i++),
 705                                        segment->hw.buf_addr);
 706                        last = segment;
 707                }
 708
 709                if (!last)
 710                        goto out_unlock;
 711
  712                /* HW expects these parameters to be the same for one transaction */
 713                vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
 714                vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE,
 715                                last->hw.stride);
 716                vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
 717        }
 718
 719        list_del(&desc->node);
 720        chan->active_desc = desc;
 721
 722out_unlock:
 723        spin_unlock_irqrestore(&chan->lock, flags);
 724}
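/*
 * Reading aid for the register-direct path above (no additional behaviour
 * implied): DMACR is updated for frame-count/circular/park mode, PARK_PTR
 * selects the parked frame when requested, the channel is started, every
 * queued frame buffer is written to a START_ADDRESS slot, and HSIZE and
 * FRMDLY_STRIDE are programmed before VSIZE. VSIZE is written last because,
 * in the VDMA register-direct programming model, that write is what commits
 * the new transfer parameters.
 */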
 725
 726/**
 727 * xilinx_vdma_issue_pending - Issue pending transactions
 728 * @dchan: DMA channel
 729 */
 730static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 731{
 732        struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
 733
 734        xilinx_vdma_start_transfer(chan);
 735}
 736
 737/**
 738 * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete
 739 * @chan : xilinx DMA channel
 740 *
 741 * CONTEXT: hardirq
 742 */
 743static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 744{
 745        struct xilinx_vdma_tx_descriptor *desc;
 746        unsigned long flags;
 747
 748        spin_lock_irqsave(&chan->lock, flags);
 749
 750        desc = chan->active_desc;
 751        if (!desc) {
 752                dev_dbg(chan->dev, "no running descriptors\n");
 753                goto out_unlock;
 754        }
 755
 756        dma_cookie_complete(&desc->async_tx);
 757        list_add_tail(&desc->node, &chan->done_list);
 758
 759        chan->active_desc = NULL;
 760
 761out_unlock:
 762        spin_unlock_irqrestore(&chan->lock, flags);
 763}
 764
 765/**
 766 * xilinx_vdma_reset - Reset VDMA channel
 767 * @chan: Driver specific VDMA channel
 768 *
 769 * Return: '0' on success and failure value on error
 770 */
 771static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 772{
 773        int loop = XILINX_VDMA_LOOP_COUNT;
 774        u32 tmp;
 775
 776        vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
 777
 778        tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
 779                XILINX_VDMA_DMACR_RESET;
 780
 781        /* Wait for the hardware to finish reset */
 782        do {
 783                tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
 784                        XILINX_VDMA_DMACR_RESET;
 785        } while (loop-- && tmp);
 786
 787        if (!loop) {
 788                dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
 789                        vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
 790                        vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 791                return -ETIMEDOUT;
 792        }
 793
 794        chan->err = false;
 795
 796        return 0;
 797}
 798
 799/**
 800 * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts
 801 * @chan: Driver specific VDMA channel
 802 *
 803 * Return: '0' on success and failure value on error
 804 */
 805static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan)
 806{
 807        int err;
 808
 809        /* Reset VDMA */
 810        err = xilinx_vdma_reset(chan);
 811        if (err)
 812                return err;
 813
 814        /* Enable interrupts */
 815        vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR,
 816                      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
 817
 818        return 0;
 819}
 820
 821/**
 822 * xilinx_vdma_irq_handler - VDMA Interrupt handler
 823 * @irq: IRQ number
 824 * @data: Pointer to the Xilinx VDMA channel structure
 825 *
 826 * Return: IRQ_HANDLED/IRQ_NONE
 827 */
 828static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
 829{
 830        struct xilinx_vdma_chan *chan = data;
 831        u32 status;
 832
 833        /* Read the status and ack the interrupts. */
 834        status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR);
 835        if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK))
 836                return IRQ_NONE;
 837
 838        vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
 839                        status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
 840
 841        if (status & XILINX_VDMA_DMASR_ERR_IRQ) {
 842                /*
 843                 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
 844                 * error is recoverable, ignore it. Otherwise flag the error.
 845                 *
 846                 * Only recoverable errors can be cleared in the DMASR register,
  847                 * so make sure not to set the other error bits to 1.
 848                 */
 849                u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK;
 850                vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR,
 851                                errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK);
 852
 853                if (!chan->flush_on_fsync ||
 854                    (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) {
 855                        dev_err(chan->dev,
 856                                "Channel %p has errors %x, cdr %x tdr %x\n",
 857                                chan, errors,
 858                                vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC),
 859                                vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC));
 860                        chan->err = true;
 861                }
 862        }
 863
 864        if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) {
 865                /*
  866                 * The delay counter expired: the device is taking longer to
  867                 * complete the transfer than the configured inter-packet delay.
 868                 */
 869                dev_dbg(chan->dev, "Inter-packet latency too long\n");
 870        }
 871
 872        if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
 873                xilinx_vdma_complete_descriptor(chan);
 874                xilinx_vdma_start_transfer(chan);
 875        }
 876
 877        tasklet_schedule(&chan->tasklet);
 878        return IRQ_HANDLED;
 879}
 880
 881/**
 882 * xilinx_vdma_tx_submit - Submit DMA transaction
 883 * @tx: Async transaction descriptor
 884 *
 885 * Return: cookie value on success and failure value on error
 886 */
 887static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
 888{
 889        struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx);
 890        struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan);
 891        dma_cookie_t cookie;
 892        unsigned long flags;
 893        int err;
 894
 895        if (chan->err) {
 896                /*
 897                 * If reset fails, need to hard reset the system.
 898                 * Channel is no longer functional
 899                 */
 900                err = xilinx_vdma_chan_reset(chan);
 901                if (err < 0)
 902                        return err;
 903        }
 904
 905        spin_lock_irqsave(&chan->lock, flags);
 906
 907        cookie = dma_cookie_assign(tx);
 908
 909        /* Append the transaction to the pending transactions queue. */
 910        list_add_tail(&desc->node, &chan->pending_list);
 911
 912        /* Free the allocated desc */
 913        chan->allocated_desc = NULL;
 914
 915        spin_unlock_irqrestore(&chan->lock, flags);
 916
 917        return cookie;
 918}
 919
 920/**
 921 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
  922 *      interleaved DMA_SLAVE transaction
 923 * @dchan: DMA channel
 924 * @xt: Interleaved template pointer
 925 * @flags: transfer ack flags
 926 *
 927 * Return: Async transaction descriptor on success and NULL on failure
 928 */
 929static struct dma_async_tx_descriptor *
 930xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 931                                 struct dma_interleaved_template *xt,
 932                                 unsigned long flags)
 933{
 934        struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
 935        struct xilinx_vdma_tx_descriptor *desc;
 936        struct xilinx_vdma_tx_segment *segment, *prev = NULL;
 937        struct xilinx_vdma_desc_hw *hw;
 938
 939        if (!is_slave_direction(xt->dir))
 940                return NULL;
 941
 942        if (!xt->numf || !xt->sgl[0].size)
 943                return NULL;
 944
 945        if (xt->frame_size != 1)
 946                return NULL;
 947
 948        /* Allocate a transaction descriptor. */
 949        desc = xilinx_vdma_alloc_tx_descriptor(chan);
 950        if (!desc)
 951                return NULL;
 952
 953        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
 954        desc->async_tx.tx_submit = xilinx_vdma_tx_submit;
 955        async_tx_ack(&desc->async_tx);
 956
 957        /* Allocate the link descriptor from DMA pool */
 958        segment = xilinx_vdma_alloc_tx_segment(chan);
 959        if (!segment)
 960                goto error;
 961
 962        /* Fill in the hardware descriptor */
 963        hw = &segment->hw;
 964        hw->vsize = xt->numf;
 965        hw->hsize = xt->sgl[0].size;
 966        hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
 967                        XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
 968        hw->stride |= chan->config.frm_dly <<
 969                        XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
 970
 971        if (xt->dir != DMA_MEM_TO_DEV)
 972                hw->buf_addr = xt->dst_start;
 973        else
 974                hw->buf_addr = xt->src_start;
 975
  976        /* Link the previous segment's next-descriptor pointer to the new segment */
 977        if (!list_empty(&desc->segments)) {
 978                prev = list_last_entry(&desc->segments,
 979                                       struct xilinx_vdma_tx_segment, node);
 980                prev->hw.next_desc = segment->phys;
 981        }
 982
 983        /* Insert the segment into the descriptor segments list. */
 984        list_add_tail(&segment->node, &desc->segments);
 985
 986        prev = segment;
 987
 988        /* Link the last hardware descriptor with the first. */
 989        segment = list_first_entry(&desc->segments,
 990                                   struct xilinx_vdma_tx_segment, node);
 991        prev->hw.next_desc = segment->phys;
 992
 993        return &desc->async_tx;
 994
 995error:
 996        xilinx_vdma_free_tx_descriptor(chan, desc);
 997        return NULL;
 998}
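/*
 * How the template maps onto the hardware descriptor built above (sketch):
 *
 *	xt->numf                         -> hw->vsize  (lines per frame)
 *	xt->sgl[0].size                  -> hw->hsize  (bytes per line)
 *	xt->sgl[0].size + xt->sgl[0].icg -> stride bits of hw->stride
 *	chan->config.frm_dly             -> frame-delay bits of hw->stride
 *
 * Because the channel keeps reusing chan->allocated_desc until tx_submit()
 * runs, calling this prep routine once per frame buffer before submitting
 * accumulates one segment per frame store in a single transaction.
 */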
 999
1000/**
1001 * xilinx_vdma_terminate_all - Halt the channel and free descriptors
1002 * @chan: Driver specific VDMA Channel pointer
1003 */
1004static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan)
1005{
1006        /* Halt the DMA engine */
1007        xilinx_vdma_halt(chan);
1008
1009        /* Remove and free all of the descriptors in the lists */
1010        xilinx_vdma_free_descriptors(chan);
1011}
1012
1013/**
1014 * xilinx_vdma_channel_set_config - Configure VDMA channel
1015 * Run-time configuration for Axi VDMA, supports:
1016 * . halt the channel
1017 * . configure interrupt coalescing and inter-packet delay threshold
1018 * . start/stop parking
1019 * . enable genlock
1020 *
1021 * @dchan: DMA channel
1022 * @cfg: VDMA device configuration pointer
1023 *
1024 * Return: '0' on success and failure value on error
1025 */
1026int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
1027                                        struct xilinx_vdma_config *cfg)
1028{
1029        struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
1030        u32 dmacr;
1031
1032        if (cfg->reset)
1033                return xilinx_vdma_chan_reset(chan);
1034
1035        dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
1036
1037        chan->config.frm_dly = cfg->frm_dly;
1038        chan->config.park = cfg->park;
1039
1040        /* genlock settings */
1041        chan->config.gen_lock = cfg->gen_lock;
1042        chan->config.master = cfg->master;
1043
1044        if (cfg->gen_lock && chan->genlock) {
1045                dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN;
1046                dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT;
1047        }
1048
1049        chan->config.frm_cnt_en = cfg->frm_cnt_en;
1050        if (cfg->park)
1051                chan->config.park_frm = cfg->park_frm;
1052        else
1053                chan->config.park_frm = -1;
1054
1055        chan->config.coalesc = cfg->coalesc;
1056        chan->config.delay = cfg->delay;
1057
1058        if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) {
1059                dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT;
1060                chan->config.coalesc = cfg->coalesc;
1061        }
1062
1063        if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) {
1064                dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT;
1065                chan->config.delay = cfg->delay;
1066        }
1067
1068        /* FSync Source selection */
1069        dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK;
1070        dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT;
1071
1072        vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr);
1073
1074        return 0;
1075}
1076EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
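/*
 * Illustrative caller sketch for the configuration hook above; the field
 * values are assumptions for a channel that parks on frame 0 with an
 * interrupt-coalescing threshold of one frame:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.park     = 1,
 *		.park_frm = 0,
 *		.coalesc  = 1,
 *	};
 *
 *	err = xilinx_vdma_channel_set_config(chan, &cfg);
 *
 * Genlock (cfg.gen_lock) only takes effect when the channel was also probed
 * with xlnx,genlock-mode, since both chan->genlock and cfg->gen_lock are
 * checked above.
 */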
1077
1078/**
1079 * xilinx_vdma_device_control - Configure DMA channel of the device
1080 * @dchan: DMA Channel pointer
1081 * @cmd: DMA control command
1082 * @arg: Channel configuration
1083 *
1084 * Return: '0' on success and failure value on error
1085 */
1086static int xilinx_vdma_device_control(struct dma_chan *dchan,
1087                                      enum dma_ctrl_cmd cmd, unsigned long arg)
1088{
1089        struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
1090
1091        if (cmd != DMA_TERMINATE_ALL)
1092                return -ENXIO;
1093
1094        xilinx_vdma_terminate_all(chan);
1095
1096        return 0;
1097}
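/*
 * Clients do not invoke this hook directly; dmaengine_terminate_all(chan)
 * issues DMA_TERMINATE_ALL through device_control and therefore ends up in
 * xilinx_vdma_terminate_all() above:
 *
 *	dmaengine_terminate_all(chan);	// halts the channel, frees descriptors
 */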
1098
1099/* -----------------------------------------------------------------------------
1100 * Probe and remove
1101 */
1102
1103/**
1104 * xilinx_vdma_chan_remove - Per Channel remove function
1105 * @chan: Driver specific VDMA channel
1106 */
1107static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
1108{
1109        /* Disable all interrupts */
1110        vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR,
1111                      XILINX_VDMA_DMAXR_ALL_IRQ_MASK);
1112
1113        if (chan->irq > 0)
1114                free_irq(chan->irq, chan);
1115
1116        tasklet_kill(&chan->tasklet);
1117
1118        list_del(&chan->common.device_node);
1119}
1120
1121/**
1122 * xilinx_vdma_chan_probe - Per Channel Probing
 1123 * It gets the channel features from the device tree entry and
 1124 * initializes the special channel handling routines.
1125 *
1126 * @xdev: Driver specific device structure
1127 * @node: Device node
1128 *
1129 * Return: '0' on success and failure value on error
1130 */
1131static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
1132                                  struct device_node *node)
1133{
1134        struct xilinx_vdma_chan *chan;
1135        bool has_dre = false;
1136        u32 value, width;
1137        int err;
1138
1139        /* Allocate and initialize the channel structure */
1140        chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
1141        if (!chan)
1142                return -ENOMEM;
1143
1144        chan->dev = xdev->dev;
1145        chan->xdev = xdev;
1146        chan->has_sg = xdev->has_sg;
1147
1148        spin_lock_init(&chan->lock);
1149        INIT_LIST_HEAD(&chan->pending_list);
1150        INIT_LIST_HEAD(&chan->done_list);
1151
1152        /* Retrieve the channel properties from the device tree */
1153        has_dre = of_property_read_bool(node, "xlnx,include-dre");
1154
1155        chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
1156
1157        err = of_property_read_u32(node, "xlnx,datawidth", &value);
1158        if (err) {
1159                dev_err(xdev->dev, "missing xlnx,datawidth property\n");
1160                return err;
1161        }
1162        width = value >> 3; /* Convert bits to bytes */
1163
1164        /* If data width is greater than 8 bytes, DRE is not in hw */
1165        if (width > 8)
1166                has_dre = false;
1167
1168        if (!has_dre)
1169                xdev->common.copy_align = fls(width - 1);
1170
1171        if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) {
1172                chan->direction = DMA_MEM_TO_DEV;
1173                chan->id = 0;
1174
1175                chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET;
1176                chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
1177
1178                if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
1179                    xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S)
1180                        chan->flush_on_fsync = true;
1181        } else if (of_device_is_compatible(node,
1182                                            "xlnx,axi-vdma-s2mm-channel")) {
1183                chan->direction = DMA_DEV_TO_MEM;
1184                chan->id = 1;
1185
1186                chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET;
1187                chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
1188
1189                if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH ||
1190                    xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM)
1191                        chan->flush_on_fsync = true;
1192        } else {
1193                dev_err(xdev->dev, "Invalid channel compatible node\n");
1194                return -EINVAL;
1195        }
1196
1197        /* Request the interrupt */
1198        chan->irq = irq_of_parse_and_map(node, 0);
1199        err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED,
1200                          "xilinx-vdma-controller", chan);
1201        if (err) {
1202                dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
1203                return err;
1204        }
1205
1206        /* Initialize the tasklet */
1207        tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet,
1208                        (unsigned long)chan);
1209
1210        /*
1211         * Initialize the DMA channel and add it to the DMA engine channels
1212         * list.
1213         */
1214        chan->common.device = &xdev->common;
1215
1216        list_add_tail(&chan->common.device_node, &xdev->common.channels);
1217        xdev->chan[chan->id] = chan;
1218
1219        /* Reset the channel */
1220        err = xilinx_vdma_chan_reset(chan);
1221        if (err < 0) {
1222                dev_err(xdev->dev, "Reset channel failed\n");
1223                return err;
1224        }
1225
1226        return 0;
1227}
1228
1229/**
1230 * of_dma_xilinx_xlate - Translation function
1231 * @dma_spec: Pointer to DMA specifier as found in the device tree
1232 * @ofdma: Pointer to DMA controller data
1233 *
1234 * Return: DMA channel pointer on success and NULL on error
1235 */
1236static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
1237                                                struct of_dma *ofdma)
1238{
1239        struct xilinx_vdma_device *xdev = ofdma->of_dma_data;
1240        int chan_id = dma_spec->args[0];
1241
1242        if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE)
1243                return NULL;
1244
1245        return dma_get_slave_channel(&xdev->chan[chan_id]->common);
1246}
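/*
 * Device tree sketch matching the xlate above and the properties read in
 * xilinx_vdma_probe()/xilinx_vdma_chan_probe(). Addresses, interrupt
 * specifiers and the client node are made-up examples:
 *
 *	vdma: dma@40030000 {
 *		compatible = "xlnx,axi-vdma-1.00.a";
 *		reg = <0x40030000 0x1000>;
 *		#dma-cells = <1>;
 *		xlnx,num-fstores = <3>;
 *		xlnx,flush-fsync = <1>;
 *
 *		dma-channel@40030000 {
 *			compatible = "xlnx,axi-vdma-mm2s-channel";
 *			interrupts = <0 54 4>;
 *			xlnx,datawidth = <0x40>;
 *		};
 *		dma-channel@40030030 {
 *			compatible = "xlnx,axi-vdma-s2mm-channel";
 *			interrupts = <0 53 4>;
 *			xlnx,datawidth = <0x40>;
 *			xlnx,genlock-mode;
 *		};
 *	};
 *
 *	display {
 *		dmas = <&vdma 0>;	// args[0] selects chan[0] (MM2S)
 *		dma-names = "vdma0";
 *	};
 */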
1247
1248/**
1249 * xilinx_vdma_probe - Driver probe function
1250 * @pdev: Pointer to the platform_device structure
1251 *
1252 * Return: '0' on success and failure value on error
1253 */
1254static int xilinx_vdma_probe(struct platform_device *pdev)
1255{
1256        struct device_node *node = pdev->dev.of_node;
1257        struct xilinx_vdma_device *xdev;
1258        struct device_node *child;
1259        struct resource *io;
1260        u32 num_frames;
1261        int i, err;
1262
1263        /* Allocate and initialize the DMA engine structure */
1264        xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1265        if (!xdev)
1266                return -ENOMEM;
1267
1268        xdev->dev = &pdev->dev;
1269
1270        /* Request and map I/O memory */
1271        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1272        xdev->regs = devm_ioremap_resource(&pdev->dev, io);
1273        if (IS_ERR(xdev->regs))
1274                return PTR_ERR(xdev->regs);
1275
1276        /* Retrieve the DMA engine properties from the device tree */
1277        xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
1278
1279        err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames);
1280        if (err < 0) {
1281                dev_err(xdev->dev, "missing xlnx,num-fstores property\n");
1282                return err;
1283        }
1284
1285        err = of_property_read_u32(node, "xlnx,flush-fsync",
1286                                        &xdev->flush_on_fsync);
1287        if (err < 0)
1288                dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");
1289
1290        /* Initialize the DMA engine */
1291        xdev->common.dev = &pdev->dev;
1292
1293        INIT_LIST_HEAD(&xdev->common.channels);
1294        dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
1295        dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
1296
1297        xdev->common.device_alloc_chan_resources =
1298                                xilinx_vdma_alloc_chan_resources;
1299        xdev->common.device_free_chan_resources =
1300                                xilinx_vdma_free_chan_resources;
1301        xdev->common.device_prep_interleaved_dma =
1302                                xilinx_vdma_dma_prep_interleaved;
1303        xdev->common.device_control = xilinx_vdma_device_control;
1304        xdev->common.device_tx_status = xilinx_vdma_tx_status;
1305        xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
1306
1307        platform_set_drvdata(pdev, xdev);
1308
1309        /* Initialize the channels */
1310        for_each_child_of_node(node, child) {
1311                err = xilinx_vdma_chan_probe(xdev, child);
1312                if (err < 0)
1313                        goto error;
1314        }
1315
1316        for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
1317                if (xdev->chan[i])
1318                        xdev->chan[i]->num_frms = num_frames;
1319
1320        /* Register the DMA engine with the core */
1321        dma_async_device_register(&xdev->common);
1322
1323        err = of_dma_controller_register(node, of_dma_xilinx_xlate,
1324                                         xdev);
1325        if (err < 0) {
1326                dev_err(&pdev->dev, "Unable to register DMA to DT\n");
1327                dma_async_device_unregister(&xdev->common);
1328                goto error;
1329        }
1330
1331        dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
1332
1333        return 0;
1334
1335error:
1336        for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
1337                if (xdev->chan[i])
1338                        xilinx_vdma_chan_remove(xdev->chan[i]);
1339
1340        return err;
1341}
1342
1343/**
1344 * xilinx_vdma_remove - Driver remove function
1345 * @pdev: Pointer to the platform_device structure
1346 *
1347 * Return: Always '0'
1348 */
1349static int xilinx_vdma_remove(struct platform_device *pdev)
1350{
1351        struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev);
1352        int i;
1353
1354        of_dma_controller_free(pdev->dev.of_node);
1355
1356        dma_async_device_unregister(&xdev->common);
1357
1358        for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++)
1359                if (xdev->chan[i])
1360                        xilinx_vdma_chan_remove(xdev->chan[i]);
1361
1362        return 0;
1363}
1364
1365static const struct of_device_id xilinx_vdma_of_ids[] = {
1366        { .compatible = "xlnx,axi-vdma-1.00.a",},
1367        {}
1368};
1369
1370static struct platform_driver xilinx_vdma_driver = {
1371        .driver = {
1372                .name = "xilinx-vdma",
1373                .of_match_table = xilinx_vdma_of_ids,
1374        },
1375        .probe = xilinx_vdma_probe,
1376        .remove = xilinx_vdma_remove,
1377};
1378
1379module_platform_driver(xilinx_vdma_driver);
1380
1381MODULE_AUTHOR("Xilinx, Inc.");
1382MODULE_DESCRIPTION("Xilinx VDMA driver");
1383MODULE_LICENSE("GPL v2");
1384