linux/drivers/dma/pch_dma.c
   1/*
   2 * Topcliff PCH DMA controller driver
   3 * Copyright (c) 2010 Intel Corporation
   4 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  18 */
  19
  20#include <linux/dmaengine.h>
  21#include <linux/dma-mapping.h>
  22#include <linux/init.h>
  23#include <linux/pci.h>
  24#include <linux/interrupt.h>
  25#include <linux/module.h>
  26#include <linux/pch_dma.h>
  27
  28#define DRV_NAME "pch-dma"
  29
  30#define DMA_CTL0_DISABLE                0x0
  31#define DMA_CTL0_SG                     0x1
  32#define DMA_CTL0_ONESHOT                0x2
  33#define DMA_CTL0_MODE_MASK_BITS         0x3
  34#define DMA_CTL0_DIR_SHIFT_BITS         2
  35#define DMA_CTL0_BITS_PER_CH            4
  36
  37#define DMA_CTL2_START_SHIFT_BITS       8
  38#define DMA_CTL2_IRQ_ENABLE_MASK        ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
  39
  40#define DMA_STATUS_IDLE                 0x0
  41#define DMA_STATUS_DESC_READ            0x1
  42#define DMA_STATUS_WAIT                 0x2
  43#define DMA_STATUS_ACCESS               0x3
  44#define DMA_STATUS_BITS_PER_CH          2
  45#define DMA_STATUS_MASK_BITS            0x3
  46#define DMA_STATUS_SHIFT_BITS           16
  47#define DMA_STATUS_IRQ(x)               (0x1 << (x))
  48#define DMA_STATUS0_ERR(x)              (0x1 << ((x) + 8))
  49#define DMA_STATUS2_ERR(x)              (0x1 << (x))
  50
  51#define DMA_DESC_WIDTH_SHIFT_BITS       12
  52#define DMA_DESC_WIDTH_1_BYTE           (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
  53#define DMA_DESC_WIDTH_2_BYTES          (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
  54#define DMA_DESC_WIDTH_4_BYTES          (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
  55#define DMA_DESC_MAX_COUNT_1_BYTE       0x3FF
  56#define DMA_DESC_MAX_COUNT_2_BYTES      0x3FF
  57#define DMA_DESC_MAX_COUNT_4_BYTES      0x7FF
  58#define DMA_DESC_END_WITHOUT_IRQ        0x0
  59#define DMA_DESC_END_WITH_IRQ           0x1
  60#define DMA_DESC_FOLLOW_WITHOUT_IRQ     0x2
  61#define DMA_DESC_FOLLOW_WITH_IRQ        0x3
  62
  63#define MAX_CHAN_NR                     12
  64
  65#define DMA_MASK_CTL0_MODE      0x33333333
  66#define DMA_MASK_CTL2_MODE      0x00003333
  67
  68static unsigned int init_nr_desc_per_channel = 64;
  69module_param(init_nr_desc_per_channel, uint, 0644);
  70MODULE_PARM_DESC(init_nr_desc_per_channel,
  71                 "initial descriptors per channel (default: 64)");
  72
  73struct pch_dma_desc_regs {
  74        u32     dev_addr;
  75        u32     mem_addr;
  76        u32     size;
  77        u32     next;
  78};
  79
  80struct pch_dma_regs {
  81        u32     dma_ctl0;
  82        u32     dma_ctl1;
  83        u32     dma_ctl2;
  84        u32     dma_ctl3;
  85        u32     dma_sts0;
  86        u32     dma_sts1;
  87        u32     dma_sts2;
  88        u32     reserved3;
  89        struct pch_dma_desc_regs desc[MAX_CHAN_NR];
  90};
  91
  92struct pch_dma_desc {
  93        struct pch_dma_desc_regs regs;
  94        struct dma_async_tx_descriptor txd;
  95        struct list_head        desc_node;
  96        struct list_head        tx_list;
  97};
  98
  99struct pch_dma_chan {
 100        struct dma_chan         chan;
 101        void __iomem *membase;
 102        enum dma_transfer_direction dir;
 103        struct tasklet_struct   tasklet;
 104        unsigned long           err_status;
 105
 106        spinlock_t              lock;
 107
 108        dma_cookie_t            completed_cookie;
 109        struct list_head        active_list;
 110        struct list_head        queue;
 111        struct list_head        free_list;
 112        unsigned int            descs_allocated;
 113};
 114
 115#define PDC_DEV_ADDR    0x00
 116#define PDC_MEM_ADDR    0x04
 117#define PDC_SIZE        0x08
 118#define PDC_NEXT        0x0C
 119
 120#define channel_readl(pdc, name) \
 121        readl((pdc)->membase + PDC_##name)
 122#define channel_writel(pdc, name, val) \
 123        writel((val), (pdc)->membase + PDC_##name)
 124
 125struct pch_dma {
 126        struct dma_device       dma;
 127        void __iomem *membase;
 128        struct pci_pool         *pool;
 129        struct pch_dma_regs     regs;
 130        struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
 131        struct pch_dma_chan     channels[MAX_CHAN_NR];
 132};
 133
 134#define PCH_DMA_CTL0    0x00
 135#define PCH_DMA_CTL1    0x04
 136#define PCH_DMA_CTL2    0x08
 137#define PCH_DMA_CTL3    0x0C
 138#define PCH_DMA_STS0    0x10
 139#define PCH_DMA_STS1    0x14
 140#define PCH_DMA_STS2    0x18
 141
 142#define dma_readl(pd, name) \
 143        readl((pd)->membase + PCH_DMA_##name)
 144#define dma_writel(pd, name, val) \
 145        writel((val), (pd)->membase + PCH_DMA_##name)
 146
 147static inline
 148struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
 149{
 150        return container_of(txd, struct pch_dma_desc, txd);
 151}
 152
 153static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
 154{
 155        return container_of(chan, struct pch_dma_chan, chan);
 156}
 157
 158static inline struct pch_dma *to_pd(struct dma_device *ddev)
 159{
 160        return container_of(ddev, struct pch_dma, dma);
 161}
 162
 163static inline struct device *chan2dev(struct dma_chan *chan)
 164{
 165        return &chan->dev->device;
 166}
 167
 168static inline struct device *chan2parent(struct dma_chan *chan)
 169{
 170        return chan->dev->device.parent;
 171}
 172
 173static inline
 174struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
 175{
 176        return list_first_entry(&pd_chan->active_list,
 177                                struct pch_dma_desc, desc_node);
 178}
 179
 180static inline
 181struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
 182{
 183        return list_first_entry(&pd_chan->queue,
 184                                struct pch_dma_desc, desc_node);
 185}
 186
 187static void pdc_enable_irq(struct dma_chan *chan, int enable)
 188{
 189        struct pch_dma *pd = to_pd(chan->device);
 190        u32 val;
 191        int pos;
 192
 193        if (chan->chan_id < 8)
 194                pos = chan->chan_id;
 195        else
 196                pos = chan->chan_id + 8;
 197
 198        val = dma_readl(pd, CTL2);
 199
 200        if (enable)
 201                val |= 0x1 << pos;
 202        else
 203                val &= ~(0x1 << pos);
 204
 205        dma_writel(pd, CTL2, val);
 206
 207        dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
 208                chan->chan_id, val);
 209}
 210
 211static void pdc_set_dir(struct dma_chan *chan)
 212{
 213        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 214        struct pch_dma *pd = to_pd(chan->device);
 215        u32 val;
 216        u32 mask_mode;
 217        u32 mask_ctl;
 218
 219        if (chan->chan_id < 8) {
 220                val = dma_readl(pd, CTL0);
 221
 222                mask_mode = DMA_CTL0_MODE_MASK_BITS <<
 223                                        (DMA_CTL0_BITS_PER_CH * chan->chan_id);
 224                mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
 225                                       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
 226                val &= mask_mode;
 227                if (pd_chan->dir == DMA_MEM_TO_DEV)
 228                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 229                                       DMA_CTL0_DIR_SHIFT_BITS);
 230                else
 231                        val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 232                                         DMA_CTL0_DIR_SHIFT_BITS));
 233
 234                val |= mask_ctl;
 235                dma_writel(pd, CTL0, val);
 236        } else {
 237                int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
 238                val = dma_readl(pd, CTL3);
 239
 240                mask_mode = DMA_CTL0_MODE_MASK_BITS <<
 241                                                (DMA_CTL0_BITS_PER_CH * ch);
 242                mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
 243                                                 (DMA_CTL0_BITS_PER_CH * ch));
 244                val &= mask_mode;
 245                if (pd_chan->dir == DMA_MEM_TO_DEV)
 246                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
 247                                       DMA_CTL0_DIR_SHIFT_BITS);
 248                else
 249                        val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
 250                                         DMA_CTL0_DIR_SHIFT_BITS));
 251                val |= mask_ctl;
 252                dma_writel(pd, CTL3, val);
 253        }
 254
 255        dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
 256                chan->chan_id, val);
 257}
 258
 259static void pdc_set_mode(struct dma_chan *chan, u32 mode)
 260{
 261        struct pch_dma *pd = to_pd(chan->device);
 262        u32 val;
 263        u32 mask_ctl;
 264        u32 mask_dir;
 265
 266        if (chan->chan_id < 8) {
 267                mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
 268                           (DMA_CTL0_BITS_PER_CH * chan->chan_id));
 269                mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
 270                                 DMA_CTL0_DIR_SHIFT_BITS);
 271                val = dma_readl(pd, CTL0);
 272                val &= mask_dir;
 273                val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
 274                val |= mask_ctl;
 275                dma_writel(pd, CTL0, val);
 276        } else {
 277                int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
 278                mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
 279                                                 (DMA_CTL0_BITS_PER_CH * ch));
 280                mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
 281                                 DMA_CTL0_DIR_SHIFT_BITS);
 282                val = dma_readl(pd, CTL3);
 283                val &= mask_dir;
 284                val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
 285                val |= mask_ctl;
 286                dma_writel(pd, CTL3, val);
 287        }
 288
 289        dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
 290                chan->chan_id, val);
 291}
 292
 293static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
 294{
 295        struct pch_dma *pd = to_pd(pd_chan->chan.device);
 296        u32 val;
 297
 298        val = dma_readl(pd, STS0);
 299        return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
 300                        DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
 301}
 302
 303static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
 304{
 305        struct pch_dma *pd = to_pd(pd_chan->chan.device);
 306        u32 val;
 307
 308        val = dma_readl(pd, STS2);
 309        return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
 310                        DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
 311}
 312
 313static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
 314{
 315        u32 sts;
 316
 317        if (pd_chan->chan.chan_id < 8)
 318                sts = pdc_get_status0(pd_chan);
 319        else
 320                sts = pdc_get_status2(pd_chan);
 321
 322
 323        if (sts == DMA_STATUS_IDLE)
 324                return true;
 325        else
 326                return false;
 327}
 328
 329static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
 330{
 331        if (!pdc_is_idle(pd_chan)) {
 332                dev_err(chan2dev(&pd_chan->chan),
 333                        "BUG: Attempt to start non-idle channel\n");
 334                return;
 335        }
 336
 337        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
 338                pd_chan->chan.chan_id, desc->regs.dev_addr);
 339        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
 340                pd_chan->chan.chan_id, desc->regs.mem_addr);
 341        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
 342                pd_chan->chan.chan_id, desc->regs.size);
 343        dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
 344                pd_chan->chan.chan_id, desc->regs.next);
 345
 346        if (list_empty(&desc->tx_list)) {
 347                channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
 348                channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
 349                channel_writel(pd_chan, SIZE, desc->regs.size);
 350                channel_writel(pd_chan, NEXT, desc->regs.next);
 351                pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
 352        } else {
 353                channel_writel(pd_chan, NEXT, desc->txd.phys);
 354                pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
 355        }
 356}
 357
 358static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
 359                               struct pch_dma_desc *desc)
 360{
 361        struct dma_async_tx_descriptor *txd = &desc->txd;
 362        dma_async_tx_callback callback = txd->callback;
 363        void *param = txd->callback_param;
 364
 365        list_splice_init(&desc->tx_list, &pd_chan->free_list);
 366        list_move(&desc->desc_node, &pd_chan->free_list);
 367
 368        if (callback)
 369                callback(param);
 370}
 371
 372static void pdc_complete_all(struct pch_dma_chan *pd_chan)
 373{
 374        struct pch_dma_desc *desc, *_d;
 375        LIST_HEAD(list);
 376
 377        BUG_ON(!pdc_is_idle(pd_chan));
 378
 379        if (!list_empty(&pd_chan->queue))
 380                pdc_dostart(pd_chan, pdc_first_queued(pd_chan));
 381
 382        list_splice_init(&pd_chan->active_list, &list);
 383        list_splice_init(&pd_chan->queue, &pd_chan->active_list);
 384
 385        list_for_each_entry_safe(desc, _d, &list, desc_node)
 386                pdc_chain_complete(pd_chan, desc);
 387}
 388
 389static void pdc_handle_error(struct pch_dma_chan *pd_chan)
 390{
 391        struct pch_dma_desc *bad_desc;
 392
 393        bad_desc = pdc_first_active(pd_chan);
 394        list_del(&bad_desc->desc_node);
 395
 396        list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);
 397
 398        if (!list_empty(&pd_chan->active_list))
 399                pdc_dostart(pd_chan, pdc_first_active(pd_chan));
 400
 401        dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
 402        dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
 403                 bad_desc->txd.cookie);
 404
 405        pdc_chain_complete(pd_chan, bad_desc);
 406}
 407
 408static void pdc_advance_work(struct pch_dma_chan *pd_chan)
 409{
 410        if (list_empty(&pd_chan->active_list) ||
 411                list_is_singular(&pd_chan->active_list)) {
 412                pdc_complete_all(pd_chan);
 413        } else {
 414                pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
 415                pdc_dostart(pd_chan, pdc_first_active(pd_chan));
 416        }
 417}
 418
 419static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
 420                                      struct pch_dma_desc *desc)
 421{
 422        dma_cookie_t cookie = pd_chan->chan.cookie;
 423
 424        if (++cookie < 0)
 425                cookie = 1;
 426
 427        pd_chan->chan.cookie = cookie;
 428        desc->txd.cookie = cookie;
 429
 430        return cookie;
 431}
 432
 433static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
 434{
 435        struct pch_dma_desc *desc = to_pd_desc(txd);
 436        struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
 437        dma_cookie_t cookie;
 438
 439        spin_lock(&pd_chan->lock);
 440        cookie = pdc_assign_cookie(pd_chan, desc);
 441
 442        if (list_empty(&pd_chan->active_list)) {
 443                list_add_tail(&desc->desc_node, &pd_chan->active_list);
 444                pdc_dostart(pd_chan, desc);
 445        } else {
 446                list_add_tail(&desc->desc_node, &pd_chan->queue);
 447        }
 448
 449        spin_unlock(&pd_chan->lock);
 450        return cookie;
 451}
 452
 453static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
 454{
 455        struct pch_dma_desc *desc = NULL;
 456        struct pch_dma *pd = to_pd(chan->device);
 457        dma_addr_t addr;
 458
 459        desc = pci_pool_alloc(pd->pool, flags, &addr);
 460        if (desc) {
 461                memset(desc, 0, sizeof(struct pch_dma_desc));
 462                INIT_LIST_HEAD(&desc->tx_list);
 463                dma_async_tx_descriptor_init(&desc->txd, chan);
 464                desc->txd.tx_submit = pd_tx_submit;
 465                desc->txd.flags = DMA_CTRL_ACK;
 466                desc->txd.phys = addr;
 467        }
 468
 469        return desc;
 470}
 471
 472static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
 473{
 474        struct pch_dma_desc *desc, *_d;
 475        struct pch_dma_desc *ret = NULL;
 476        int i = 0;
 477
 478        spin_lock(&pd_chan->lock);
 479        list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
 480                i++;
 481                if (async_tx_test_ack(&desc->txd)) {
 482                        list_del(&desc->desc_node);
 483                        ret = desc;
 484                        break;
 485                }
 486                dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
 487        }
 488        spin_unlock(&pd_chan->lock);
 489        dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);
 490
 491        if (!ret) {
 492                ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
 493                if (ret) {
 494                        spin_lock(&pd_chan->lock);
 495                        pd_chan->descs_allocated++;
 496                        spin_unlock(&pd_chan->lock);
 497                } else {
 498                        dev_err(chan2dev(&pd_chan->chan),
 499                                "failed to alloc desc\n");
 500                }
 501        }
 502
 503        return ret;
 504}
 505
 506static void pdc_desc_put(struct pch_dma_chan *pd_chan,
 507                         struct pch_dma_desc *desc)
 508{
 509        if (desc) {
 510                spin_lock(&pd_chan->lock);
 511                list_splice_init(&desc->tx_list, &pd_chan->free_list);
 512                list_add(&desc->desc_node, &pd_chan->free_list);
 513                spin_unlock(&pd_chan->lock);
 514        }
 515}
 516
 517static int pd_alloc_chan_resources(struct dma_chan *chan)
 518{
 519        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 520        struct pch_dma_desc *desc;
 521        LIST_HEAD(tmp_list);
 522        int i;
 523
 524        if (!pdc_is_idle(pd_chan)) {
 525                dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
 526                return -EIO;
 527        }
 528
 529        if (!list_empty(&pd_chan->free_list))
 530                return pd_chan->descs_allocated;
 531
 532        for (i = 0; i < init_nr_desc_per_channel; i++) {
 533                desc = pdc_alloc_desc(chan, GFP_KERNEL);
 534
 535                if (!desc) {
 536                        dev_warn(chan2dev(chan),
 537                                "Only allocated %d initial descriptors\n", i);
 538                        break;
 539                }
 540
 541                list_add_tail(&desc->desc_node, &tmp_list);
 542        }
 543
 544        spin_lock_irq(&pd_chan->lock);
 545        list_splice(&tmp_list, &pd_chan->free_list);
 546        pd_chan->descs_allocated = i;
 547        pd_chan->completed_cookie = chan->cookie = 1;
 548        spin_unlock_irq(&pd_chan->lock);
 549
 550        pdc_enable_irq(chan, 1);
 551
 552        return pd_chan->descs_allocated;
 553}
 554
 555static void pd_free_chan_resources(struct dma_chan *chan)
 556{
 557        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 558        struct pch_dma *pd = to_pd(chan->device);
 559        struct pch_dma_desc *desc, *_d;
 560        LIST_HEAD(tmp_list);
 561
 562        BUG_ON(!pdc_is_idle(pd_chan));
 563        BUG_ON(!list_empty(&pd_chan->active_list));
 564        BUG_ON(!list_empty(&pd_chan->queue));
 565
 566        spin_lock_irq(&pd_chan->lock);
 567        list_splice_init(&pd_chan->free_list, &tmp_list);
 568        pd_chan->descs_allocated = 0;
 569        spin_unlock_irq(&pd_chan->lock);
 570
 571        list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
 572                pci_pool_free(pd->pool, desc, desc->txd.phys);
 573
 574        pdc_enable_irq(chan, 0);
 575}
 576
 577static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 578                                    struct dma_tx_state *txstate)
 579{
 580        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 581        dma_cookie_t last_used;
 582        dma_cookie_t last_completed;
 583        int ret;
 584
 585        spin_lock_irq(&pd_chan->lock);
 586        last_completed = pd_chan->completed_cookie;
 587        last_used = chan->cookie;
 588        spin_unlock_irq(&pd_chan->lock);
 589
 590        ret = dma_async_is_complete(cookie, last_completed, last_used);
 591
 592        dma_set_tx_state(txstate, last_completed, last_used, 0);
 593
 594        return ret;
 595}
 596
 597static void pd_issue_pending(struct dma_chan *chan)
 598{
 599        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 600
 601        if (pdc_is_idle(pd_chan)) {
 602                spin_lock(&pd_chan->lock);
 603                pdc_advance_work(pd_chan);
 604                spin_unlock(&pd_chan->lock);
 605        }
 606}
 607
 608static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
 609                        struct scatterlist *sgl, unsigned int sg_len,
 610                        enum dma_transfer_direction direction, unsigned long flags)
 611{
 612        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 613        struct pch_dma_slave *pd_slave = chan->private;
 614        struct pch_dma_desc *first = NULL;
 615        struct pch_dma_desc *prev = NULL;
 616        struct pch_dma_desc *desc = NULL;
 617        struct scatterlist *sg;
 618        dma_addr_t reg;
 619        int i;
 620
 621        if (unlikely(!sg_len)) {
 622                dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
 623                return NULL;
 624        }
 625
 626        if (direction == DMA_DEV_TO_MEM)
 627                reg = pd_slave->rx_reg;
 628        else if (direction == DMA_MEM_TO_DEV)
 629                reg = pd_slave->tx_reg;
 630        else
 631                return NULL;
 632
 633        pd_chan->dir = direction;
 634        pdc_set_dir(chan);
 635
 636        for_each_sg(sgl, sg, sg_len, i) {
 637                desc = pdc_desc_get(pd_chan);
 638
 639                if (!desc)
 640                        goto err_desc_get;
 641
 642                desc->regs.dev_addr = reg;
 643                desc->regs.mem_addr = sg_phys(sg);
 644                desc->regs.size = sg_dma_len(sg);
 645                desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
 646
 647                switch (pd_slave->width) {
 648                case PCH_DMA_WIDTH_1_BYTE:
 649                        if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
 650                                goto err_desc_get;
 651                        desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
 652                        break;
 653                case PCH_DMA_WIDTH_2_BYTES:
 654                        if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
 655                                goto err_desc_get;
 656                        desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
 657                        break;
 658                case PCH_DMA_WIDTH_4_BYTES:
 659                        if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
 660                                goto err_desc_get;
 661                        desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
 662                        break;
 663                default:
 664                        goto err_desc_get;
 665                }
 666
 667                if (!first) {
 668                        first = desc;
 669                } else {
 670                        prev->regs.next |= desc->txd.phys;
 671                        list_add_tail(&desc->desc_node, &first->tx_list);
 672                }
 673
 674                prev = desc;
 675        }
 676
 677        if (flags & DMA_PREP_INTERRUPT)
 678                desc->regs.next = DMA_DESC_END_WITH_IRQ;
 679        else
 680                desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;
 681
 682        first->txd.cookie = -EBUSY;
 683        desc->txd.flags = flags;
 684
 685        return &first->txd;
 686
 687err_desc_get:
 688        dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
 689        pdc_desc_put(pd_chan, first);
 690        return NULL;
 691}
 692
 693static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 694                             unsigned long arg)
 695{
 696        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
 697        struct pch_dma_desc *desc, *_d;
 698        LIST_HEAD(list);
 699
 700        if (cmd != DMA_TERMINATE_ALL)
 701                return -ENXIO;
 702
 703        spin_lock_irq(&pd_chan->lock);
 704
 705        pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);
 706
 707        list_splice_init(&pd_chan->active_list, &list);
 708        list_splice_init(&pd_chan->queue, &list);
 709
 710        list_for_each_entry_safe(desc, _d, &list, desc_node)
 711                pdc_chain_complete(pd_chan, desc);
 712
 713        spin_unlock_irq(&pd_chan->lock);
 714
 715        return 0;
 716}
 717
 718static void pdc_tasklet(unsigned long data)
 719{
 720        struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
 721        unsigned long flags;
 722
 723        if (!pdc_is_idle(pd_chan)) {
 724                dev_err(chan2dev(&pd_chan->chan),
 725                        "BUG: handle non-idle channel in tasklet\n");
 726                return;
 727        }
 728
 729        spin_lock_irqsave(&pd_chan->lock, flags);
 730        if (test_and_clear_bit(0, &pd_chan->err_status))
 731                pdc_handle_error(pd_chan);
 732        else
 733                pdc_advance_work(pd_chan);
 734        spin_unlock_irqrestore(&pd_chan->lock, flags);
 735}
 736
 737static irqreturn_t pd_irq(int irq, void *devid)
 738{
 739        struct pch_dma *pd = (struct pch_dma *)devid;
 740        struct pch_dma_chan *pd_chan;
 741        u32 sts0;
 742        u32 sts2;
 743        int i;
 744        int ret0 = IRQ_NONE;
 745        int ret2 = IRQ_NONE;
 746
 747        sts0 = dma_readl(pd, STS0);
 748        sts2 = dma_readl(pd, STS2);
 749
 750        dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);
 751
 752        for (i = 0; i < pd->dma.chancnt; i++) {
 753                pd_chan = &pd->channels[i];
 754
 755                if (i < 8) {
 756                        if (sts0 & DMA_STATUS_IRQ(i)) {
 757                                if (sts0 & DMA_STATUS0_ERR(i))
 758                                        set_bit(0, &pd_chan->err_status);
 759
 760                                tasklet_schedule(&pd_chan->tasklet);
 761                                ret0 = IRQ_HANDLED;
 762                        }
 763                } else {
 764                        if (sts2 & DMA_STATUS_IRQ(i - 8)) {
 765                                if (sts2 & DMA_STATUS2_ERR(i))
 766                                        set_bit(0, &pd_chan->err_status);
 767
 768                                tasklet_schedule(&pd_chan->tasklet);
 769                                ret2 = IRQ_HANDLED;
 770                        }
 771                }
 772        }
 773
 774        /* clear interrupt bits in status register */
 775        if (ret0)
 776                dma_writel(pd, STS0, sts0);
 777        if (ret2)
 778                dma_writel(pd, STS2, sts2);
 779
 780        return ret0 | ret2;
 781}
 782
 783#ifdef  CONFIG_PM
 784static void pch_dma_save_regs(struct pch_dma *pd)
 785{
 786        struct pch_dma_chan *pd_chan;
 787        struct dma_chan *chan, *_c;
 788        int i = 0;
 789
 790        pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
 791        pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
 792        pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
 793        pd->regs.dma_ctl3 = dma_readl(pd, CTL3);
 794
 795        list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
 796                pd_chan = to_pd_chan(chan);
 797
 798                pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
 799                pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
 800                pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
 801                pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);
 802
 803                i++;
 804        }
 805}
 806
 807static void pch_dma_restore_regs(struct pch_dma *pd)
 808{
 809        struct pch_dma_chan *pd_chan;
 810        struct dma_chan *chan, *_c;
 811        int i = 0;
 812
 813        dma_writel(pd, CTL0, pd->regs.dma_ctl0);
 814        dma_writel(pd, CTL1, pd->regs.dma_ctl1);
 815        dma_writel(pd, CTL2, pd->regs.dma_ctl2);
 816        dma_writel(pd, CTL3, pd->regs.dma_ctl3);
 817
 818        list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
 819                pd_chan = to_pd_chan(chan);
 820
 821                channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
 822                channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
 823                channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
 824                channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);
 825
 826                i++;
 827        }
 828}
 829
 830static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
 831{
 832        struct pch_dma *pd = pci_get_drvdata(pdev);
 833
 834        if (pd)
 835                pch_dma_save_regs(pd);
 836
 837        pci_save_state(pdev);
 838        pci_disable_device(pdev);
 839        pci_set_power_state(pdev, pci_choose_state(pdev, state));
 840
 841        return 0;
 842}
 843
 844static int pch_dma_resume(struct pci_dev *pdev)
 845{
 846        struct pch_dma *pd = pci_get_drvdata(pdev);
 847        int err;
 848
 849        pci_set_power_state(pdev, PCI_D0);
 850        pci_restore_state(pdev);
 851
 852        err = pci_enable_device(pdev);
 853        if (err) {
 854                dev_dbg(&pdev->dev, "failed to enable device\n");
 855                return err;
 856        }
 857
 858        if (pd)
 859                pch_dma_restore_regs(pd);
 860
 861        return 0;
 862}
 863#endif
 864
 865static int __devinit pch_dma_probe(struct pci_dev *pdev,
 866                                   const struct pci_device_id *id)
 867{
 868        struct pch_dma *pd;
 869        struct pch_dma_regs *regs;
 870        unsigned int nr_channels;
 871        int err;
 872        int i;
 873
 874        nr_channels = id->driver_data;
 875        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 876        if (!pd)
 877                return -ENOMEM;
 878
 879        pci_set_drvdata(pdev, pd);
 880
 881        err = pci_enable_device(pdev);
 882        if (err) {
 883                dev_err(&pdev->dev, "Cannot enable PCI device\n");
 884                goto err_free_mem;
 885        }
 886
 887        if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
 888                dev_err(&pdev->dev, "Cannot find proper base address\n");
 889                goto err_disable_pdev;
 890        }
 891
 892        err = pci_request_regions(pdev, DRV_NAME);
 893        if (err) {
 894                dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
 895                goto err_disable_pdev;
 896        }
 897
 898        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 899        if (err) {
 900                dev_err(&pdev->dev, "Cannot set proper DMA config\n");
 901                goto err_free_res;
 902        }
 903
 904        regs = pd->membase = pci_iomap(pdev, 1, 0);
 905        if (!pd->membase) {
 906                dev_err(&pdev->dev, "Cannot map MMIO registers\n");
 907                err = -ENOMEM;
 908                goto err_free_res;
 909        }
 910
 911        pci_set_master(pdev);
 912
 913        err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
 914        if (err) {
 915                dev_err(&pdev->dev, "Failed to request IRQ\n");
 916                goto err_iounmap;
 917        }
 918
 919        pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
 920                                   sizeof(struct pch_dma_desc), 4, 0);
 921        if (!pd->pool) {
 922                dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
 923                err = -ENOMEM;
 924                goto err_free_irq;
 925        }
 926
 927        pd->dma.dev = &pdev->dev;
 928
 929        INIT_LIST_HEAD(&pd->dma.channels);
 930
 931        for (i = 0; i < nr_channels; i++) {
 932                struct pch_dma_chan *pd_chan = &pd->channels[i];
 933
 934                pd_chan->chan.device = &pd->dma;
 935                pd_chan->chan.cookie = 1;
 936
 937                pd_chan->membase = &regs->desc[i];
 938
 939                spin_lock_init(&pd_chan->lock);
 940
 941                INIT_LIST_HEAD(&pd_chan->active_list);
 942                INIT_LIST_HEAD(&pd_chan->queue);
 943                INIT_LIST_HEAD(&pd_chan->free_list);
 944
 945                tasklet_init(&pd_chan->tasklet, pdc_tasklet,
 946                             (unsigned long)pd_chan);
 947                list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
 948        }
 949
 950        dma_cap_zero(pd->dma.cap_mask);
 951        dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
 952        dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);
 953
 954        pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
 955        pd->dma.device_free_chan_resources = pd_free_chan_resources;
 956        pd->dma.device_tx_status = pd_tx_status;
 957        pd->dma.device_issue_pending = pd_issue_pending;
 958        pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
 959        pd->dma.device_control = pd_device_control;
 960
 961        err = dma_async_device_register(&pd->dma);
 962        if (err) {
 963                dev_err(&pdev->dev, "Failed to register DMA device\n");
 964                goto err_free_pool;
 965        }
 966
 967        return 0;
 968
 969err_free_pool:
 970        pci_pool_destroy(pd->pool);
 971err_free_irq:
 972        free_irq(pdev->irq, pd);
 973err_iounmap:
 974        pci_iounmap(pdev, pd->membase);
 975err_free_res:
 976        pci_release_regions(pdev);
 977err_disable_pdev:
 978        pci_disable_device(pdev);
 979err_free_mem:
 980        return err;
 981}
 982
 983static void __devexit pch_dma_remove(struct pci_dev *pdev)
 984{
 985        struct pch_dma *pd = pci_get_drvdata(pdev);
 986        struct pch_dma_chan *pd_chan;
 987        struct dma_chan *chan, *_c;
 988
 989        if (pd) {
 990                dma_async_device_unregister(&pd->dma);
 991
 992                list_for_each_entry_safe(chan, _c, &pd->dma.channels,
 993                                         device_node) {
 994                        pd_chan = to_pd_chan(chan);
 995
 996                        tasklet_disable(&pd_chan->tasklet);
 997                        tasklet_kill(&pd_chan->tasklet);
 998                }
 999
1000                pci_pool_destroy(pd->pool);
1001                free_irq(pdev->irq, pd);
1002                pci_iounmap(pdev, pd->membase);
1003                pci_release_regions(pdev);
1004                pci_disable_device(pdev);
1005                kfree(pd);
1006        }
1007}
1008
1009/* PCI Device ID of DMA device */
1010#define PCI_VENDOR_ID_ROHM             0x10DB
1011#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH        0x8810
1012#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH        0x8815
1013#define PCI_DEVICE_ID_ML7213_DMA1_8CH   0x8026
1014#define PCI_DEVICE_ID_ML7213_DMA2_8CH   0x802B
1015#define PCI_DEVICE_ID_ML7213_DMA3_4CH   0x8034
1016#define PCI_DEVICE_ID_ML7213_DMA4_12CH  0x8032
1017#define PCI_DEVICE_ID_ML7223_DMA1_4CH   0x800B
1018#define PCI_DEVICE_ID_ML7223_DMA2_4CH   0x800E
1019#define PCI_DEVICE_ID_ML7223_DMA3_4CH   0x8017
1020#define PCI_DEVICE_ID_ML7223_DMA4_4CH   0x803B
1021#define PCI_DEVICE_ID_ML7831_DMA1_8CH   0x8810
1022#define PCI_DEVICE_ID_ML7831_DMA2_4CH   0x8815
1023
1024DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
1025        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
1026        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
1027        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
1028        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
1029        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
1030        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */
1031        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */
1032        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
1033        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
1034        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
1035        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
1036        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
1037        { 0, },
1038};
1039
1040static struct pci_driver pch_dma_driver = {
1041        .name           = DRV_NAME,
1042        .id_table       = pch_dma_id_table,
1043        .probe          = pch_dma_probe,
1044        .remove         = __devexit_p(pch_dma_remove),
1045#ifdef CONFIG_PM
1046        .suspend        = pch_dma_suspend,
1047        .resume         = pch_dma_resume,
1048#endif
1049};
1050
1051static int __init pch_dma_init(void)
1052{
1053        return pci_register_driver(&pch_dma_driver);
1054}
1055
1056static void __exit pch_dma_exit(void)
1057{
1058        pci_unregister_driver(&pch_dma_driver);
1059}
1060
1061module_init(pch_dma_init);
1062module_exit(pch_dma_exit);
1063
1064MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
1065                   "DMA controller driver");
1066MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
1067MODULE_LICENSE("GPL v2");
1068