linux/drivers/soc/ti/knav_dma.c
/*
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:     Santosh Shilimkar <santosh.shilimkar@ti.com>
 *              Sandeep Nair <sandeep_n@ti.com>
 *              Cyril Chemparathy <cyril@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_dma.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#define REG_MASK                0xffffffff

#define DMA_LOOPBACK            BIT(31)
#define DMA_ENABLE              BIT(31)
#define DMA_TEARDOWN            BIT(30)

#define DMA_TX_FILT_PSWORDS     BIT(29)
#define DMA_TX_FILT_EINFO       BIT(30)
#define DMA_TX_PRIO_SHIFT       0
#define DMA_RX_PRIO_SHIFT       16
#define DMA_PRIO_MASK           GENMASK(3, 0)
#define DMA_PRIO_DEFAULT        0
#define DMA_RX_TIMEOUT_DEFAULT  17500 /* cycles */
#define DMA_RX_TIMEOUT_MASK     GENMASK(16, 0)
#define DMA_RX_TIMEOUT_SHIFT    0

#define CHAN_HAS_EPIB           BIT(30)
#define CHAN_HAS_PSINFO         BIT(29)
#define CHAN_ERR_RETRY          BIT(28)
#define CHAN_PSINFO_AT_SOP      BIT(25)
#define CHAN_SOP_OFF_SHIFT      16
#define CHAN_SOP_OFF_MASK       GENMASK(9, 0)
#define DESC_TYPE_SHIFT         26
#define DESC_TYPE_MASK          GENMASK(2, 0)
/*
 * QMGR & QNUM together make up 14 bits, with QMGR as the two MSBs, in the
 * logical navigator cloud mapping scheme. Using the 14-bit physical queue
 * numbers directly maps into this scheme.
 */
#define CHAN_QNUM_MASK          GENMASK(14, 0)
#define DMA_MAX_QMS             4
#define DMA_TIMEOUT             1       /* msecs */
#define DMA_INVALID_ID          0xffff

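/*
 * For illustration only (derived from the comment above; the driver itself
 * programs the combined 14-bit value as-is): a physical queue number
 * decomposes as
 *
 *      qmgr = qnum14 >> 12;             (two MSBs select the queue manager)
 *      qnum = qnum14 & GENMASK(11, 0);  (queue index within that manager)
 */
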
struct reg_global {
        u32     revision;
        u32     perf_control;
        u32     emulation_control;
        u32     priority_control;
        u32     qm_base_address[DMA_MAX_QMS];
};

struct reg_chan {
        u32     control;
        u32     mode;
        u32     __rsvd[6];
};

struct reg_tx_sched {
        u32     prio;
};

struct reg_rx_flow {
        u32     control;
        u32     tags;
        u32     tag_sel;
        u32     fdq_sel[2];
        u32     thresh[3];
};

struct knav_dma_pool_device {
        struct device                   *dev;
        struct list_head                list;
};

struct knav_dma_device {
        bool                            loopback, enable_all;
        unsigned                        tx_priority, rx_priority, rx_timeout;
        unsigned                        logical_queue_managers;
        unsigned                        qm_base_address[DMA_MAX_QMS];
        struct reg_global __iomem       *reg_global;
        struct reg_chan __iomem         *reg_tx_chan;
        struct reg_rx_flow __iomem      *reg_rx_flow;
        struct reg_chan __iomem         *reg_rx_chan;
        struct reg_tx_sched __iomem     *reg_tx_sched;
        unsigned                        max_rx_chan, max_tx_chan;
        unsigned                        max_rx_flow;
        char                            name[32];
        atomic_t                        ref_count;
        struct list_head                list;
        struct list_head                chan_list;
        spinlock_t                      lock;
};

struct knav_dma_chan {
        enum dma_transfer_direction     direction;
        struct knav_dma_device          *dma;
        atomic_t                        ref_count;

        /* registers */
        struct reg_chan __iomem         *reg_chan;
        struct reg_tx_sched __iomem     *reg_tx_sched;
        struct reg_rx_flow __iomem      *reg_rx_flow;

        /* configuration stuff */
        unsigned                        channel, flow;
        struct knav_dma_cfg             cfg;
        struct list_head                list;
        spinlock_t                      lock;
};

#define chan_number(ch) (((ch)->direction == DMA_MEM_TO_DEV) ? \
                        (ch)->channel : (ch)->flow)

static struct knav_dma_pool_device *kdev;

static bool device_ready;
bool knav_dma_device_ready(void)
{
        return device_ready;
}
EXPORT_SYMBOL_GPL(knav_dma_device_ready);

static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
{
        return !memcmp(&chan->cfg, cfg, sizeof(*cfg));
}

static int chan_start(struct knav_dma_chan *chan,
                        struct knav_dma_cfg *cfg)
{
        u32 v = 0;

        spin_lock(&chan->lock);
        if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
                if (cfg->u.tx.filt_pswords)
                        v |= DMA_TX_FILT_PSWORDS;
                if (cfg->u.tx.filt_einfo)
                        v |= DMA_TX_FILT_EINFO;
                writel_relaxed(v, &chan->reg_chan->mode);
                writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
        }

        if (chan->reg_tx_sched)
                writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);

        if (chan->reg_rx_flow) {
                v = 0;

                if (cfg->u.rx.einfo_present)
                        v |= CHAN_HAS_EPIB;
                if (cfg->u.rx.psinfo_present)
                        v |= CHAN_HAS_PSINFO;
                if (cfg->u.rx.err_mode == DMA_RETRY)
                        v |= CHAN_ERR_RETRY;
                v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
                if (cfg->u.rx.psinfo_at_sop)
                        v |= CHAN_PSINFO_AT_SOP;
                v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
                        << CHAN_SOP_OFF_SHIFT;
                v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;

                writel_relaxed(v, &chan->reg_rx_flow->control);
                writel_relaxed(0, &chan->reg_rx_flow->tags);
                writel_relaxed(0, &chan->reg_rx_flow->tag_sel);

                /* free descriptor queues are packed in pairs per register */
                v = cfg->u.rx.fdq[0] << 16;
                v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
                writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);

                v = cfg->u.rx.fdq[2] << 16;
                v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
                writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);

                writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
                writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
                writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
        }

        /* Keep a copy of the cfg */
        memcpy(&chan->cfg, cfg, sizeof(*cfg));
        spin_unlock(&chan->lock);

        return 0;
}

static int chan_teardown(struct knav_dma_chan *chan)
{
        unsigned long end, value;

        if (!chan->reg_chan)
                return 0;

        /* indicate teardown */
        writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);

        /* wait for the dma to shut itself down */
        end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
        do {
                value = readl_relaxed(&chan->reg_chan->control);
                if ((value & DMA_ENABLE) == 0)
                        break;
        } while (time_after(end, jiffies));

        if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
                dev_err(kdev->dev, "timeout waiting for teardown\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static void chan_stop(struct knav_dma_chan *chan)
{
        spin_lock(&chan->lock);
        if (chan->reg_rx_flow) {
                /* first detach fdqs, starve out the flow */
                writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
                writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
                writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
                writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
                writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
        }

        /* teardown the dma channel */
        chan_teardown(chan);

        /* then disconnect the completion side */
        if (chan->reg_rx_flow) {
                writel_relaxed(0, &chan->reg_rx_flow->control);
                writel_relaxed(0, &chan->reg_rx_flow->tags);
                writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
        }

        memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
        spin_unlock(&chan->lock);

        dev_dbg(kdev->dev, "channel stopped\n");
}

static void dma_hw_enable_all(struct knav_dma_device *dma)
{
        int i;

        for (i = 0; i < dma->max_tx_chan; i++) {
                writel_relaxed(0, &dma->reg_tx_chan[i].mode);
                writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
        }
}

static void knav_dma_hw_init(struct knav_dma_device *dma)
{
        unsigned v;
        int i;

        spin_lock(&dma->lock);
        v = dma->loopback ? DMA_LOOPBACK : 0;
        writel_relaxed(v, &dma->reg_global->emulation_control);

        v = readl_relaxed(&dma->reg_global->perf_control);
        v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
        writel_relaxed(v, &dma->reg_global->perf_control);

        v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
             (dma->rx_priority << DMA_RX_PRIO_SHIFT));

        writel_relaxed(v, &dma->reg_global->priority_control);

        /* Always enable all Rx channels. Rx paths are managed using flows */
        for (i = 0; i < dma->max_rx_chan; i++)
                writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);

        for (i = 0; i < dma->logical_queue_managers; i++)
                writel_relaxed(dma->qm_base_address[i],
                               &dma->reg_global->qm_base_address[i]);
        spin_unlock(&dma->lock);
}

static void knav_dma_hw_destroy(struct knav_dma_device *dma)
{
        int i;
        unsigned v;

        spin_lock(&dma->lock);
        v = ~DMA_ENABLE & REG_MASK;

        for (i = 0; i < dma->max_rx_chan; i++)
                writel_relaxed(v, &dma->reg_rx_chan[i].control);

        for (i = 0; i < dma->max_tx_chan; i++)
                writel_relaxed(v, &dma->reg_tx_chan[i].control);
        spin_unlock(&dma->lock);
}

static void dma_debug_show_channels(struct seq_file *s,
                                        struct knav_dma_chan *chan)
{
        int i;

        seq_printf(s, "\t%s %d:\t",
                ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
                chan_number(chan));

        if (chan->direction == DMA_MEM_TO_DEV) {
                seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
                        chan->cfg.u.tx.filt_einfo,
                        chan->cfg.u.tx.filt_pswords,
                        chan->cfg.u.tx.priority);
        } else {
                seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
                        chan->cfg.u.rx.einfo_present,
                        chan->cfg.u.rx.psinfo_present,
                        chan->cfg.u.rx.desc_type);
                seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
                        chan->cfg.u.rx.dst_q,
                        chan->cfg.u.rx.thresh);
                for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
                        seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
                seq_puts(s, "\n");
        }
}

static void dma_debug_show_devices(struct seq_file *s,
                                        struct knav_dma_device *dma)
{
        struct knav_dma_chan *chan;

        list_for_each_entry(chan, &dma->chan_list, list) {
                if (atomic_read(&chan->ref_count))
                        dma_debug_show_channels(s, chan);
        }
}

static int knav_dma_debug_show(struct seq_file *s, void *v)
{
        struct knav_dma_device *dma;

        list_for_each_entry(dma, &kdev->list, list) {
                if (atomic_read(&dma->ref_count)) {
                        seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
                                   dma->name, dma->max_tx_chan, dma->max_rx_flow);
                        dma_debug_show_devices(s, dma);
                }
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);

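/*
 * Sample "knav_dma" debugfs output, reconstructed from the format strings
 * above (instance name and numbers are hypothetical):
 *
 *      dma_gbe : max_tx_chan: (9), max_rx_flows: (32)
 *              tx chan 0:      einfo - 0, pswords - 0, priority - 1
 *              rx flow 22:     einfo - 1, psinfo - 1, desc_type - 0
 *                              dst_q: [528], thresh: 0 fdq: [528][0][0][0]
 */
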
static int of_channel_match_helper(struct device_node *np, const char *name,
                                        const char **dma_instance)
{
        struct of_phandle_args args;
        struct device_node *dma_node;
        int index;

        dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
        if (!dma_node)
                return -ENODEV;

        *dma_instance = dma_node->name;
        index = of_property_match_string(np, "ti,navigator-dma-names", name);
        if (index < 0) {
                dev_err(kdev->dev, "No 'ti,navigator-dma-names' property or no entry matching '%s'\n",
                        name);
                return -ENODEV;
        }

        if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
                                        1, index, &args)) {
                dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
                return -ENODEV;
        }

        if (args.args[0] < 0) {
                dev_err(kdev->dev, "Missing args for %s\n", name);
                return -ENODEV;
        }

        return args.args[0];
}

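/*
 * Example client binding resolved by this helper (a sketch; node names and
 * channel numbers are placeholders, not from a real board):
 *
 *      netcp {
 *              ti,navigator-dmas = <&dma_gbe 22>, <&dma_gbe 23>;
 *              ti,navigator-dma-names = "netrx", "nettx";
 *      };
 *
 * For name = "nettx", of_property_match_string() returns index 1, the
 * phandle argument 23 becomes the channel/flow number, and *dma_instance
 * is set to the name of the DMA node behind the first phandle ("dma_gbe").
 */
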
/**
 * knav_dma_open_channel() - try to setup an exclusive slave channel
 * @dev:        pointer to client device structure
 * @name:       slave channel name
 * @config:     dma configuration parameters
 *
 * Return: pointer to the appropriate DMA channel on success, or an
 * error pointer on failure.
 */
void *knav_dma_open_channel(struct device *dev, const char *name,
                                        struct knav_dma_cfg *config)
{
        struct knav_dma_chan *chan;
        struct knav_dma_device *dma;
        bool found = false;
        int chan_num = -1;
        const char *instance;

        if (!kdev) {
                pr_err("keystone-navigator-dma driver not registered\n");
                return (void *)-EINVAL;
        }

        chan_num = of_channel_match_helper(dev->of_node, name, &instance);
        if (chan_num < 0) {
                dev_err(kdev->dev, "No DMA instance with name %s\n", name);
                return (void *)-EINVAL;
        }

        dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
                  config->direction == DMA_MEM_TO_DEV ? "transmit" :
                  config->direction == DMA_DEV_TO_MEM ? "receive"  :
                  "unknown", chan_num, instance);

        if (config->direction != DMA_MEM_TO_DEV &&
            config->direction != DMA_DEV_TO_MEM) {
                dev_err(kdev->dev, "bad direction\n");
                return (void *)-EINVAL;
        }

        /* Look for correct dma instance */
        list_for_each_entry(dma, &kdev->list, list) {
                if (!strcmp(dma->name, instance)) {
                        found = true;
                        break;
                }
        }
        if (!found) {
                dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
                return (void *)-EINVAL;
        }

        /* Look for correct dma channel from dma instance */
        found = false;
        list_for_each_entry(chan, &dma->chan_list, list) {
                if (config->direction == DMA_MEM_TO_DEV) {
                        if (chan->channel == chan_num) {
                                found = true;
                                break;
                        }
                } else {
                        if (chan->flow == chan_num) {
                                found = true;
                                break;
                        }
                }
        }
        if (!found) {
                dev_err(kdev->dev, "channel %d is not in DMA %s\n",
                                chan_num, instance);
                return (void *)-EINVAL;
        }

        if (atomic_read(&chan->ref_count) >= 1) {
                if (!check_config(chan, config)) {
                        dev_err(kdev->dev, "channel %d config mismatch\n",
                                chan_num);
                        return (void *)-EINVAL;
                }
        }

        if (atomic_inc_return(&chan->dma->ref_count) <= 1)
                knav_dma_hw_init(chan->dma);

        if (atomic_inc_return(&chan->ref_count) <= 1)
                chan_start(chan, config);

        dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
                                chan_num, instance);

        return chan;
}
EXPORT_SYMBOL_GPL(knav_dma_open_channel);

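/*
 * Example use (a hypothetical client sketch; the channel name and the
 * DMA_PRIO_MED_L priority value are assumptions modelled on typical
 * Navigator clients, not taken from this file):
 *
 *      struct knav_dma_cfg config = { 0 };
 *      void *chan;
 *
 *      config.direction = DMA_MEM_TO_DEV;
 *      config.u.tx.filt_einfo = false;
 *      config.u.tx.filt_pswords = false;
 *      config.u.tx.priority = DMA_PRIO_MED_L;
 *
 *      chan = knav_dma_open_channel(dev, "nettx", &config);
 *      if (IS_ERR(chan))
 *              return PTR_ERR(chan);
 *      ...
 *      knav_dma_close_channel(chan);
 */
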
/**
 * knav_dma_close_channel() - Destroy a dma channel
 * @channel:    dma channel handle
 */
void knav_dma_close_channel(void *channel)
{
        struct knav_dma_chan *chan = channel;

        if (!kdev) {
                pr_err("keystone-navigator-dma driver not registered\n");
                return;
        }

        if (atomic_dec_return(&chan->ref_count) <= 0)
                chan_stop(chan);

        if (atomic_dec_return(&chan->dma->ref_count) <= 0)
                knav_dma_hw_destroy(chan->dma);

        dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
                        chan->channel, chan->flow, chan->dma->name);
}
EXPORT_SYMBOL_GPL(knav_dma_close_channel);

static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
                                struct device_node *node,
                                unsigned index, resource_size_t *_size)
{
        struct device *dev = kdev->dev;
        struct resource res;
        void __iomem *regs;
        int ret;

        ret = of_address_to_resource(node, index, &res);
        if (ret) {
                dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
                        node, index);
                return ERR_PTR(ret);
        }

        regs = devm_ioremap_resource(kdev->dev, &res);
        if (IS_ERR(regs))
                dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
                        index, node);
        if (_size)
                *_size = resource_size(&res);

        return regs;
}

static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
{
        struct knav_dma_device *dma = chan->dma;

        chan->flow = flow;
        chan->reg_rx_flow = dma->reg_rx_flow + flow;
        chan->channel = DMA_INVALID_ID;
        dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);

        return 0;
}

static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
{
        struct knav_dma_device *dma = chan->dma;

        chan->channel = channel;
        chan->reg_chan = dma->reg_tx_chan + channel;
        chan->reg_tx_sched = dma->reg_tx_sched + channel;
        chan->flow = DMA_INVALID_ID;
        dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);

        return 0;
}

static int pktdma_init_chan(struct knav_dma_device *dma,
                                enum dma_transfer_direction dir,
                                unsigned chan_num)
{
        struct device *dev = kdev->dev;
        struct knav_dma_chan *chan;
        int ret = -EINVAL;

        chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;

        INIT_LIST_HEAD(&chan->list);
        chan->dma       = dma;
        chan->direction = DMA_TRANS_NONE;
        atomic_set(&chan->ref_count, 0);
        spin_lock_init(&chan->lock);

        if (dir == DMA_MEM_TO_DEV) {
                chan->direction = dir;
                ret = pktdma_init_tx_chan(chan, chan_num);
        } else if (dir == DMA_DEV_TO_MEM) {
                chan->direction = dir;
                ret = pktdma_init_rx_chan(chan, chan_num);
        } else {
                dev_err(dev, "channel(%d) direction unknown\n", chan_num);
        }

        list_add_tail(&chan->list, &dma->chan_list);

        return ret;
}

static int dma_init(struct device_node *cloud, struct device_node *dma_node)
{
        unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
        struct device_node *node = dma_node;
        struct knav_dma_device *dma;
        int ret, len, num_chan = 0;
        resource_size_t size;
        u32 timeout;
        u32 i;

        dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
        if (!dma) {
                dev_err(kdev->dev, "could not allocate driver mem\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&dma->list);
        INIT_LIST_HEAD(&dma->chan_list);

        if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
                dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
                return -ENODEV;
        }

        dma->logical_queue_managers = len / sizeof(u32);
        if (dma->logical_queue_managers > DMA_MAX_QMS) {
                dev_warn(kdev->dev, "too many queue mgrs (%d > %d), rest ignored\n",
                         dma->logical_queue_managers, DMA_MAX_QMS);
                dma->logical_queue_managers = DMA_MAX_QMS;
        }

        ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
                                        dma->qm_base_address,
                                        dma->logical_queue_managers);
        if (ret) {
                dev_err(kdev->dev, "invalid navigator cloud addresses\n");
                return -ENODEV;
        }

        dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
        if (IS_ERR(dma->reg_global))
                return PTR_ERR(dma->reg_global);
        if (size < sizeof(struct reg_global)) {
                dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
                return -ENODEV;
        }

        dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
        if (IS_ERR(dma->reg_tx_chan))
                return PTR_ERR(dma->reg_tx_chan);

        max_tx_chan = size / sizeof(struct reg_chan);
        dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
        if (IS_ERR(dma->reg_rx_chan))
                return PTR_ERR(dma->reg_rx_chan);

        max_rx_chan = size / sizeof(struct reg_chan);
        dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
        if (IS_ERR(dma->reg_tx_sched))
                return PTR_ERR(dma->reg_tx_sched);

        max_tx_sched = size / sizeof(struct reg_tx_sched);
        dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
        if (IS_ERR(dma->reg_rx_flow))
                return PTR_ERR(dma->reg_rx_flow);

        max_rx_flow = size / sizeof(struct reg_rx_flow);
        dma->rx_priority = DMA_PRIO_DEFAULT;
        dma->tx_priority = DMA_PRIO_DEFAULT;

        dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL);
        dma->loopback   = (of_get_property(node, "ti,loop-back",  NULL) != NULL);

        ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
        if (ret < 0) {
                dev_dbg(kdev->dev, "unspecified rx timeout, using value %d\n",
                        DMA_RX_TIMEOUT_DEFAULT);
                timeout = DMA_RX_TIMEOUT_DEFAULT;
        }

        dma->rx_timeout = timeout;
        dma->max_rx_chan = max_rx_chan;
        dma->max_rx_flow = max_rx_flow;
        dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
        atomic_set(&dma->ref_count, 0);
        strscpy(dma->name, node->name, sizeof(dma->name));
        spin_lock_init(&dma->lock);

        for (i = 0; i < dma->max_tx_chan; i++) {
                if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
                        num_chan++;
        }

        for (i = 0; i < dma->max_rx_flow; i++) {
                if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
                        num_chan++;
        }

        list_add_tail(&dma->list, &kdev->list);

        /*
         * For DSP software use cases or userspace transport software, setup
         * all the DMA hardware resources.
         */
        if (dma->enable_all) {
                atomic_inc(&dma->ref_count);
                knav_dma_hw_init(dma);
                dma_hw_enable_all(dma);
        }

        dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
                dma->name, num_chan, dma->max_rx_flow,
                dma->max_tx_chan, dma->max_rx_chan,
                dma->loopback ? ", loopback" : "");

        return 0;
}

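/*
 * Example device-tree layout consumed by dma_init() (a sketch; addresses,
 * sizes and names are placeholders): the parent "cloud" node carries
 * ti,navigator-cloud-address, and each child describes one packet DMA
 * with the five register regions fetched above by index.
 *
 *      knav_dmas {
 *              ti,navigator-cloud-address = <0x23a80000 0x23a90000>;
 *
 *              dma_gbe {
 *                      reg = <0x2004000 0x100>,        // global   (index 0)
 *                            <0x2004400 0x120>,        // tx chan  (index 1)
 *                            <0x2004800 0x300>,        // rx chan  (index 2)
 *                            <0x2004c00 0x120>,        // tx sched (index 3)
 *                            <0x2005000 0x400>;        // rx flow  (index 4)
 *                      ti,enable-all;
 *              };
 *      };
 */
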
static int knav_dma_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = pdev->dev.of_node;
        struct device_node *child;
        int ret = 0;

        if (!node) {
                dev_err(&pdev->dev, "could not find device info\n");
                return -EINVAL;
        }

        kdev = devm_kzalloc(dev,
                        sizeof(struct knav_dma_pool_device), GFP_KERNEL);
        if (!kdev) {
                dev_err(dev, "could not allocate driver mem\n");
                return -ENOMEM;
        }

        kdev->dev = dev;
        INIT_LIST_HEAD(&kdev->list);

        pm_runtime_enable(kdev->dev);
        ret = pm_runtime_get_sync(kdev->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(kdev->dev);
                dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
                goto err_pm_disable;
        }

        /* Initialise all packet dmas */
        for_each_child_of_node(node, child) {
                ret = dma_init(node, child);
                if (ret) {
                        of_node_put(child);
                        dev_err(&pdev->dev, "init failed with %d\n", ret);
                        break;
                }
        }

        if (list_empty(&kdev->list)) {
                dev_err(dev, "no valid dma instance\n");
                ret = -ENODEV;
                goto err_put_sync;
        }

        debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
                            &knav_dma_debug_fops);

        device_ready = true;
        return ret;

err_put_sync:
        pm_runtime_put_sync(kdev->dev);
err_pm_disable:
        pm_runtime_disable(kdev->dev);

        return ret;
}

static int knav_dma_remove(struct platform_device *pdev)
{
        struct knav_dma_device *dma;

        list_for_each_entry(dma, &kdev->list, list) {
                if (atomic_dec_return(&dma->ref_count) == 0)
                        knav_dma_hw_destroy(dma);
        }

        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

static const struct of_device_id of_match[] = {
        { .compatible = "ti,keystone-navigator-dma", },
        {},
};

MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver knav_dma_driver = {
        .probe  = knav_dma_probe,
        .remove = knav_dma_remove,
        .driver = {
                .name           = "keystone-navigator-dma",
                .of_match_table = of_match,
        },
};
module_platform_driver(knav_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");