linux/drivers/dma/dmaengine.c
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, protected by a
 * mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel().  Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
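
/*
 * Example (hypothetical client code; a minimal sketch of the model
 * described above, not taken from an in-tree user):
 *
 *      struct dma_chan *chan;
 *
 *      dmaengine_get();        (pin channel providers in memory)
 *      chan = dma_find_channel(DMA_MEMCPY);
 *      if (chan) {
 *              ... submit descriptors on chan ...
 *      }
 *      dmaengine_put();        (allow providers to be removed again)
 *
 * dma_find_channel() may return NULL if no public channel with the
 * requested capability is registered.
 */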

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
        .dev_release    = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
                            const dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s: timeout!\n", __func__);
                        return DMA_ERROR;
                }
                if (status != DMA_IN_PROGRESS)
                        break;
                cpu_relax();
        } while (1);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);
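
/*
 * Typical pairing (sketch): a caller that submitted a descriptor and
 * holds its cookie can busy-wait for completion; DMA_ERROR is returned
 * on a failed descriptor or after the 5 second timeout above.
 *
 *      dma_cookie_t cookie = tx->tx_submit(tx);
 *
 *      if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *              ... handle the error ...
 */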

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
        struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
        if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
                return NULL;

        return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}
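
/*
 * Worked example (hypothetical topology): with two online cpus and two
 * public MEMCPY-capable channels, the loop above hands chan0 to cpu0
 * and chan1 to cpu1, so each cpu's dma_find_channel(DMA_MEMCPY) hits a
 * dedicated channel.  On a uniprocessor system nth_chan(cap, -1) never
 * satisfies 'n' and so returns the channel with the lowest table_count,
 * which tends to place each operation type on a different channel.
 */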

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
                                          struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to select among available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it.  We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        device->privatecnt++;
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n",
                                         __func__, dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_debug("%s: failed to get %s: (%d)\n",
                                         __func__, dma_chan_name(chan), err);
                        else
                                break;
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n",
                 __func__,
                 chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
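
/*
 * Example (sketch; my_filter and my_dev are hypothetical): callers
 * normally use the dma_request_channel() wrapper from
 * <linux/dmaengine.h>, which passes the mask's address here.  The
 * filter callback may reject candidates, e.g. to bind to one specific
 * provider device:
 *
 *      static bool my_filter(struct dma_chan *chan, void *param)
 *      {
 *              return chan->device->dev == param;
 *      }
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, my_filter, my_dev);
 *      if (chan)
 *              ... chan is now exclusively owned ...
 */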

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:        pointer to client device structure
 * @name:       slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
        /* If device-tree is present get slave info from here */
        if (dev->of_node)
                return of_dma_request_slave_channel(dev->of_node, name);

        /* If device was enumerated by ACPI get slave info from here */
        if (ACPI_HANDLE(dev))
                return acpi_dma_request_slave_chan_by_name(dev, name);

        return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
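
/*
 * Example (sketch, assuming a device-tree enumerated client): a
 * peripheral driver requests the channel named in its "dma-names"
 * property and must hand it back with dma_release_channel() when done.
 *
 *      chan = dma_request_slave_channel(&pdev->dev, "rx");
 *      if (!chan)
 *              ... fall back to PIO ...
 */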

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        /* drop PRIVATE cap enabled by __dma_request_channel() */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_debug("%s: failed to get %s: (%d)\n",
                                       __func__, dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
        #ifdef CONFIG_ASYNC_TX_DMA
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
        #endif

        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;

        #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;
        #endif
        #endif

        return true;
}

static int get_dma_id(struct dma_device *device)
{
        int rc;

        mutex_lock(&dma_list_mutex);

        rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
        if (rc >= 0)
                device->dev_id = rc;

        mutex_unlock(&dma_list_mutex);
        return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - register a DMA device with the subsystem
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
                !device->device_prep_dma_xor_val);
        BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
                !device->device_prep_dma_pq);
        BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
                !device->device_prep_dma_pq_val);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
                !device->device_prep_dma_sg);
        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
                !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_control);
        BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
                !device->device_prep_interleaved_dma);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_tx_status);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        /* note: this only matters in the
         * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
         */
        if (device_has_all_tx_types(device))
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        rc = get_dma_id(device);
        if (rc != 0) {
                kfree(idr_ref);
                return rc;
        }

        atomic_set(idr_ref, 0);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        atomic_dec(idr_ref);
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++;   /* Always private */
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        /* if we never registered a channel just release the idr */
        if (atomic_read(idr_ref) == 0) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, device->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(idr_ref);
                return rc;
        }

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
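
/*
 * Example (hypothetical provider; a minimal sketch with made-up my_*
 * callbacks): a driver advertises its capabilities, fills in the
 * mandatory hooks validated above, links its channels into
 * device->channels, and registers.
 *
 *      dma_cap_set(DMA_MEMCPY, mydev->dma.cap_mask);
 *      mydev->dma.device_alloc_chan_resources = my_alloc_chan_resources;
 *      mydev->dma.device_free_chan_resources = my_free_chan_resources;
 *      mydev->dma.device_prep_dma_memcpy = my_prep_dma_memcpy;
 *      mydev->dma.device_tx_status = my_tx_status;
 *      mydev->dma.device_issue_pending = my_issue_pending;
 *      mydev->dma.dev = &pdev->dev;
 *      INIT_LIST_HEAD(&mydev->dma.channels);
 *      ... add each struct dma_chan to mydev->dma.channels ...
 *      rc = dma_async_device_register(&mydev->dma);
 */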

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                        void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK |
                DMA_COMPL_SRC_UNMAP_SINGLE |
                DMA_COMPL_DEST_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
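
/*
 * Usage sketch: the returned cookie is negative on failure, so callers
 * can fall back to a cpu copy; dma_sync_wait() (above) can then poll
 * the cookie to completion.
 *
 *      cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *      if (cookie < 0)
 *              memcpy(dst, src, len);
 *      else
 *              status = dma_sync_wait(chan, cookie);
 */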

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                        unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
        size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        unsigned long flags;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        flags = DMA_CTRL_ACK;
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        preempt_disable();
        __this_cpu_add(chan->local->bytes_transferred, len);
        __this_cpu_inc(chan->local->memcpy_count);
        preempt_enable();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan)
{
        tx->chan = chan;
        #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        spin_lock_init(&tx->lock);
        #endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        if (!tx)
                return DMA_SUCCESS;

        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        pr_err("%s timeout waiting for descriptor submission\n",
                               __func__);
                        return DMA_ERROR;
                }
                cpu_relax();
        }
        return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = txd_next(tx);
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        /* we'll submit tx->next now, so clear the link */
        txd_clear_next(tx);
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected; in that
         * case we will be called again as a result of processing the
         * interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                txd_lock(dep);
                txd_clear_parent(dep);
                dep_next = txd_next(dep);
                if (dep_next && dep_next->chan == chan)
                        txd_clear_next(dep); /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                txd_unlock(dep);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);