linux/drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"
#include <plat/dma.h>

struct omap_dmadev {
        struct dma_device ddev;
        spinlock_t lock;
        struct tasklet_struct task;
        struct list_head pending;
};

struct omap_chan {
        struct virt_dma_chan vc;
        struct list_head node;

        struct dma_slave_config cfg;
        unsigned dma_sig;
        bool cyclic;

        int dma_ch;
        struct omap_desc *desc;
        unsigned sgidx;
};

struct omap_sg {
        dma_addr_t addr;
        uint32_t en;            /* number of elements (24-bit) */
        uint32_t fn;            /* number of frames (16-bit) */
};

struct omap_desc {
        struct virt_dma_desc vd;
        enum dma_transfer_direction dir;
        dma_addr_t dev_addr;

        int16_t fi;             /* for OMAP_DMA_SYNC_PACKET */
        uint8_t es;             /* OMAP_DMA_DATA_TYPE_xxx */
        uint8_t sync_mode;      /* OMAP_DMA_SYNC_xxx */
        uint8_t sync_type;      /* OMAP_DMA_xxx_SYNC* */
        uint8_t periph_port;    /* Peripheral port */

        unsigned sglen;
        struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
        [OMAP_DMA_DATA_TYPE_S8] = 1,
        [OMAP_DMA_DATA_TYPE_S16] = 2,
        [OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
        return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct omap_desc, vd));
}

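/*
 * Program the memory-side address (post-incrementing) for sg entry @idx,
 * set the element/frame counts and synchronisation for the transfer, and
 * start the hardware channel.
 */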
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
        unsigned idx)
{
        struct omap_sg *sg = d->sg + idx;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
        else
                omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
                        OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

        omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
                d->sync_mode, c->dma_sig, d->sync_type);

        omap_start_dma(c->dma_ch);
}

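/*
 * Take the next issued virtual descriptor (if any), program the device-side
 * parameters (constant address on the peripheral port) and kick off its
 * first sg entry.  Called with the channel's vc.lock held.
 */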
static void omap_dma_start_desc(struct omap_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
        struct omap_desc *d;

        if (!vd) {
                c->desc = NULL;
                return;
        }

        list_del(&vd->node);

        c->desc = d = to_omap_dma_desc(&vd->tx);
        c->sgidx = 0;

        if (d->dir == DMA_DEV_TO_MEM)
                omap_set_dma_src_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
        else
                omap_set_dma_dest_params(c->dma_ch, d->periph_port,
                        OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

        omap_dma_start_sg(c, d, 0);
}

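/*
 * Completion callback passed to omap_request_dma().  For scatter-gather
 * transfers, advance to the next sg entry or complete the descriptor and
 * start the next one; for cyclic transfers, signal the period callback.
 */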
static void omap_dma_callback(int ch, u16 status, void *data)
{
        struct omap_chan *c = data;
        struct omap_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        d = c->desc;
        if (d) {
                if (!c->cyclic) {
                        if (++c->sgidx < d->sglen) {
                                omap_dma_start_sg(c, d, c->sgidx);
                        } else {
                                omap_dma_start_desc(c);
                                vchan_cookie_complete(&d->vd);
                        }
                } else {
                        vchan_cyclic_callback(&d->vd);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
        struct omap_dmadev *d = (struct omap_dmadev *)data;
        LIST_HEAD(head);

        spin_lock_irq(&d->lock);
        list_splice_tail_init(&d->pending, &head);
        spin_unlock_irq(&d->lock);

        while (!list_empty(&head)) {
                struct omap_chan *c = list_first_entry(&head,
                        struct omap_chan, node);

                spin_lock_irq(&c->vc.lock);
                list_del_init(&c->node);
                omap_dma_start_desc(c);
                spin_unlock_irq(&c->vc.lock);
        }
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);

        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);

        dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

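/* Size of one sg entry in elements: EN elements per frame times FN frames. */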
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
        return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
        unsigned i;
        size_t size;

        for (size = i = 0; i < d->sglen; i++)
                size += omap_dma_sg_size(&d->sg[i]);

        return size * es_bytes[d->es];
}

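/*
 * Residue for an in-flight descriptor: @addr is the current hardware
 * position.  Count the remainder of the sg entry containing @addr plus the
 * full size of every following entry.
 */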
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
        unsigned i;
        size_t size, es_size = es_bytes[d->es];

        for (size = i = 0; i < d->sglen; i++) {
                size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

                if (size)
                        size += this_size;
                else if (addr >= d->sg[i].addr &&
                         addr < d->sg[i].addr + this_size)
                        size += d->sg[i].addr + this_size - addr;
        }
        return size;
}

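/*
 * Report transfer status and, when a dma_tx_state is supplied, the residue:
 * the full descriptor size if it is still queued, or the remaining bytes
 * derived from the current hardware position if it is the active descriptor.
 */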
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_SUCCESS || !txstate)
                return ret;

        spin_lock_irqsave(&c->vc.lock, flags);
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
        } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
                struct omap_desc *d = c->desc;
                dma_addr_t pos;

                if (d->dir == DMA_MEM_TO_DEV)
                        pos = omap_get_dma_src_pos(c->dma_ch);
                else if (d->dir == DMA_DEV_TO_MEM)
                        pos = omap_get_dma_dst_pos(c->dma_ch);
                else
                        pos = 0;

                txstate->residue = omap_dma_desc_size_pos(d, pos);
        } else {
                txstate->residue = 0;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return ret;
}

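/*
 * Move newly submitted descriptors onto the virtual channel's issued list
 * and, if the channel is idle, put it on the device's pending list and
 * schedule the tasklet to start it.
 */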
static void omap_dma_issue_pending(struct dma_chan *chan)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc) && !c->desc) {
                struct omap_dmadev *d = to_omap_dma_dev(chan->device);
                spin_lock(&d->lock);
                if (list_empty(&c->node))
                        list_add_tail(&c->node, &d->pending);
                spin_unlock(&d->lock);
                tasklet_schedule(&d->task);
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

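/*
 * Prepare a frame-synchronised slave scatter-gather transfer.  The slave
 * config supplies the device address, bus width (element size) and maxburst
 * (elements per frame); each scatterlist entry becomes one omap_sg with its
 * length expressed as a number of frames.
 */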
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct scatterlist *sgent;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned i, j = 0, es, en, frame_bytes, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->es = es;
        d->sync_mode = OMAP_DMA_SYNC_FRAME;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_TIPB;

        /*
         * Build our scatterlist entries: each contains the address,
         * the number of elements (EN) in each frame, and the number of
         * frames (FN).  Number of bytes for this entry = ES * EN * FN.
         *
         * Burst size translates to number of elements with frame sync.
         * Note: DMA engine defines burst to be the number of dev-width
         * transfers.
         */
        en = burst;
        frame_bytes = es_bytes[es] * en;
        for_each_sg(sgl, sgent, sglen, i) {
                d->sg[j].addr = sg_dma_address(sgent);
                d->sg[j].en = en;
                d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
                j++;
        }

        d->sglen = j;

        return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

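/*
 * Prepare a cyclic transfer as a single packet-synchronised sg entry: EN is
 * the period length in elements, FN the number of periods.  The channel is
 * linked to itself so the transfer repeats, and the frame interrupt signals
 * each completed period.
 */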
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction dir, void *context)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        enum dma_slave_buswidth dev_width;
        struct omap_desc *d;
        dma_addr_t dev_addr;
        unsigned es, sync_type;
        u32 burst;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = c->cfg.src_addr;
                dev_width = c->cfg.src_addr_width;
                burst = c->cfg.src_maxburst;
                sync_type = OMAP_DMA_SRC_SYNC;
        } else if (dir == DMA_MEM_TO_DEV) {
                dev_addr = c->cfg.dst_addr;
                dev_width = c->cfg.dst_addr_width;
                burst = c->cfg.dst_maxburst;
                sync_type = OMAP_DMA_DST_SYNC;
        } else {
                dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
                return NULL;
        }

        /* Bus width translates to the element size (ES) */
        switch (dev_width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                es = OMAP_DMA_DATA_TYPE_S8;
                break;
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                es = OMAP_DMA_DATA_TYPE_S16;
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                es = OMAP_DMA_DATA_TYPE_S32;
                break;
        default: /* not reached */
                return NULL;
        }

        /* Now allocate and setup the descriptor. */
        d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
        if (!d)
                return NULL;

        d->dir = dir;
        d->dev_addr = dev_addr;
        d->fi = burst;
        d->es = es;
        d->sync_mode = OMAP_DMA_SYNC_PACKET;
        d->sync_type = sync_type;
        d->periph_port = OMAP_DMA_PORT_MPUI;
        d->sg[0].addr = buf_addr;
        d->sg[0].en = period_len / es_bytes[es];
        d->sg[0].fn = buf_len / period_len;
        d->sglen = 1;

        if (!c->cyclic) {
                c->cyclic = true;
                omap_dma_link_lch(c->dma_ch, c->dma_ch);
                omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);
                omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
        }

        if (!cpu_class_is_omap1()) {
                omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
                omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
        }

        return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
}

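/* Cache the slave configuration; 8-byte bus widths are not supported. */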
static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&c->cfg, cfg, sizeof(c->cfg));

        return 0;
}

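/*
 * Abort all activity on the channel: take it off the pending list, stop the
 * hardware if a descriptor is active, undo the cyclic self-link, and free
 * every outstanding descriptor.
 */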
static int omap_dma_terminate_all(struct omap_chan *c)
{
        struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);

        /* Prevent this channel being scheduled */
        spin_lock(&d->lock);
        list_del_init(&c->node);
        spin_unlock(&d->lock);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after omap_stop_dma() returns (even if it does, it will see
         * c->desc is NULL and exit.)
         */
        if (c->desc) {
                c->desc = NULL;
                omap_stop_dma(c->dma_ch);
        }

        if (c->cyclic) {
                c->cyclic = false;
                omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
        }

        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
        /* FIXME: not supported by platform private API */
        return -EINVAL;
}

static int omap_dma_resume(struct omap_chan *c)
{
        /* FIXME: not supported by platform private API */
        return -EINVAL;
}

static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        unsigned long arg)
{
        struct omap_chan *c = to_omap_dma_chan(chan);
        int ret;

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
                ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
                break;

        case DMA_TERMINATE_ALL:
                ret = omap_dma_terminate_all(c);
                break;

        case DMA_PAUSE:
                ret = omap_dma_pause(c);
                break;

        case DMA_RESUME:
                ret = omap_dma_resume(c);
                break;

        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

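/*
 * Create a virtual channel for DMA request line @dma_sig.  The real hardware
 * channel is not requested here; that happens in
 * omap_dma_alloc_chan_resources().
 */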
static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
        struct omap_chan *c;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;

        c->dma_sig = dma_sig;
        c->vc.desc_free = omap_dma_desc_free;
        vchan_init(&c->vc, &od->ddev);
        INIT_LIST_HEAD(&c->node);

        od->ddev.chancnt++;

        return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
        tasklet_kill(&od->task);
        while (!list_empty(&od->ddev.channels)) {
                struct omap_chan *c = list_first_entry(&od->ddev.channels,
                        struct omap_chan, vc.chan.device_node);

                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
        kfree(od);
}

static int omap_dma_probe(struct platform_device *pdev)
{
        struct omap_dmadev *od;
        int rc, i;

        od = kzalloc(sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;

        dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
        dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
        od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
        od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
        od->ddev.device_tx_status = omap_dma_tx_status;
        od->ddev.device_issue_pending = omap_dma_issue_pending;
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
        spin_lock_init(&od->lock);

        tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

        for (i = 0; i < 127; i++) {
                rc = omap_dma_chan_init(od, i);
                if (rc) {
                        omap_dma_free(od);
                        return rc;
                }
        }

        rc = dma_async_device_register(&od->ddev);
        if (rc) {
                pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
                        rc);
                omap_dma_free(od);
        } else {
                platform_set_drvdata(pdev, od);
        }

        dev_info(&pdev->dev, "OMAP DMA engine driver\n");

        return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
        struct omap_dmadev *od = platform_get_drvdata(pdev);

        dma_async_device_unregister(&od->ddev);
        omap_dma_free(od);

        return 0;
}

static struct platform_driver omap_dma_driver = {
        .probe  = omap_dma_probe,
        .remove = omap_dma_remove,
        .driver = {
                .name = "omap-dma-engine",
                .owner = THIS_MODULE,
        },
};

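/*
 * Filter function for dma_request_channel(): matches a channel belonging to
 * this driver whose DMA request line equals *(unsigned *)param.
 */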
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &omap_dma_driver.driver) {
                struct omap_chan *c = to_omap_dma_chan(chan);
                unsigned req = *(unsigned *)param;

                return req == c->dma_sig;
        }
        return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static struct platform_device *pdev;

static const struct platform_device_info omap_dma_dev_info = {
        .name = "omap-dma-engine",
        .id = -1,
        .dma_mask = DMA_BIT_MASK(32),
};

static int omap_dma_init(void)
{
        int rc = platform_driver_register(&omap_dma_driver);

        if (rc == 0) {
                pdev = platform_device_register_full(&omap_dma_dev_info);
                if (IS_ERR(pdev)) {
                        platform_driver_unregister(&omap_dma_driver);
                        rc = PTR_ERR(pdev);
                }
        }
        return rc;
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
        platform_device_unregister(pdev);
        platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");