linux/arch/powerpc/platforms/pasemi/dma_lib.c
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Common functions for DMA access on PA Semi PWRficient
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>

#include <asm/pasemi_dma.h>

#define MAX_TXCH 64
#define MAX_RXCH 64
#define MAX_FLAGS 64
#define MAX_FUN 8

static struct pasdma_status *dma_status;

static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

static int base_hw_irq;

static int num_txch, num_rxch;

static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */

static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
static DECLARE_BITMAP(flags_free, MAX_FLAGS);
static DECLARE_BITMAP(fun_free, MAX_FUN);

/* pasemi_read_iob_reg - read IOB register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
        return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);

/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
        out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);

/* pasemi_read_mac_reg - read MAC register
 * @intf: MAC interface
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
        return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);

/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
        out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);

/* pasemi_read_dma_reg - read DMA register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
        return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);

/* pasemi_write_dma_reg - write DMA register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
        out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);
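
/*
 * Usage sketch for the accessors above (illustrative only, not called from
 * this file): a read-modify-write of a DMA register.  PAS_DMA_COM_CFG is a
 * real offset from <asm/pasemi_dma.h>; the bit being set is a made-up value
 * chosen purely for the example.
 *
 *        u32 cfg;
 *
 *        cfg = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
 *        cfg |= 0x1;
 *        pasemi_write_dma_reg(PAS_DMA_COM_CFG, cfg);
 */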

static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
        int bit;
        int start, limit;

        switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
        case TXCHAN_EVT0:
                start = 0;
                limit = 10;
                break;
        case TXCHAN_EVT1:
                start = 10;
                limit = MAX_TXCH;
                break;
        default:
                start = 0;
                limit = MAX_TXCH;
                break;
        }
retry:
        bit = find_next_bit(txch_free, MAX_TXCH, start);
        if (bit >= limit)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, txch_free))
                goto retry;

        return bit;
}

static void pasemi_free_tx_chan(int chan)
{
        BUG_ON(test_bit(chan, txch_free));
        set_bit(chan, txch_free);
}

static int pasemi_alloc_rx_chan(void)
{
        int bit;
retry:
        bit = find_first_bit(rxch_free, MAX_RXCH);
        if (bit >= MAX_RXCH)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, rxch_free))
                goto retry;

        return bit;
}

static void pasemi_free_rx_chan(int chan)
{
        BUG_ON(test_bit(chan, rxch_free));
        set_bit(chan, rxch_free);
}

/* pasemi_dma_alloc_chan - Allocate a DMA channel
 * @type: Type of channel to allocate
 * @total_size: Total size of structure to allocate (to allow for more
 *              room behind the structure to be used by the client)
 * @offset: Offset in bytes from start of the total structure to the beginning
 *          of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
 *          not the first member of the client structure.
 *
 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
 * type argument specifies whether it's an RX or TX channel, and in the case
 * of TX channels which group it needs to belong to (if any).
 *
 * Returns a pointer to the embedded struct pasemi_dmachan on success (the
 * full client structure is reachable through its priv member), NULL on
 * failure.
 */
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
                            int total_size, int offset)
{
        void *buf;
        struct pasemi_dmachan *chan;
        int chno;

        BUG_ON(total_size < sizeof(struct pasemi_dmachan));

        buf = kzalloc(total_size, GFP_KERNEL);

        if (!buf)
                return NULL;
        chan = buf + offset;

        chan->priv = buf;

        switch (type & (TXCHAN|RXCHAN)) {
        case RXCHAN:
                chno = pasemi_alloc_rx_chan();
                chan->chno = chno;
                chan->irq = irq_create_mapping(NULL,
                                               base_hw_irq + num_txch + chno);
                chan->status = &dma_status->rx_sta[chno];
                break;
        case TXCHAN:
                chno = pasemi_alloc_tx_chan(type);
                chan->chno = chno;
                chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
                chan->status = &dma_status->tx_sta[chno];
                break;
        }

        chan->chan_type = type;

        return chan;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);
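
/*
 * Usage sketch (illustrative; "struct my_txring" and its fields are
 * hypothetical and not part of this file).  A client embeds struct
 * pasemi_dmachan in its own per-channel structure and passes the total
 * size together with the offset of the embedded member.  Keeping the
 * channel as the first member makes the returned pointer usable as the
 * outer structure as well:
 *
 *        struct my_txring {
 *                struct pasemi_dmachan chan;
 *                unsigned int next_to_clean;
 *        };
 *
 *        struct my_txring *ring;
 *
 *        ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct my_txring),
 *                                     offsetof(struct my_txring, chan));
 *        if (!ring)
 *                return -ENOMEM;
 */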

/* pasemi_dma_free_chan - Free a previously allocated channel
 * @chan: Channel to free
 *
 * Frees a previously allocated channel. It will also deallocate any
 * descriptor ring associated with the channel, if allocated.
 */
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
        if (chan->ring_virt)
                pasemi_dma_free_ring(chan);

        switch (chan->chan_type & (RXCHAN|TXCHAN)) {
        case RXCHAN:
                pasemi_free_rx_chan(chan->chno);
                break;
        case TXCHAN:
                pasemi_free_tx_chan(chan->chno);
                break;
        }

        kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);

/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
 * @chan: Channel for which to allocate
 * @ring_size: Ring size in 64-bit (8-byte) words
 *
 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
 * on failure. The passed in struct pasemi_dmachan is updated with the
 * virtual and DMA addresses of the ring.
 */
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
        BUG_ON(chan->ring_virt);

        chan->ring_size = ring_size;

        chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
                                             ring_size * sizeof(u64),
                                             &chan->ring_dma, GFP_KERNEL);

        if (!chan->ring_virt)
                return -ENOMEM;

        memset(chan->ring_virt, 0, ring_size * sizeof(u64));

        return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);
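
/*
 * Ring setup sketch, continuing the hypothetical client above (the ring
 * size of 64 words is arbitrary).  On failure the channel itself still has
 * to be released by the caller:
 *
 *        if (pasemi_dma_alloc_ring(&ring->chan, 64)) {
 *                pasemi_dma_free_chan(&ring->chan);
 *                return -ENOMEM;
 *        }
 *
 * Afterwards ring->chan.ring_virt and ring->chan.ring_dma hold the CPU and
 * DMA addresses of the ring, the latter being what a client then programs
 * into the hardware.
 */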

/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
 * @chan: Channel for which to free the descriptor ring
 *
 * Frees a previously allocated descriptor ring for a channel.
 */
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
        BUG_ON(!chan->ring_virt);

        dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
                          chan->ring_virt, chan->ring_dma);
        chan->ring_virt = NULL;
        chan->ring_size = 0;
        chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);

/* pasemi_dma_start_chan - Start a DMA channel
 * @chan: Channel to start
 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
 *
 * Enables (starts) a DMA channel with optional additional arguments.
 */
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
        if (chan->chan_type == RXCHAN)
                pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
                                     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
        else
                pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
                                     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);

/* pasemi_dma_stop_chan - Stop a DMA channel
 * @chan: Channel to stop
 *
 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
 * CMDSTA register and waiting on the ACT (active) bit to clear, then
 * finally disabling the whole channel.
 *
 * This function only waits a short while for the channel to stop; if it
 * fails to stop in time, failure is returned.
 *
 * Returns 1 on success, 0 on failure.
 */
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
        int reg, retries;
        u32 sta;

        if (chan->chan_type == RXCHAN) {
                reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
                pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
                for (retries = 0; retries < MAX_RETRIES; retries++) {
                        sta = pasemi_read_dma_reg(reg);
                        if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
                                pasemi_write_dma_reg(reg, 0);
                                return 1;
                        }
                        cond_resched();
                }
        } else {
                reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
                pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
                for (retries = 0; retries < MAX_RETRIES; retries++) {
                        sta = pasemi_read_dma_reg(reg);
                        if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
                                pasemi_write_dma_reg(reg, 0);
                                return 1;
                        }
                        cond_resched();
                }
        }

        return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);
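
/*
 * Teardown sketch (illustrative): the usual order is to stop the channel
 * first and only then release the ring and the channel.  Since this
 * function returns 1 on success and 0 on failure, a caller may want to log
 * a channel that refuses to go idle before freeing it anyway:
 *
 *        if (!pasemi_dma_stop_chan(chan))
 *                pr_err("dma channel %d did not stop\n", chan->chno);
 *        pasemi_dma_free_chan(chan);
 */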

/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
                           dma_addr_t *handle)
{
        return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);

/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
                         dma_addr_t *handle)
{
        dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);

/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
 *
 * Allocates a flag for use with channel synchronization (event descriptors).
 * Returns allocated flag (0-63), < 0 on error.
 */
int pasemi_dma_alloc_flag(void)
{
        int bit;

retry:
        bit = find_next_bit(flags_free, MAX_FLAGS, 0);
        if (bit >= MAX_FLAGS)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, flags_free))
                goto retry;

        return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_flag);


/* pasemi_dma_free_flag - Deallocates a flag (event)
 * @flag: Flag number to deallocate
 *
 * Frees up a flag so it can be reused for other purposes.
 */
void pasemi_dma_free_flag(int flag)
{
        BUG_ON(flag >= MAX_FLAGS);
        BUG_ON(test_bit(flag, flags_free));
        set_bit(flag, flags_free);
}
EXPORT_SYMBOL(pasemi_dma_free_flag);


/* pasemi_dma_set_flag - Sets a flag (event) to 1
 * @flag: Flag number to set active
 *
 * Sets the flag provided to 1.
 */
void pasemi_dma_set_flag(int flag)
{
        BUG_ON(flag >= MAX_FLAGS);
        if (flag < 32)
                pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
        else
                pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_set_flag);

/* pasemi_dma_clear_flag - Sets a flag (event) to 0
 * @flag: Flag number to set inactive
 *
 * Sets the flag provided to 0.
 */
void pasemi_dma_clear_flag(int flag)
{
        BUG_ON(flag >= MAX_FLAGS);
        if (flag < 32)
                pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
        else
                pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_clear_flag);
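
/*
 * Flag usage sketch (illustrative): a client that synchronizes two channels
 * allocates a flag, clears it before first use and releases it on teardown.
 * How the flag number is encoded into event descriptors is up to the client
 * and not shown here:
 *
 *        int flag;
 *
 *        flag = pasemi_dma_alloc_flag();
 *        if (flag < 0)
 *                return flag;
 *        pasemi_dma_clear_flag(flag);
 *        ...
 *        pasemi_dma_free_flag(flag);
 */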

/* pasemi_dma_alloc_fun - Allocate a function engine
 *
 * Allocates a function engine to use for crypto/checksum offload.
 * Returns allocated engine (0-7), < 0 on error.
 */
int pasemi_dma_alloc_fun(void)
{
        int bit;

retry:
        bit = find_next_bit(fun_free, MAX_FUN, 0);
        if (bit >= MAX_FUN)
                return -ENOSPC;
        if (!test_and_clear_bit(bit, fun_free))
                goto retry;

        return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_fun);


/* pasemi_dma_free_fun - Deallocates a function engine
 * @fun: Engine number to deallocate
 *
 * Frees up a function engine so it can be used for other purposes.
 */
void pasemi_dma_free_fun(int fun)
{
        BUG_ON(fun >= MAX_FUN);
        BUG_ON(test_bit(fun, fun_free));
        set_bit(fun, fun_free);
}
EXPORT_SYMBOL(pasemi_dma_free_fun);


static void *map_onedev(struct pci_dev *p, int index)
{
        struct device_node *dn;
        void __iomem *ret;

        dn = pci_device_to_OF_node(p);
        if (!dn)
                goto fallback;

        ret = of_iomap(dn, index);
        if (!ret)
                goto fallback;

        return ret;
fallback:
        /* This is hardcoded and ugly, but we have some firmware versions
         * that don't provide the register space in the device tree. Luckily
         * they are at well-known locations so we can just do the math here.
         */
        return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
        static DEFINE_SPINLOCK(init_lock);
        struct pci_dev *iob_pdev;
        struct pci_dev *pdev;
        struct resource res;
        struct device_node *dn;
        int i, intf, err = 0;
        unsigned long timeout;
        u32 tmp;

        if (!machine_is(pasemi))
                return -ENODEV;

        spin_lock(&init_lock);

        /* Make sure we haven't already initialized */
        if (dma_pdev)
                goto out;

        iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
        if (!iob_pdev) {
                printk(KERN_WARNING "Can't find I/O Bridge\n");
                BUG();
                err = -ENODEV;
                goto out;
        }
        iob_regs = map_onedev(iob_pdev, 0);

        dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
        if (!dma_pdev) {
                printk(KERN_WARNING "Can't find DMA controller\n");
                BUG();
                err = -ENODEV;
                goto out;
        }
        dma_regs = map_onedev(dma_pdev, 0);
        base_hw_irq = virq_to_hw(dma_pdev->irq);

        pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
        num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

        pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
        num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

        intf = 0;
        for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
             pdev;
             pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
                mac_regs[intf++] = map_onedev(pdev, 0);

        pci_dev_put(pdev);

        for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
             pdev;
             pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
                mac_regs[intf++] = map_onedev(pdev, 0);

        pci_dev_put(pdev);

        dn = pci_device_to_OF_node(iob_pdev);
        if (dn)
                err = of_address_to_resource(dn, 1, &res);
        if (!dn || err) {
                /* Fallback for old firmware */
                res.start = 0xfd800000;
                res.end = res.start + 0x1000;
        }
        dma_status = __ioremap(res.start, res.end-res.start, 0);
        pci_dev_put(iob_pdev);

        for (i = 0; i < MAX_TXCH; i++)
                __set_bit(i, txch_free);

        for (i = 0; i < MAX_RXCH; i++)
                __set_bit(i, rxch_free);

        timeout = jiffies + HZ;
        pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
        while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
                if (time_after(jiffies, timeout)) {
                        pr_warn("Could not disable RX section\n");
                        break;
                }
        }

        timeout = jiffies + HZ;
        pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
        while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
                if (time_after(jiffies, timeout)) {
                        pr_warn("Could not disable TX section\n");
                        break;
                }
        }

        /* setup resource allocations for the different DMA sections */
        tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
        pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

        /* enable tx section */
        pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

        /* enable rx section */
        pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

        for (i = 0; i < MAX_FLAGS; i++)
                __set_bit(i, flags_free);

        for (i = 0; i < MAX_FUN; i++)
                __set_bit(i, fun_free);

        /* clear all status flags */
        pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
        pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);

        printk(KERN_INFO "PA Semi PWRficient DMA library initialized "
                "(%d tx, %d rx channels)\n", num_txch, num_rxch);

out:
        spin_unlock(&init_lock);
        return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
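
/*
 * Initialization sketch (illustrative; "my_probe" is a hypothetical client
 * probe routine, not part of this file).  Clients call pasemi_dma_init()
 * before any other routine in this library; calling it again once the
 * library is up is harmless and simply returns 0:
 *
 *        static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *        {
 *                int err;
 *
 *                err = pasemi_dma_init();
 *                if (err)
 *                        return err;
 *                ...
 *                return 0;
 *        }
 */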