linux/arch/powerpc/include/asm/dma.h
#ifndef _ASM_POWERPC_DMA_H
#define _ASM_POWERPC_DMA_H
#ifdef __KERNEL__

/*
 * Defines for using and allocating dma channels.
 * Written by Hennus Bergman, 1992.
 * High DMA channel support & info by Hannu Savolainen
 * and John Boyd, Nov. 1992.
 * Changes for ppc sound by Christoph Nadig
 */

/*
 * Note: Adapted for PowerPC by Gary Thomas
 * Modified by Cort Dougan <cort@cs.nmt.edu>
 *
 * None of this really applies for Power Macintoshes.  There is
 * basically just enough here to get kernel/dma.c to compile.
 *
 * There may be some comments or restrictions made here which are
 * not valid for the PReP platform.  Take what you read
 * with a grain of salt.
 */

#include <asm/io.h>
#include <linux/spinlock.h>
#include <asm/system.h>

#ifndef MAX_DMA_CHANNELS
#define MAX_DMA_CHANNELS        8
#endif

/* The maximum address that we can perform a DMA transfer to on this platform */
/* Doesn't really apply... */
#define MAX_DMA_ADDRESS         (~0UL)

#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)

#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb        outb_p
#else
#define dma_outb        outb
#endif

#define dma_inb         inb

/*
 * NOTES about DMA transfers:
 *
 *  controller 1: channels 0-3, byte operations, ports 00-1F
 *  controller 2: channels 4-7, word operations, ports C0-DF
 *
 *  - ALL registers are 8 bits only, regardless of transfer size
 *  - channel 4 is not used - cascades 1 into 2.
 *  - channels 0-3 are byte - addresses/counts are for physical bytes
 *  - channels 5-7 are word - addresses/counts are for physical words
 *  - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
 *  - transfer count loaded to registers is 1 less than actual count
 *  - controller 2 offsets are all even (2x offsets for controller 1)
 *  - page registers for 5-7 don't use data bit 0, represent 128K pages
 *  - page registers for 0-3 use bit 0, represent 64K pages
 *
 * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
 * On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
 * Note that addresses loaded into registers must be _physical_ addresses,
 * not logical addresses (which may differ if paging is active).
 *
 *  Address mapping for channels 0-3:
 *
 *   A23 ... A16 A15 ... A8  A7 ... A0    (Physical addresses)
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *    |  ...  |   |  ... |   |  ... |
 *   P7  ...  P0  A7 ... A0  A7 ... A0
 * |    Page    | Addr MSB | Addr LSB |   (DMA registers)
 *
 *  Address mapping for channels 5-7:
 *
 *   A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0    (Physical addresses)
 *    |  ...  |   \   \   ... \  \  \  ... \  \
 *    |  ...  |    \   \   ... \  \  \  ... \  (not used)
 *    |  ...  |     \   \   ... \  \  \  ... \
 *   P7  ...  P1 (0) A7 A6  ... A0 A7 A6 ... A0
 * |      Page      |  Addr MSB   |  Addr LSB  |   (DMA registers)
 *
 * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
 * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
 * the hardware level, so odd-byte transfers aren't possible).
 *
 * Transfer count (_not # bytes_) is limited to 64K, represented as actual
 * count - 1 : 64K => 0xFFFF, 1 => 0x0000.  Thus, count is always 1 or more,
 * and up to 128K bytes may be transferred on channels 5-7 in one operation.
 *
 */
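
/*
 * Worked example (illustrative values only): a 4096-byte transfer to
 * physical address 0x123456 on channel 6 is really 2048 words, and the
 * helpers below split it up as
 *
 *      page register  = (0x123456 >> 16) & 0xfe = 0x12
 *      address MSB    = (0x123456 >>  9) & 0xff = 0x1a
 *      address LSB    = (0x123456 >>  1) & 0xff = 0x2b
 *      count register = 2048 - 1 = 0x07ff       (MSB 0x07, LSB 0xff)
 *
 * The same transfer on a byte channel (0-3) would instead use page 0x12,
 * address MSB 0x34, address LSB 0x56 and a count register value of
 * 4096 - 1 = 0x0fff.
 */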

/* 8237 DMA controllers */
#define IO_DMA1_BASE    0x00    /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE    0xC0    /* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG            0x08    /* command register (w) */
#define DMA1_STAT_REG           0x08    /* status register (r) */
#define DMA1_REQ_REG            0x09    /* request register (w) */
#define DMA1_MASK_REG           0x0A    /* single-channel mask (w) */
#define DMA1_MODE_REG           0x0B    /* mode register (w) */
#define DMA1_CLEAR_FF_REG       0x0C    /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG           0x0D    /* Temporary Register (r) */
#define DMA1_RESET_REG          0x0D    /* Master Clear (w) */
#define DMA1_CLR_MASK_REG       0x0E    /* Clear Mask */
#define DMA1_MASK_ALL_REG       0x0F    /* all-channels mask (w) */

#define DMA2_CMD_REG            0xD0    /* command register (w) */
#define DMA2_STAT_REG           0xD0    /* status register (r) */
#define DMA2_REQ_REG            0xD2    /* request register (w) */
#define DMA2_MASK_REG           0xD4    /* single-channel mask (w) */
#define DMA2_MODE_REG           0xD6    /* mode register (w) */
#define DMA2_CLEAR_FF_REG       0xD8    /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG           0xDA    /* Temporary Register (r) */
#define DMA2_RESET_REG          0xDA    /* Master Clear (w) */
#define DMA2_CLR_MASK_REG       0xDC    /* Clear Mask */
#define DMA2_MASK_ALL_REG       0xDE    /* all-channels mask (w) */

#define DMA_ADDR_0              0x00    /* DMA address registers */
#define DMA_ADDR_1              0x02
#define DMA_ADDR_2              0x04
#define DMA_ADDR_3              0x06
#define DMA_ADDR_4              0xC0
#define DMA_ADDR_5              0xC4
#define DMA_ADDR_6              0xC8
#define DMA_ADDR_7              0xCC

#define DMA_CNT_0               0x01    /* DMA count registers */
#define DMA_CNT_1               0x03
#define DMA_CNT_2               0x05
#define DMA_CNT_3               0x07
#define DMA_CNT_4               0xC2
#define DMA_CNT_5               0xC6
#define DMA_CNT_6               0xCA
#define DMA_CNT_7               0xCE

#define DMA_LO_PAGE_0           0x87    /* DMA page registers */
#define DMA_LO_PAGE_1           0x83
#define DMA_LO_PAGE_2           0x81
#define DMA_LO_PAGE_3           0x82
#define DMA_LO_PAGE_5           0x8B
#define DMA_LO_PAGE_6           0x89
#define DMA_LO_PAGE_7           0x8A

#define DMA_HI_PAGE_0           0x487   /* DMA page registers */
#define DMA_HI_PAGE_1           0x483
#define DMA_HI_PAGE_2           0x481
#define DMA_HI_PAGE_3           0x482
#define DMA_HI_PAGE_5           0x48B
#define DMA_HI_PAGE_6           0x489
#define DMA_HI_PAGE_7           0x48A

#define DMA1_EXT_REG            0x40B
#define DMA2_EXT_REG            0x4D6

#ifndef __powerpc64__
    /* in arch/ppc/kernel/setup.c -- Cort */
    extern unsigned int DMA_MODE_WRITE;
    extern unsigned int DMA_MODE_READ;
    extern unsigned long ISA_DMA_THRESHOLD;
#else
    #define DMA_MODE_READ       0x44    /* I/O to memory, no autoinit, increment, single mode */
    #define DMA_MODE_WRITE      0x48    /* memory to I/O, no autoinit, increment, single mode */
#endif

#define DMA_MODE_CASCADE        0xC0    /* pass thru DREQ->HRQ, DACK<-HLDA only */

#define DMA_AUTOINIT            0x10

extern spinlock_t dma_spin_lock;

static __inline__ unsigned long claim_dma_lock(void)
{
        unsigned long flags;
        spin_lock_irqsave(&dma_spin_lock, flags);
        return flags;
}

static __inline__ void release_dma_lock(unsigned long flags)
{
        spin_unlock_irqrestore(&dma_spin_lock, flags);
}
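
/*
 * The channel-programming helpers below are meant to be called with
 * dma_spin_lock held (see the flip-flop note further down).  A minimal
 * sketch of the locking pattern, with 'chan' standing in for a real
 * channel number:
 *
 *      unsigned long flags = claim_dma_lock();
 *
 *      disable_dma(chan);
 *      ... program the channel ...
 *      enable_dma(chan);
 *
 *      release_dma_lock(flags);
 */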

/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
        unsigned char ucDmaCmd = 0x00;

        if (dmanr != 4) {
                dma_outb(0, DMA2_MASK_REG);     /* This may not be enabled */
                dma_outb(ucDmaCmd, DMA2_CMD_REG);       /* Enable group */
        }
        if (dmanr <= 3) {
                dma_outb(dmanr, DMA1_MASK_REG);
                dma_outb(ucDmaCmd, DMA1_CMD_REG);       /* Enable group */
        } else {
                dma_outb(dmanr & 3, DMA2_MASK_REG);
        }
}

static __inline__ void disable_dma(unsigned int dmanr)
{
        if (dmanr <= 3)
                dma_outb(dmanr | 4, DMA1_MASK_REG);
        else
                dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}

/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while interrupts are disabled! ---
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
        if (dmanr <= 3)
                dma_outb(0, DMA1_CLEAR_FF_REG);
        else
                dma_outb(0, DMA2_CLEAR_FF_REG);
}

/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
        if (dmanr <= 3)
                dma_outb(mode | dmanr, DMA1_MODE_REG);
        else
                dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
}

/* Set only the page register bits of the transfer address.
 * This is used for successive transfers when we know the contents of
 * the lower 16 bits of the DMA current address register, but a 64k boundary
 * may have been crossed.
 */
static __inline__ void set_dma_page(unsigned int dmanr, int pagenr)
{
        switch (dmanr) {
        case 0:
                dma_outb(pagenr, DMA_LO_PAGE_0);
                dma_outb(pagenr >> 8, DMA_HI_PAGE_0);
                break;
        case 1:
                dma_outb(pagenr, DMA_LO_PAGE_1);
                dma_outb(pagenr >> 8, DMA_HI_PAGE_1);
                break;
        case 2:
                dma_outb(pagenr, DMA_LO_PAGE_2);
                dma_outb(pagenr >> 8, DMA_HI_PAGE_2);
                break;
        case 3:
                dma_outb(pagenr, DMA_LO_PAGE_3);
                dma_outb(pagenr >> 8, DMA_HI_PAGE_3);
                break;
        case 5:
                dma_outb(pagenr & 0xfe, DMA_LO_PAGE_5);
                dma_outb(pagenr >> 8, DMA_HI_PAGE_5);
                break;
        case 6:
                dma_outb(pagenr & 0xfe, DMA_LO_PAGE_6);
                dma_outb(pagenr >> 8, DMA_HI_PAGE_6);
                break;
        case 7:
                dma_outb(pagenr & 0xfe, DMA_LO_PAGE_7);
                dma_outb(pagenr >> 8, DMA_HI_PAGE_7);
                break;
        }
}

/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int phys)
{
        if (dmanr <= 3) {
                dma_outb(phys & 0xff,
                         ((dmanr & 3) << 1) + IO_DMA1_BASE);
                dma_outb((phys >> 8) & 0xff,
                         ((dmanr & 3) << 1) + IO_DMA1_BASE);
        } else {
                dma_outb((phys >> 1) & 0xff,
                         ((dmanr & 3) << 2) + IO_DMA2_BASE);
                dma_outb((phys >> 9) & 0xff,
                         ((dmanr & 3) << 2) + IO_DMA2_BASE);
        }
        set_dma_page(dmanr, phys >> 16);
}


/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
 * a specific DMA channel.
 * You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more
 * than the initial word count"! This is taken into account.
 * Assumes dma flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
        count--;
        if (dmanr <= 3) {
                dma_outb(count & 0xff,
                         ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
                dma_outb((count >> 8) & 0xff,
                         ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
        } else {
                dma_outb((count >> 1) & 0xff,
                         ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
                dma_outb((count >> 9) & 0xff,
                         ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
        }
}
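
/*
 * Putting the helpers together, a single-mode transfer is typically set
 * up roughly like this (illustrative sketch only; 'chan', 'phys' and
 * 'len' are placeholders for a real channel number, physical address and
 * byte count):
 *
 *      unsigned long flags = claim_dma_lock();
 *
 *      disable_dma(chan);
 *      set_dma_mode(chan, DMA_MODE_READ);      // device -> memory
 *      clear_dma_ff(chan);                     // LSB/MSB order now known
 *      set_dma_addr(chan, phys);               // physical, not virtual
 *      set_dma_count(chan, len);               // bytes (even for ch 5-7)
 *      enable_dma(chan);
 *
 *      release_dma_lock(flags);
 */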


/* Get DMA residue count. After a DMA transfer, this
 * should return zero. Reading this while a DMA transfer is
 * still in progress will return unpredictable results.
 * If called before the channel has been used, it may return 1.
 * Otherwise, it returns the number of _bytes_ left to transfer.
 *
 * Assumes DMA flip-flop is clear.
 */
static __inline__ int get_dma_residue(unsigned int dmanr)
{
        unsigned int io_port = (dmanr <= 3)
            ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
            : ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;

        /* using short to get 16-bit wrap around */
        unsigned short count;

        count = 1 + dma_inb(io_port);
        count += dma_inb(io_port) << 8;

        return (dmanr <= 3) ? count : (count << 1);
}
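
/*
 * Once the device signals completion, a driver can cross-check the
 * controller before shutting the channel down, e.g. (illustrative only):
 *
 *      flags = claim_dma_lock();
 *      clear_dma_ff(chan);
 *      residue = get_dma_residue(chan);        // 0 if everything moved
 *      disable_dma(chan);
 *      release_dma_lock(flags);
 */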

/* These are in kernel/dma.c: */

/* reserve a DMA channel */
extern int request_dma(unsigned int dmanr, const char *device_id);
/* release it again */
extern void free_dma(unsigned int dmanr);
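
/*
 * Channels are claimed once per device and released when done, e.g.
 * (illustrative sketch; the channel number and name are placeholders):
 *
 *      if (request_dma(chan, "mydevice"))
 *              return -EBUSY;
 *      ... run transfers as sketched above ...
 *      free_dma(chan);
 */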

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy    (0)
#endif

#endif  /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */

#endif /* __KERNEL__ */
#endif  /* _ASM_POWERPC_DMA_H */