/* linux/arch/m68knommu/kernel/dma.c */
/*
 * Dynamic DMA mapping support.
 *
 * We never have any address translations to worry about, so this
 * is just alloc/free.
 */
   7
   8#include <linux/types.h>
   9#include <linux/gfp.h>
  10#include <linux/mm.h>
  11#include <linux/device.h>
  12#include <linux/dma-mapping.h>
  13#include <asm/cacheflush.h>
  14
  15void *dma_alloc_coherent(struct device *dev, size_t size,
  16                           dma_addr_t *dma_handle, gfp_t gfp)
  17{
  18        void *ret;
  19        /* ignore region specifiers */
  20        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
  21
  22        if (dev == NULL || (*dev->dma_mask < 0xffffffff))
  23                gfp |= GFP_DMA;
  24        ret = (void *)__get_free_pages(gfp, get_order(size));
  25
  26        if (ret != NULL) {
  27                memset(ret, 0, size);
  28                *dma_handle = virt_to_phys(ret);
  29        }
  30        return ret;
  31}
  32
  33void dma_free_coherent(struct device *dev, size_t size,
  34                         void *vaddr, dma_addr_t dma_handle)
  35{
  36        free_pages((unsigned long)vaddr, get_order(size));
  37}
  38
  39void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
  40                                size_t size, enum dma_data_direction dir)
  41{
  42        switch (dir) {
  43        case DMA_TO_DEVICE:
  44                flush_dcache_range(handle, size);
  45                break;
  46        case DMA_FROM_DEVICE:
  47                /* Should be clear already */
  48                break;
  49        default:
  50                if (printk_ratelimit())
  51                        printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
  52                break;
  53        }
  54}
  55
  56EXPORT_SYMBOL(dma_sync_single_for_device);
  57dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
  58                          enum dma_data_direction dir)
  59{
  60        dma_addr_t handle = virt_to_phys(addr);
  61        flush_dcache_range(handle, size);
  62        return handle;
  63}
  64EXPORT_SYMBOL(dma_map_single);
  65
  66dma_addr_t dma_map_page(struct device *dev, struct page *page,
  67                        unsigned long offset, size_t size,
  68                        enum dma_data_direction dir)
  69{
  70        dma_addr_t handle = page_to_phys(page) + offset;
  71        dma_sync_single_for_device(dev, handle, size, dir);
  72        return handle;
  73}
  74EXPORT_SYMBOL(dma_map_page);
  75