linux/kernel/dma-coherent.c
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>

struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};

/*
 * Declare the memory at bus address @bus_addr, of @size bytes, as this
 * device's private coherent DMA pool.  @device_addr is the address at
 * which the device itself sees that memory.  Returns DMA_MEMORY_MAP or
 * DMA_MEMORY_IO on success, 0 on failure.
 */
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

/*
 * Tear down a region previously set up by dma_declare_coherent_memory().
 */
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
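
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants its coherent allocations served from a dedicated on-chip memory
 * region might declare it at probe time and release it on remove.  The
 * names sram_bus_addr, sram_dev_addr and SRAM_SIZE below are hypothetical
 * placeholders.
 *
 *	if (!dma_declare_coherent_memory(&pdev->dev, sram_bus_addr,
 *					 sram_dev_addr, SRAM_SIZE,
 *					 DMA_MEMORY_MAP |
 *					 DMA_MEMORY_EXCLUSIVE))
 *		return -ENOMEM;
 *
 *	...
 *
 *	dma_release_declared_memory(&pdev->dev);
 */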

/*
 * Reserve @size bytes of a declared region, starting at @device_addr, so
 * that the range is never handed out by dma_alloc_from_coherent().
 * Returns the kernel virtual address of the reserved range, or an
 * ERR_PTR() value on failure.
 */
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;

        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
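
/*
 * Usage sketch (illustrative only, not part of this file): a driver can
 * pin down a fixed sub-range of the declared region, for instance one the
 * hardware already dedicates to a mailbox or firmware area, so that the
 * allocator below never hands it out.  MBOX_DEV_ADDR and MBOX_SIZE are
 * hypothetical placeholders.
 *
 *	void *mbox;
 *
 *	mbox = dma_mark_declared_memory_occupied(&pdev->dev, MBOX_DEV_ADDR,
 *						 MBOX_SIZE);
 *	if (IS_ERR(mbox))
 *		return PTR_ERR(mbox);
 */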

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address
 *              of the allocated area.
 *
 * This function should only be called from the per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent() should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent() should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                     order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        *ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(*ret, 0, size);
                } else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        *ret = NULL;
        }
        return (mem != NULL);
}
EXPORT_SYMBOL(dma_alloc_from_coherent);
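
/*
 * Usage sketch (illustrative only, not part of this file): an
 * architecture's dma_alloc_coherent() is expected to consult the
 * per-device pool first and fall back to the generic allocator only when
 * this helper returns 0, roughly along these lines:
 *
 *	void *vaddr;
 *
 *	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
 *		return vaddr;
 *
 *	...allocate from the generic page allocator instead...
 */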

/**
 * dma_release_from_coherent() - try to free memory allocated from a per-device coherent memory pool
 * @dev:        device from which the memory was allocated
 * @order:      the order of pages allocated
 * @vaddr:      virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);
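
/*
 * Usage sketch (illustrative only, not part of this file): the matching
 * per-arch dma_free_coherent() would hand the buffer back to the
 * per-device pool first and only take the generic path when this helper
 * returns 0:
 *
 *	int order = get_order(size);
 *
 *	if (dma_release_from_coherent(dev, order, vaddr))
 *		return;
 *
 *	...free vaddr through the generic allocator...
 */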