linux/arch/mips/mm/dma-noncoherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;  MIPS
 * terminology calls memory areas with hardware maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this condition.
 * However this function is only called on non-I/O-coherent systems and only the
 * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² resp. the
 * SGI IP32 aka O2.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
        switch (boot_cpu_type()) {
        case CPU_R10000:
        case CPU_R12000:
        case CPU_BMIPS5000:
        case CPU_LOONGSON2EF:
        case CPU_XBURST:
                return true;
        default:
                /*
                 * Presence of MAARs suggests that the CPU supports
                 * speculatively prefetching data, and therefore requires
                 * the post-DMA flush/invalidate.
                 */
                return cpu_has_maar;
        }
}

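/*
 * Prepare a page for coherent (uncached) use: write back any dirty
 * cachelines and invalidate them so that later uncached accesses see the
 * data that is actually in memory.
 */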
void arch_dma_prep_coherent(struct page *page, size_t size)
{
        dma_cache_wback_inv((unsigned long)page_address(page), size);
}

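/*
 * Return an uncached alias of the buffer by translating its physical
 * address into the kernel's uncached segment at UNCAC_BASE.
 */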
void *arch_dma_set_uncached(void *addr, size_t size)
{
        return (void *)(__pa(addr) + UNCAC_BASE);
}

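/*
 * Cache maintenance before handing the buffer to the device: write back
 * CPU-dirtied data for DMA_TO_DEVICE, invalidate stale lines for
 * DMA_FROM_DEVICE, and do both for DMA_BIDIRECTIONAL.
 */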
static inline void dma_sync_virt_for_device(void *addr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                dma_cache_wback((unsigned long)addr, size);
                break;
        case DMA_FROM_DEVICE:
                dma_cache_inv((unsigned long)addr, size);
                break;
        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv((unsigned long)addr, size);
                break;
        default:
                BUG();
        }
}

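/*
 * Cache maintenance after the device has written to the buffer: invalidate
 * any cachelines the CPU may have (speculatively) filled during the
 * transfer so reads observe the device's data.  Nothing to do for
 * DMA_TO_DEVICE.
 */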
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
                enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                break;
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                dma_cache_inv((unsigned long)addr, size);
                break;
        default:
                BUG();
        }
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir, bool for_device)
{
        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
        unsigned long offset = paddr & ~PAGE_MASK;
        size_t left = size;

        do {
                size_t len = left;
                void *addr;

                if (PageHighMem(page)) {
                        if (offset + len > PAGE_SIZE)
                                len = PAGE_SIZE - offset;
                }

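                /*
                 * kmap_atomic() degenerates to page_address() for lowmem
                 * pages; highmem pages get a temporary mapping that is
                 * dropped again right after the cache operation.
                 */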
                addr = kmap_atomic(page);
                if (for_device)
                        dma_sync_virt_for_device(addr + offset, len, dir);
                else
                        dma_sync_virt_for_cpu(addr + offset, len, dir);
                kunmap_atomic(addr);

                offset = 0;
                page++;
                left -= len;
        } while (left);
}

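/*
 * Called by the generic DMA code before the device accesses the buffer
 * (dma_map_*() and dma_sync_*_for_device()).
 */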
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        dma_sync_phys(paddr, size, dir, true);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
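/*
 * Called after the device has finished with the buffer (dma_unmap_*() and
 * dma_sync_*_for_cpu()).  Only CPUs that can speculatively refill
 * cachelines during the transfer need the extra invalidation here; see
 * cpu_needs_post_dma_flush() above.
 */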
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
{
        if (cpu_needs_post_dma_flush())
                dma_sync_phys(paddr, size, dir, false);
}
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
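/*
 * Record whether the device is DMA coherent (typically probed from the
 * devicetree) so the generic DMA code can skip the cache maintenance
 * above for coherent devices.
 */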
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent)
{
        dev->dma_coherent = coherent;
}
#endif