linux/arch/mips/mm/dma-noncoherent.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this condition.
 * However this function is only called on non-I/O-coherent systems and only the
 * R10000 and R12000 are used in such systems: the SGI IP28 aka Indigo² and
 * the SGI IP32 aka O2, respectively.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

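/*
 * Called by the generic DMA code before a newly allocated "coherent"
 * (i.e. uncached on MIPS) buffer is handed out: write back and invalidate
 * every cache line covering it so no dirty line can later be evicted on
 * top of data the device has written.
 */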
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

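/*
 * Return an uncached alias of @addr by translating its physical address
 * into the uncached segment starting at UNCAC_BASE (KSEG1 on 32-bit,
 * uncached XKPHYS on 64-bit).
 */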
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

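/*
 * Cache maintenance before the device touches the buffer: write back dirty
 * lines the device is going to read (DMA_TO_DEVICE), invalidate lines that
 * would shadow data the device is about to write (DMA_FROM_DEVICE), or do
 * both for DMA_BIDIRECTIONAL.
 */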
static inline void dma_sync_virt_for_device(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

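/*
 * Cache maintenance after the device has written the buffer: invalidate any
 * lines the CPU may have (speculatively) refilled while the DMA was in
 * flight.  Nothing needs to be done for DMA_TO_DEVICE.
 */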
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets optimized out.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}

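/*
 * Hook called by the dma-direct core when a streaming mapping is handed to
 * a non-coherent device (dma_map_*() and dma_sync_*_for_device()).
 */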
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir, true);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
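/*
 * Hook called by the dma-direct core when buffer ownership returns to the
 * CPU (dma_unmap_*() and dma_sync_*_for_cpu()).  Only CPUs that can
 * speculatively refill cache lines while the DMA is in flight need the
 * extra invalidate here.
 */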
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir, false);
}
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
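/*
 * Record whether the device is DMA coherent; the generic DMA code checks
 * dev->dma_coherent (via dev_is_dma_coherent()) to decide whether any of
 * the cache maintenance above is needed at all.
 */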
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif
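
/*
 * Illustrative sketch only (kept under "#if 0" so it is never built):
 * roughly how a driver's streaming DMA on a non-coherent device reaches the
 * hooks above through the dma-direct core.  'dev', 'buf' and 'len' are
 * hypothetical driver-side names, not part of this file.
 */
#if 0
static void example_noncoherent_rx(struct device *dev, void *buf, size_t len)
{
	/* dma-direct ends up in arch_sync_dma_for_device(): writeback/invalidate */
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return;

	/* ... the device DMAs into the buffer here ... */

	/*
	 * dma-direct ends up in arch_sync_dma_for_cpu(), which invalidates
	 * again only when cpu_needs_post_dma_flush() says so.
	 */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}
#endif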