// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can speculatively
 * fill random cachelines with stale data at any time, requiring an extra
 * flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent; MIPS
 * terminology calls memory areas with hardware maintained coherency coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this condition.
 * However this function is only called on non-I/O-coherent systems and only the
 * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
 * SGI IP32 aka O2.
 */
static inline bool cpu_needs_post_dma_flush(void)
{
	switch (boot_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2EF:
		return true;
	default:
		/*
		 * Presence of MAARs suggests that the CPU supports
		 * speculatively prefetching data, and therefore requires
		 * the post-DMA flush/invalidate.
		 */
		return cpu_has_maar;
	}
}

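/*
 * Make sure no cache lines cover the buffer before the coherent allocator
 * hands it out: write back any dirty lines and invalidate them so later
 * uncached accesses see (and are not corrupted by) stale data.
 */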
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	dma_cache_wback_inv((unsigned long)page_address(page), size);
}

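/*
 * Return the uncached alias of @addr: on MIPS all of physical memory is
 * also accessible through the unmapped, uncached window starting at
 * UNCAC_BASE (KSEG1 on 32-bit, uncached XKPHYS on 64-bit).
 */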
void *arch_dma_set_uncached(void *addr, size_t size)
{
	return (void *)(__pa(addr) + UNCAC_BASE);
}

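/*
 * Cache maintenance before the device accesses the buffer: write back
 * dirty lines the device must see (DMA_TO_DEVICE), invalidate lines that
 * would otherwise be written back on top of freshly DMA'd data
 * (DMA_FROM_DEVICE), or both (DMA_BIDIRECTIONAL).
 */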
static inline void dma_sync_virt_for_device(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;
	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

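/*
 * Cache maintenance after the device has written to the buffer: invalidate
 * any lines that may have been (speculatively) filled while the DMA was in
 * flight.  DMA_TO_DEVICE requires no post-DMA work.
 */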
static inline void dma_sync_virt_for_cpu(void *addr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv((unsigned long)addr, size);
		break;
	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous pages.  But
 * we still need to process highmem pages individually.  If highmem is not
 * configured then the bulk of this loop gets skipped.
 */
static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, bool for_device)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long offset = paddr & ~PAGE_MASK;
	size_t left = size;

	do {
		size_t len = left;
		void *addr;

		if (PageHighMem(page)) {
			if (offset + len > PAGE_SIZE)
				len = PAGE_SIZE - offset;
		}

		addr = kmap_atomic(page);
		if (for_device)
			dma_sync_virt_for_device(addr + offset, len, dir);
		else
			dma_sync_virt_for_cpu(addr + offset, len, dir);
		kunmap_atomic(addr);

		offset = 0;
		page++;
		left -= len;
	} while (left);
}

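/* Called by the generic DMA code before the device accesses the buffer. */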
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	dma_sync_phys(paddr, size, dir, true);
}

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
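/*
 * Called by the generic DMA code after the device has written to the
 * buffer; only CPUs that can speculatively fill cache lines (see
 * cpu_needs_post_dma_flush()) have anything to do here.
 */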
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	if (cpu_needs_post_dma_flush())
		dma_sync_phys(paddr, size, dir, false);
}
#endif

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
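/*
 * Record whether the bus reports this device as DMA coherent so the
 * generic DMA code can skip cache maintenance for it.
 */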
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
}
#endif