1
2
3
4
5
6
7
8
9
10#ifndef _ASMARM_CACHEFLUSH_H
11#define _ASMARM_CACHEFLUSH_H
12
13#include <linux/mm.h>
14
15#include <asm/glue-cache.h>
16#include <asm/shmparam.h>
17#include <asm/cachetype.h>
18#include <asm/outercache.h>
19
/*
 * Cache colour of a user virtual address: which of the SHMLBA-aligned
 * page slots the address falls into.  Used to detect aliasing between
 * mappings on VIVT/aliasing-VIPT caches.
 *
 * The argument is fully parenthesised so expressions using operators
 * of lower precedence than '&' (e.g. '|', '?:') expand correctly.
 */
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
21
/*
 * This flag is used to indicate that the page pointed to by a pte is
 * clean and does not require cleaning before returning it to the user.
 * It reuses the generic PG_arch_1 page flag bit.
 */
#define PG_dcache_clean PG_arch_1
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
/*
 * Per-CPU-type cache maintenance method table.  When MULTI_CACHE is
 * defined (see below), the __cpuc_* / dmac_* entry points are indirect
 * calls through an instance of this structure; otherwise they bind
 * directly to the single compiled-in implementation.
 *
 * NOTE(review): member order is relied upon by the MULTI_CACHE macro
 * bindings below — do not reorder.
 */
struct cpu_cache_fns {
	/* Invalidate the entire instruction cache. */
	void (*flush_icache_all)(void);
	/* Flush the entire kernel view of the caches. */
	void (*flush_kern_all)(void);
	/* Flush the entire user (current address space) view. */
	void (*flush_user_all)(void);
	/* Flush a user virtual address range; the third argument carries
	 * the vma->vm_flags of the mapping (see vivt_flush_cache_range). */
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	/* Make a kernel/user virtual range coherent between the D-cache
	 * and the I-cache, e.g. after writing instructions to memory. */
	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	/* Flush a kernel D-cache area given its base address and size. */
	void (*flush_kern_dcache_area)(void *, size_t);

	/* DMA maintenance: prepare/complete a buffer for device DMA.
	 * The int argument is presumably the DMA data direction —
	 * verify against the arch dma-mapping implementation. */
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	/* Flush a range for DMA, bounded by start/end pointers. */
	void (*dma_flush_range)(const void *, const void *);
};
112

/*
 * Bind the cache maintenance entry points.  With MULTI_CACHE (selected
 * via <asm/glue-cache.h>, included above) the kernel is built for more
 * than one cache type, so every operation is an indirect call through
 * the cpu_cache method table; otherwise the names below resolve
 * directly to the single compiled-in implementation.
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all cpu_cache.flush_kern_all
#define __cpuc_flush_user_all cpu_cache.flush_user_all
#define __cpuc_flush_user_range cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
159

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  copy_from_user_page() is a plain memcpy
 * here; copy_to_user_page() is implemented out of line, presumably
 * because it must also maintain cache coherency for the written
 * instructions/data — verify against the arch implementation.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
171

/*
 * Invalidate the entire I-cache.  The generic form uses the CP15
 * ICIALLU operation (c7, c5, 0); the v7 SMP form uses the
 * inner-shareable ICIALLUIS operation (c7, c1, 0) so the invalidate
 * is broadcast to all CPUs in the inner-shareable domain.
 */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate the whole I-cache, inner shareable (ARMv7 SMP). */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));

/*
 * Select the preferred I-cache flush for this configuration.  When the
 * kernel may run on several architecture revisions (V6+V7 builds, or
 * SMP kernels that can boot on UP hardware), the decision must be made
 * at run time via the __cpuc_ function pointer; otherwise the best
 * inline sequence is chosen at compile time.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif
201
/* Invalidate the entire I-cache using the method selected above. */
static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}
206
/* Flush the entire kernel view of the caches. */
#define flush_cache_all()		__cpuc_flush_kern_all()
208
/*
 * VIVT variant of flush_cache_mm(): flush the whole user cache view,
 * but only if this CPU has ever run the given mm (its bit is set in
 * the mm's CPU mask) — otherwise nothing of it can be in our cache.
 */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		return;

	__cpuc_flush_user_all();
}
214
215static inline void
216vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
217{
218 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
219 __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
220 vma->vm_flags);
221}
222
223static inline void
224vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
225{
226 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
227 unsigned long addr = user_addr & PAGE_MASK;
228 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
229 }
230}
231
#ifndef CONFIG_CPU_CACHE_VIPT
/*
 * Without VIPT cache support compiled in, the generic cache-flush
 * interface maps straight onto the VIVT helpers above.
 */
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
/*
 * VIPT-capable kernels need runtime cache-type checks, so these are
 * implemented out of line — presumably in arch/arm/mm (verify).
 */
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

/* Duplicating an mm's pages needs the same maintenance as flushing it. */
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
246

/*
 * flush_cache_user_range(vma, start, end)
 *
 * Make the given page-aligned user address range coherent between the
 * D-cache and the I-cache (e.g. before executing newly written code).
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Ensure that instructions written to the given kernel address range
 * [s, e) are visible to instruction fetch.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/* Clean the D-cache for the given kernel area (base, size). */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
266
267
268
269
270
271
272
273
274
275
276
277
278
/*
 * This architecture provides its own flush_dcache_page() out of line,
 * rather than the generic no-op — needed to keep kernel and user views
 * of a page-cache page coherent on aliasing caches (see the VIVT/VIPT
 * helpers above).
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
281
282static inline void flush_kernel_vmap_range(void *addr, int size)
283{
284 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
285 __cpuc_flush_dcache_area(addr, (size_t)size);
286}
287static inline void invalidate_kernel_vmap_range(void *addr, int size)
288{
289 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
290 __cpuc_flush_dcache_area(addr, (size_t)size);
291}
292
293#define ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Flush an anonymous page before its contents are accessed through a
 * kernel mapping.  Non-anonymous pages are handled elsewhere (via
 * flush_dcache_page()), so only PageAnon pages reach the out-of-line
 * worker.
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);

	if (!PageAnon(page))
		return;

	__flush_anon_page(vma, page, vmaddr);
}
302
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
/*
 * Deliberately empty: no maintenance is performed for kernel-mapped
 * pages here.  NOTE(review): presumably the required flushing happens
 * in flush_dcache_page()/copy_to_user_page() paths — confirm against
 * the arch/arm mm code before relying on this being a no-op.
 */
static inline void flush_kernel_dcache_page(struct page *page)
{
}

/*
 * Serialise dcache flushing against walks of an address_space's page
 * tree: take the mapping's tree_lock with interrupts disabled.
 */
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

/*
 * Called after writing instructions into a user page (e.g. ptrace
 * breakpoints); the D-cache flush pushes the new instructions out so
 * they become visible — NOTE(review): I-cache handling is presumably
 * done by flush_dcache_page() itself; verify.
 */
#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)
315

/*
 * We don't appear to need to do anything here.  If we did, it would
 * duplicate cache flushing already performed elsewhere (see
 * flush_dcache_page() / flush_icache_user_range() above).
 */
#define flush_icache_page(vma,page)	do { } while (0)
321
322
323
324
325
326
327
328
/*
 * Called before new vmalloc/vmap mappings at [start, end) are used.
 * Non-aliasing VIPT caches only need a barrier so the preceding PTE
 * writes and cache maintenance are complete — NOTE(review): presumably
 * because set_pte_at() on this path lacks a trailing DSB; confirm
 * against the arch pgtable code.  All other cache types get a full
 * flush.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing()) {
		dsb();
	} else {
		flush_cache_all();
	}
}
340
/*
 * Called before vmalloc/vmap mappings at [start, end) are torn down.
 * Nothing to do on non-aliasing VIPT caches; everything else needs a
 * full cache flush since stale aliased lines could otherwise be
 * written back after the mapping is gone.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing())
		return;

	flush_cache_all();
}
346
347#endif
348