1
2
3
4
5
6
7#ifndef _ASMARM_CACHEFLUSH_H
8#define _ASMARM_CACHEFLUSH_H
9
10#include <linux/mm.h>
11
12#include <asm/glue-cache.h>
13#include <asm/shmparam.h>
14#include <asm/cachetype.h>
15#include <asm/outercache.h>
16
/*
 * Cache "colour" of a virtual address: its page index within an
 * SHMLBA-sized alias window.  Two mappings with the same colour land
 * in the same cache sets on aliasing VIPT caches.
 */
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

/*
 * This flag is used to indicate that the page pointed to by a pte is
 * clean and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
/*
 * Table of CPU-specific cache maintenance operations.  When several
 * cache implementations are compiled in (MULTI_CACHE) every operation
 * is dispatched through an instance of this structure.  The layout is
 * relied upon outside normal C code — hence __no_randomize_layout; do
 * not reorder fields.
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);		/* invalidate the whole I-cache */
	void (*flush_kern_all)(void);		/* flush the entire kernel cache */
	void (*flush_kern_louis)(void);		/* flush to Level of Unification Inner Shareable */
	void (*flush_user_all)(void);		/* flush all user-space cache state */
	/* (start, end, vm_flags) — see the vivt_flush_cache_* callers below */
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	/* make I and D caches coherent over a kernel VA range */
	void (*coherent_kern_range)(unsigned long, unsigned long);
	/* as above for user VAs; int return — NOTE(review): presumably an
	 * error status since user accesses may fault; confirm per impl */
	int (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	/* DMA cache maintenance — (start, size, direction); presumably the
	 * direction is a DMA_* value, verify against dma-mapping callers */
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
} __no_randomize_layout;
117
118
119
120
/*
 * Bind the __cpuc_* operations: through the run-time selected
 * cpu_cache function table when several cache types are compiled in,
 * or directly at link time for single-cache kernels.
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * This is private to the dma-mapping API.  Do not use directly.
 * Its sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * Private to the dma-mapping API — do not use directly (see the
 * MULTI_CACHE branch above for the rationale).
 */
extern void dmac_flush_range(const void *, const void *);

#endif
162
163
164
165
166
167
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  The write direction needs cache
 * maintenance and is implemented out of line; the read direction does
 * none here — it is a plain memcpy.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
174
175
176
177
178
179
/* Invalidate the entire I-cache: CP15 c7, c5, 0 (ICIALLU). */
#define __flush_icache_all_generic()				\
	asm("mcr p15, 0, %0, c7, c5, 0"				\
	    : : "r" (0));

/* Invalidate the entire I-cache inner shareable: CP15 c7, c1, 0
 * (ICIALLUIS) — the form needed on v7 SMP so all CPUs in the
 * inner-shareable domain observe the invalidation. */
#define __flush_icache_all_v7_smp()				\
	asm("mcr p15, 0, %0, c7, c1, 0"				\
	    : : "r" (0));

/*
 * Select the preferred I-cache invalidation method:
 * - kernels mixing v7 with v6/v6K, or built SMP_ON_UP, cannot
 *   hard-code one instruction and must use the run-time selected
 *   __cpuc_flush_icache_all();
 * - pure >= v7 SMP uses the inner-shareable variant;
 * - ARMv6 with ARM_ERRATA_411920 routes through the out-of-line
 *   implementation (NOTE(review): presumably that is where the
 *   erratum workaround lives — confirm);
 * - everything else uses the plain ICIALLU.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif
204
/*
 * Invalidate the whole I-cache using the variant selected above, then
 * issue a dsb(ishst) so the maintenance completes before subsequent
 * code depends on it.
 */
static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb(ishst);
}
210
211
212
213
/* Flush caches up to the Level of Unification Inner Shareable (LoUIS). */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

/* Flush the entire cache — delegated to the per-CPU implementation. */
#define flush_cache_all()		__cpuc_flush_kern_all()
217
/*
 * VIVT cache: flush all cached user-space state for @mm, but only if
 * this CPU is currently using the mm (it appears in mm_cpumask).
 */
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}
223
/*
 * VIVT cache: flush the user range [start, end) of @vma, widened to
 * page boundaries.  Skipped when the owning mm is not in use on this
 * CPU; a NULL mm is always flushed.
 */
static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}
233
234static inline void
235vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
236{
237 struct mm_struct *mm = vma->vm_mm;
238
239 if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
240 unsigned long addr = user_addr & PAGE_MASK;
241 __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
242 }
243}
244
/*
 * Bind the flush_cache_* family: non-VIPT kernels can use the inline
 * VIVT versions above; VIPT-capable kernels use out-of-line
 * implementations.
 */
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

/* fork(): same maintenance as flushing the mm being duplicated. */
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
259
/*
 * flush_icache_user_range(start, end)
 *
 *	Ensure coherency between the I-cache and the D-cache over the
 *	given user virtual address range (the user variant returns an
 *	int status — see coherent_user_range in cpu_cache_fns).
 */
#define flush_icache_user_range(s,e)	__cpuc_coherent_user_range(s,e)

/*
 * flush_icache_range(start, end)
 *
 *	As above for kernel virtual addresses, e.g. after writing
 *	instructions into memory.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * clean_dcache_area(start, size)
 *
 *	Write back (clean) the D-cache over the given kernel area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
278
279
280
281
282
283
284
285
286
287
288
289
290
/*
 * ARM provides its own flush_dcache_page() (out of line): maintains
 * D-cache coherency for a page-cache page the kernel has written to.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
293
294static inline void flush_kernel_vmap_range(void *addr, int size)
295{
296 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
297 __cpuc_flush_dcache_area(addr, (size_t)size);
298}
299static inline void invalidate_kernel_vmap_range(void *addr, int size)
300{
301 if ((cache_is_vivt() || cache_is_vipt_aliasing()))
302 __cpuc_flush_dcache_area(addr, (size_t)size);
303}
304
#define ARCH_HAS_FLUSH_ANON_PAGE
/*
 * Flush a user mapping of an anonymous page before the kernel
 * accesses it through its own mapping.  Only anonymous pages are
 * handled here; the real work is out of line in __flush_anon_page().
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	/* declared locally to keep the header dependency-free */
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}
314
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
/* D-cache maintenance for the kernel mapping of a page (out of line). */
extern void flush_kernel_dcache_page(struct page *);

/* Lock/unlock a mapping's i_pages xarray (IRQ-safe form) around
 * D-cache flushing of that mapping's pages. */
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
320
/*
 * flush_icache_page() is a no-op on ARM: doing work here would
 * duplicate cache maintenance already performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)
326
327
328
329
330
331
332
333
/*
 * Called after a new kernel virtual mapping has been established:
 * aliasing caches must flush everything; on non-aliasing VIPT a
 * barrier is all that is needed.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a dsb after cleaning the cache line.
		 */
		dsb(ishst);
}
345
/*
 * Called before a kernel virtual mapping is torn down: aliasing
 * caches must be flushed while the mapping still exists; non-aliasing
 * VIPT caches need nothing.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cache_is_vipt_nonaliasing())
		return;

	flush_cache_all();
}
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
/*
 * Assumed maximum D-cache writeback granule: 2^6 = 64 bytes.
 * NOTE(review): the value is not derivable from this header alone —
 * confirm it covers the largest cache line of all supported CPUs.
 */
#define __CACHE_WRITEBACK_ORDER 6
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

/*
 * There is no dedicated "clean only" entry in cpu_cache_fns, so
 * cleaning an area falls back to the full (clean + invalidate) flush.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
384
/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other agents: clean the inner cache, then the
 * outer cache, over [p, p + size).
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}
396
/*
 * Ensure preceding writes to *p by other agents are visible to
 * subsequent reads by this CPU.  We must be careful not to discard
 * data simultaneously written by another CPU, hence the use of flush
 * (clean + invalidate) rather than pure invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Push any dirty inner-cache lines for *p out to the
		 * outer cache before maintaining the outer level:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* flush (clean + invalidate) stale data for *p from the outer cache: */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and finally flush the inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}
423
/* Convenience forms operating on a single object of known size. */
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 * - clear the SCTLR.C bit to prevent further cache allocations,
 * - flush the desired level of cache,
 * - clear the ACTLR "SMP" bit to disable local coherency,
 * with no intervening memory access between those steps — not even to
 * the stack (hence the clobber list and the stashed fp/ip).
 *
 * WARNING -- after this has been called:
 * - no ldrex/strex (and similar) instructions may be used;
 * - the CPU is no longer coherent with the other CPUs;
 * - this is unlikely to work as expected if Linux runs non-secure.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	".arch	armv7-a \n\t" \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )
472
/* Cache maintenance for a uprobes execute-out-of-line (XOL) slot
 * after instructions have been copied into it (out of line). */
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
			     void *kaddr, unsigned long len);

/*
 * Per-CPU hook for the I-cache mismatch workaround; a no-op when the
 * workaround is not configured.  NOTE(review): presumably records the
 * CPU's I-cache geometry at boot — confirm against the implementation.
 */
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid);
#else
static inline void check_cpu_icache_size(int cpuid) { }
#endif
482
483#endif
484