#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__

#ifdef CONFIG_PPC_MMU_NOHASH
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */

#include <linux/mm.h>

#define MMU_NO_CONTEXT	((unsigned int)-1)

extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

#ifdef CONFIG_SMP
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma,addr)	local_flush_tlb_page(vma,addr)
#endif
#define flush_tlb_page_nohash(vma,addr)	flush_tlb_page(vma,addr)

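/*
 * Illustrative sketch, not part of this header's API: after arch code
 * rewrites the PTEs backing a VMA it is expected to flush the affected
 * range; "vma", "start" and "end" below are hypothetical locals.
 *
 *	flush_tlb_range(vma, start, end);
 *
 * On CONFIG_SMP builds flush_tlb_mm()/flush_tlb_page() broadcast the
 * invalidation to other CPUs; on UP builds the #defines above collapse
 * them straight into their local_* counterparts.
 */
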
#elif defined(CONFIG_PPC_STD_MMU_32)

/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

#elif defined(CONFIG_PPC_STD_MMU_64)

/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
	int			active;
	unsigned long		index;
	struct mm_struct	*mm;
	real_pte_t		pte[PPC64_TLB_BATCH_NR];
	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
	unsigned int		psize;
	int			ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

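/*
 * Illustrative sketch, not part of this header: hpte_need_flush() is
 * meant to be called from the PTE update path whenever a possibly
 * hashed PTE is overwritten, queueing the stale translation in this
 * CPU's ppc64_tlb_batch.  The helper below is hypothetical and only
 * shows the expected calling convention.
 *
 *	static unsigned long example_pte_update(struct mm_struct *mm,
 *						unsigned long addr,
 *						pte_t *ptep, unsigned long clr)
 *	{
 *		unsigned long old = pte_val(*ptep);
 *
 *		*ptep = __pte(old & ~clr);
 *		if (old & _PAGE_HASHPTE)
 *			hpte_need_flush(mm, addr, ptep, old, 0);
 *		return old;
 *	}
 */
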
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
	batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()	do {} while (0)
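
/*
 * Illustrative sketch, not part of this header: generic mm code is
 * expected to bracket batched PTE tear-down with the lazy-MMU hooks,
 * roughly as below (loop abridged, locals hypothetical):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		ptep_get_and_clear(mm, addr, ptep);
 *	arch_leave_lazy_mmu_mode();
 *
 * Each cleared hashed PTE reaches hpte_need_flush() and lands in this
 * CPU's ppc64_tlb_batch; leaving lazy mode drains whatever is still
 * queued via __flush_tlb_pending().
 */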

extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);

/*
 * On the 64-bit hash MMU the generic flush_tlb_* entry points are
 * no-ops: invalidations are instead driven from the hash-table update
 * path (hpte_need_flush() and the lazy-MMU batch above).
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
}

/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
				     unsigned long end);

#else
#error Unsupported MMU type
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */