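/*
 * PowerPC64 MMU initialization: kmem_caches for the page table
 * levels, and the SPARSEMEM_VMEMMAP backing for the struct page
 * array.
 */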
#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

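/*
 * Physical address of the start of system memory and of the kernel
 * image; both are filled in during early boot.
 */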
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

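/*
 * Constructors for the page table kmem_caches: a newly allocated
 * table must start out with every entry cleared.
 */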
static void pgd_ctor(void *addr)
{
	memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
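/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * set up the kernel pagetables.
 */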
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
	char *name;
	unsigned long table_size = sizeof(void *) << shift;
	unsigned long align = table_size;
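	/*
	 * When batching pgtable pointers for RCU freeing, we store
	 * the index size in the low bits.  Table alignment must be
	 * big enough to fit it.
	 *
	 * Likewise, hugepage pagetable pointers contain a (different)
	 * shift value in the low bits.  All tables must be aligned so
	 * as to leave enough 0 bits in the address to contain it.
	 */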
	unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
				     HUGEPD_SHIFT_MASK + 1);
	struct kmem_cache *new;
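	/*
	 * It would be nice if this was a BUILD_BUG_ON(), but at the
	 * moment gcc doesn't recognize is_power_of_2() as a constant
	 * expression.
	 */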
	BUG_ON(!is_power_of_2(minalign));
	BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

	if (PGT_CACHE(shift))
		return;	/* Already have a cache of this size */

	align = max_t(unsigned long, align, minalign);
	name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
	new = kmem_cache_create(name, table_size, align, 0, ctor);
	PGT_CACHE(shift) = new;

	pr_debug("Allocated pgtable cache for order %d\n", shift);
}
void pgtable_cache_init(void)
{
	pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
	pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
	if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
		panic("Couldn't allocate pgtable caches");
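	/*
	 * In all current configs, when the PUD index exists it's the
	 * same size as either the pgd or pmd index, so the calls above
	 * must also have created a PUD cache.  This will need
	 * re-examination if we add new possibilities for the pagetable
	 * layout.
	 */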
	BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
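/*
 * Given an address within the vmemmap, determine the pfn of the page
 * that represents the start of the section it is within.  We do this
 * by hand because the passed-in address may not be section-aligned,
 * and subtraction of non-aligned pointers produces undefined results.
 */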
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));
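	/* Return the pfn of the start of the section. */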
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
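/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */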
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(vmemmap_section_start(start)))
			return 1;

	return 0;
}
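/*
 * On hash-based CPUs the vmemmap is bolted into the hash table.
 *
 * On Book3E CPUs it is instead mapped with normal kernel page tables,
 * though the size of the pages encoded in the PTEs can differ.
 */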
#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	/* Base PTE flags for a kernel read/write mapping */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only have room for page size encodings up to 0xf */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the page size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

	/*
	 * Map each PAGE_SIZE chunk of the area.  Note that phys is not
	 * incremented: all the PTEs are of the large page size, so they
	 * must have the low bits of the physical address clear.
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
					     unsigned long page_size,
					     unsigned long phys)
{
	int mapped = htab_bolt_mapping(start, start + page_size, phys,
				       PAGE_KERNEL, mmu_vmemmap_psize,
				       mmu_kernel_ssize);
	BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

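/*
 * Singly-linked list recording, for each block backing part of the
 * vmemmap, the virtual address covered and the physical memory used.
 */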
struct vmemmap_backing *vmemmap_list;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	static struct vmemmap_backing *next;
	static int num_left;

	/* Allocate a fresh page of entries when needed, hand out chunks */
	if (!next || !num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

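/* Record a newly created vmemmap mapping on vmemmap_list. */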
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

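/*
 * Populate the vmemmap for the given range of struct pages, skipping
 * any block that an overlapping, already-initialised section has
 * mapped before.
 */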
int __meminit vmemmap_populate(struct page *start_page,
			       unsigned long nr_pages, int node)
{
	unsigned long start = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + nr_pages);
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
		 start_page, nr_pages, node);
	pr_debug(" -> map %lx..%lx\n", start, end);

	for (; start < end; start += page_size) {
		void *p;

		if (vmemmap_populated(start, page_size))
			continue;

		p = vmemmap_alloc_block(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		vmemmap_create_mapping(start, page_size, __pa(p));
	}

	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */