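/*
 * Vmalloc area based chunk allocation for the percpu allocator.
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This file assumes the percpu allocator internals (struct pcpu_chunk,
 * pcpu_chunk_addr(), pcpu_unit_pages, etc.) are visible where it is
 * built.
 */

/**
 * pcpu_chunk_page - get the page mapped at the given index
 * @chunk: chunk of interest
 * @cpu: cpu of interest
 * @page_idx: page index
 *
 * Returns the page which is mapped at @page_idx of @chunk for @cpu.
 */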
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}
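
/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for the bitmap
 * @may_alloc: whether the backing buffers may be allocated
 *
 * Returns a pages array such that pages[pcpu_page_idx(cpu, page)] indexes
 * the page for @cpu, and stores a copy of @chunk->populated in *@bitmapp.
 * Both are backed by static buffers which are allocated lazily on the
 * first call (only if @may_alloc), so callers must serialize their use.
 *
 * RETURNS:
 * Pointer to the temp pages array on success, NULL on failure.
 */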
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
					       unsigned long **bitmapp,
					       bool may_alloc)
{
	static struct page **pages;
	static unsigned long *bitmap;
	size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
	size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
			     sizeof(unsigned long);

	if (!pages || !bitmap) {
		if (may_alloc && !pages)
			pages = pcpu_mem_alloc(pages_size);
		if (may_alloc && !bitmap)
			bitmap = pcpu_mem_alloc(bitmap_size);
		if (!pages || !bitmap)
			return NULL;
	}

	/* start with a clean pages array and the current populated state */
	memset(pages, 0, pages_size);
	bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

	*bitmapp = bitmap;
	return pages;
}
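
/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start,@page_end) in @pages for all units.  Slots in
 * @pages which are NULL (never allocated) are skipped.
 */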
static void pcpu_free_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page = pages[pcpu_page_idx(cpu, i)];

			if (page)
				__free_page(page);
		}
	}
}
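
/**
 * pcpu_alloc_pages - allocate pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start,@page_end) into @pages for all units.
 * Each page is allocated on the node of the cpu it belongs to.  On
 * failure, pages allocated so far are freed and -ENOMEM is returned.
 */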
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
			    struct page **pages, unsigned long *populated,
			    int page_start, int page_end)
{
	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
			if (!*pagep) {
				/* roll back pages allocated so far */
				pcpu_free_pages(chunk, pages, populated,
						page_start, page_end);
				return -ENOMEM;
			}
		}
	}
	return 0;
}
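
/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start,@page_end) of @chunk are about to be unmapped.
 * Flush cache for the whole region covering all units.
 */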
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
				 int page_start, int page_end)
{
	flush_cache_vunmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

/* unmap without flushing; callers take care of cache and TLB flushing */
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}
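
/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which gets the unmapped pages for later freeing
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk,
 * record them in @pages and clear the corresponding bits in @populated.
 * The caller is responsible for flushing cache beforehand and TLB
 * afterwards (pcpu_pre_unmap_flush() and pcpu_post_unmap_tlb_flush()).
 */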
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
			     struct page **pages, unsigned long *populated,
			     int page_start, int page_end)
{
	unsigned int cpu;
	int i;

	for_each_possible_cpu(cpu) {
		for (i = page_start; i < page_end; i++) {
			struct page *page;

			page = pcpu_chunk_page(chunk, cpu, i);
			WARN_ON(!page);
			pages[pcpu_page_idx(cpu, i)] = page;
		}
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				   page_end - page_start);
	}

	for (i = page_start; i < page_end; i++)
		__clear_bit(i, populated);
}
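
/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been unmapped.  Flush
 * TLB for the whole region covering all units.
 */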
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
				      int page_start, int page_end)
{
	flush_tlb_kernel_range(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}

/* map without flushing; callers take care of cache flushing */
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
			    int nr_pages)
{
	return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					PAGE_KERNEL, pages);
}
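
/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing the pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.  On
 * success the mapped pages are linked back to @chunk and the matching
 * bits in @populated are set; on failure, mappings made so far are
 * unmapped again.  The caller is responsible for calling
 * pcpu_post_map_flush() after all mappings are complete.
 */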
static int pcpu_map_pages(struct pcpu_chunk *chunk,
			  struct page **pages, unsigned long *populated,
			  int page_start, int page_end)
{
	unsigned int cpu, tcpu;
	int i, err;

	for_each_possible_cpu(cpu) {
		err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
				       &pages[pcpu_page_idx(cpu, page_start)],
				       page_end - page_start);
		if (err < 0)
			goto err;
	}

	/* mapping successful, link chunk and mark populated */
	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu)
			pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
					    chunk);
		__set_bit(i, populated);
	}

	return 0;

err:
	/* undo mappings done for cpus preceding the failed one */
	for_each_possible_cpu(tcpu) {
		if (tcpu == cpu)
			break;
		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
				   page_end - page_start);
	}
	return err;
}
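
/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start,@page_end) of @chunk have been mapped.  Flush
 * cache for the whole region covering all units.
 */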
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
				int page_start, int page_end)
{
	flush_cache_vmap(
		pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
		pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}
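
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages covering [@off,@off+@size) in
 * @chunk.  Regions which are already populated are skipped.  The area
 * is zeroed on success; on failure, freshly allocated pages are
 * unmapped and freed again and an -errno is returned.
 */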
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int free_end = page_start, unmap_end = page_start;
	struct page **pages;
	unsigned long *populated;
	unsigned int cpu;
	int rs, re, rc;

	/* quick path, check whether all pages are already there */
	rs = page_start;
	pcpu_next_pop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		goto clear;

	/* need to allocate and map pages, this chunk can't be immutable */
	WARN_ON(chunk->immutable);

	pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
	if (!pages)
		return -ENOMEM;

	/* alloc and map */
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_free;
		free_end = re;
	}

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		rc = pcpu_map_pages(chunk, pages, populated, rs, re);
		if (rc)
			goto err_unmap;
		unmap_end = re;
	}
	pcpu_post_map_flush(chunk, page_start, page_end);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
	for_each_possible_cpu(cpu)
		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
	return 0;

err_unmap:
	pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);
	pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
	pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);
	return rc;
}
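
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages covering [@off,@off+@size)
 * from @chunk, free them and update @chunk->populated accordingly.
 */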
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	struct page **pages;
	unsigned long *populated;
	int rs, re;

	/* quick path, check whether it's empty already */
	rs = page_start;
	pcpu_next_unpop(chunk, &rs, &re, page_end);
	if (rs == page_start && re == page_end)
		return;

	/* immutable chunks can't be depopulated */
	WARN_ON(chunk->immutable);

	/*
	 * If control reaches here, there must have been at least one
	 * successful population attempt so the temp pages array must
	 * be available now.
	 */
	pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
	BUG_ON(!pages);

	/* unmap and free */
	pcpu_pre_unmap_flush(chunk, page_start, page_end);

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_unmap_pages(chunk, pages, populated, rs, re);

	/* no need to flush tlb, vmalloc will handle it lazily */

	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
		pcpu_free_pages(chunk, pages, populated, rs, re);

	/* commit new bitmap */
	bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
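
/*
 * Allocate a new chunk and reserve vmalloc areas for all groups.  The
 * chunk's base address is set so that the first group's vm area sits at
 * base_addr + pcpu_group_offsets[0].
 */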
static struct pcpu_chunk *pcpu_create_chunk(void)
{
	struct pcpu_chunk *chunk;
	struct vm_struct **vms;

	chunk = pcpu_alloc_chunk();
	if (!chunk)
		return NULL;

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
	if (!vms) {
		pcpu_free_chunk(chunk);
		return NULL;
	}

	chunk->data = vms;
	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
	return chunk;
}
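
/* Free the chunk's vmalloc areas (if any) and the chunk itself. */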
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
	if (chunk && chunk->data)
		pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
	pcpu_free_chunk(chunk);
}
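
/* Translate a percpu address within a chunk to its struct page. */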
static struct page *pcpu_addr_to_page(void *addr)
{
	return vmalloc_to_page(addr);
}
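
/*
 * The vmalloc based allocator imposes no extra restrictions on the
 * allocation info.
 */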
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
	/* no extra restriction */
	return 0;
}