/*
 * mm/percpu.c - percpu memory allocator
 *
 * This is the dynamic percpu allocator.  Percpu areas are allocated
 * in chunks.  Each chunk consists of a boot-time determined number of
 * units and the first chunk is used for the static percpu variables in
 * the kernel image.  Units grow as necessary and all units grow or
 * shrink in unison.  When a chunk is filled up, another chunk is
 * allocated.
 *
 * Allocation is done as offsets into a unit's address space.  Percpu
 * access is performed by configuring percpu base registers according
 * to the cpu to unit mapping and offsetting the base address using
 * pcpu_unit_offsets[].
 *
 * Allocation state in each chunk is kept using an array of integers
 * in chunk->map.  A positive value in the map represents a free
 * region and a negative one an allocated region.  Chunks are kept in
 * rough free-size order and indexed by pcpu_slot so that a matching
 * chunk can be found quickly on allocation.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   a regular address to a percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define PCPU_SLOT_BASE_SHIFT	5	/* chunk free-size to slot mapping shift */
#define PCPU_DFL_MAP_ALLOC	16	/* default map alloc is 16 entries */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit map */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that this is only used
 * internally by the allocator.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be destroyed and proceeds to the destruction outside of both locks.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
178
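/*
 * The first chunk covers the kernel static percpu area and, when a
 * reserved region is configured, its first pcpu_reserved_chunk_limit
 * bytes back the reserved chunk.  The following helpers test whether
 * an address falls within those regions.
 */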
179static bool pcpu_addr_in_first_chunk(void *addr)
180{
181 void *first_start = pcpu_first_chunk->base_addr;
182
183 return addr >= first_start && addr < first_start + pcpu_unit_size;
184}
185
186static bool pcpu_addr_in_reserved_chunk(void *addr)
187{
188 void *first_start = pcpu_first_chunk->base_addr;
189
190 return addr >= first_start &&
191 addr < first_start + pcpu_reserved_chunk_limit;
192}
193
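/*
 * Chunks are put into size-graded slots.  With PCPU_SLOT_BASE_SHIFT of
 * 5, free sizes below 16 bytes map to slot 1, 16-31 to slot 2, 32-63
 * to slot 3 and so on, while a fully free chunk always sits in the
 * last slot (pcpu_nr_slots - 1) so it is used only as a last resort
 * and is easy for the reclaimer to find.
 */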
194static int __pcpu_size_to_slot(int size)
195{
196 int highbit = fls(size);
197 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
198}
199
200static int pcpu_size_to_slot(int size)
201{
202 if (size == pcpu_unit_size)
203 return pcpu_nr_slots - 1;
204 return __pcpu_size_to_slot(size);
205}
206
207static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
208{
209 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
210 return 0;
211
212 return pcpu_size_to_slot(chunk->free_size);
213}

/* set the pointer to a chunk in a page struct */
216static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
217{
218 page->index = (unsigned long)pcpu;
219}

/* obtain pointer to a chunk from a page struct */
222static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
223{
224 return (struct pcpu_chunk *)page->index;
225}
226
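/*
 * Address calculation within a chunk.  Every possible CPU maps to a
 * unit (pcpu_unit_map[]) and every unit to an offset from the chunk
 * base (pcpu_unit_offsets[]), so the address of @page_idx for @cpu is
 *
 *	chunk->base_addr + pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT)
 *
 * as computed by pcpu_chunk_addr() below.
 */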
227static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
228{
229 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
230}
231
232static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
233 unsigned int cpu, int page_idx)
234{
235 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
236 (page_idx << PAGE_SHIFT);
237}
238
239static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
240 int *rs, int *re, int end)
241{
242 *rs = find_next_zero_bit(chunk->populated, end, *rs);
243 *re = find_next_bit(chunk->populated, end, *rs + 1);
244}
245
246static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
247 int *rs, int *re, int end)
248{
249 *rs = find_next_bit(chunk->populated, end, *rs);
250 *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
251}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
259#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
260 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
261 (rs) < (re); \
262 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
263
264#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
265 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
266 (rs) < (re); \
267 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
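
/*
 * Illustrative usage sketch (not taken from the original sources): the
 * chunk backends walk page regions roughly like
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		// pages [rs, re) of each unit in @chunk are unpopulated
 *	}
 */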

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
283static void *pcpu_mem_alloc(size_t size)
284{
285 if (size <= PAGE_SIZE)
286 return kzalloc(size, GFP_KERNEL);
287 else {
288 void *ptr = vmalloc(size);
289 if (ptr)
290 memset(ptr, 0, size);
291 return ptr;
292 }
293}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated via pcpu_mem_alloc().
 */
302static void pcpu_mem_free(void *ptr, size_t size)
303{
304 if (size <= PAGE_SIZE)
305 kfree(ptr);
306 else
307 vfree(ptr);
308}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
323static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
324{
325 int nslot = pcpu_chunk_slot(chunk);
326
327 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
328 if (oslot < nslot)
329 list_move(&chunk->list, &pcpu_slot[nslot]);
330 else
331 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
332 }
333}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether the area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
349static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
350{
351 int new_alloc;
352
353 if (chunk->map_alloc >= chunk->map_used + 2)
354 return 0;
355
356 new_alloc = PCPU_DFL_MAP_ALLOC;
357 while (new_alloc < chunk->map_used + 2)
358 new_alloc *= 2;
359
360 return new_alloc;
361}
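
/*
 * For example, pcpu_need_to_extend() above reports that a chunk with
 * map_used == 15 and map_alloc == 16 cannot take another allocation
 * (which may need up to two extra map entries) and asks the caller to
 * grow the map to 32 entries.
 */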

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend the area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
376static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
377{
378 int *old = NULL, *new = NULL;
379 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
380 unsigned long flags;
381
382 new = pcpu_mem_alloc(new_size);
383 if (!new)
384 return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
387 spin_lock_irqsave(&pcpu_lock, flags);
388
389 if (new_alloc <= chunk->map_alloc)
390 goto out_unlock;
391
392 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
393 memcpy(new, chunk->map, old_size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
399 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
400 old = chunk->map;
401
402 chunk->map_alloc = new_alloc;
403 chunk->map = new;
404 new = NULL;
405
406out_unlock:
407 spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
413 pcpu_mem_free(old, old_size);
414 pcpu_mem_free(new, new_size);
415
416 return 0;
417}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, a @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and a @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
439static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
440 int head, int tail)
441{
442 int nr_extra = !!head + !!tail;
443
444 BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
447 memmove(&chunk->map[i + nr_extra], &chunk->map[i],
448 sizeof(chunk->map[0]) * (chunk->map_used - i));
449 chunk->map_used += nr_extra;
450
451 if (head) {
452 chunk->map[i + 1] = chunk->map[i] - head;
453 chunk->map[i++] = head;
454 }
455 if (tail) {
456 chunk->map[i++] -= tail;
457 chunk->map[i] = tail;
458 }
459}
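
/*
 * For example, splitting a 64 byte free block at index @i with
 * head == 8 and tail == 16 leaves blocks of 8, 40 and 16 bytes at
 * @i, @i + 1 and @i + 2 respectively.
 */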

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
480static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
481{
482 int oslot = pcpu_chunk_slot(chunk);
483 int max_contig = 0;
484 int i, off;
485
486 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
487 bool is_last = i + 1 == chunk->map_used;
488 int head, tail;

		/* extra for alignment requirement */
491 head = ALIGN(off, align) - off;
492 BUG_ON(i == 0 && head != 0);
493
494 if (chunk->map[i] < 0)
495 continue;
496 if (chunk->map[i] < head + size) {
497 max_contig = max(chunk->map[i], max_contig);
498 continue;
499 }

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which isn't too uncommon for
		 * percpu allocations.
		 */
507 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
508 if (chunk->map[i - 1] > 0)
509 chunk->map[i - 1] += head;
510 else {
511 chunk->map[i - 1] -= head;
512 chunk->free_size -= head;
513 }
514 chunk->map[i] -= head;
515 off += head;
516 head = 0;
517 }

		/* if tail is small, just keep it around */
520 tail = chunk->map[i] - head - size;
521 if (tail < sizeof(int))
522 tail = 0;

		/* split if warranted */
525 if (head || tail) {
526 pcpu_split_block(chunk, i, head, tail);
527 if (head) {
528 i++;
529 off += head;
530 max_contig = max(chunk->map[i - 1], max_contig);
531 }
532 if (tail)
533 max_contig = max(chunk->map[i + 1], max_contig);
534 }

		/* update hint and mark allocated */
537 if (is_last)
538 chunk->contig_hint = max_contig;
539 else
540 chunk->contig_hint = max(chunk->contig_hint,
541 max_contig);
542
543 chunk->free_size -= chunk->map[i];
544 chunk->map[i] = -chunk->map[i];
545
546 pcpu_chunk_relocate(chunk, oslot);
547 return off;
548 }
549
550 chunk->contig_hint = max_contig;
551 pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
554 return -1;
555}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting from @freeme to @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
569static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
570{
571 int oslot = pcpu_chunk_slot(chunk);
572 int i, off;
573
574 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
575 if (off == freeme)
576 break;
577 BUG_ON(off != freeme);
578 BUG_ON(chunk->map[i] > 0);
579
580 chunk->map[i] = -chunk->map[i];
581 chunk->free_size += chunk->map[i];

	/* merge with previous? */
584 if (i > 0 && chunk->map[i - 1] >= 0) {
585 chunk->map[i - 1] += chunk->map[i];
586 chunk->map_used--;
587 memmove(&chunk->map[i], &chunk->map[i + 1],
588 (chunk->map_used - i) * sizeof(chunk->map[0]));
589 i--;
590 }

	/* merge with next? */
592 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
593 chunk->map[i] += chunk->map[i + 1];
594 chunk->map_used--;
595 memmove(&chunk->map[i + 1], &chunk->map[i + 2],
596 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
597 }
598
599 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
600 pcpu_chunk_relocate(chunk, oslot);
601}
602
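/*
 * pcpu_alloc_chunk() and pcpu_free_chunk() manage only the chunk
 * descriptor and its area map; the percpu memory backing a chunk is
 * created and destroyed separately by pcpu_create_chunk() and
 * pcpu_destroy_chunk() provided by the chunk backend included below.
 */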
603static struct pcpu_chunk *pcpu_alloc_chunk(void)
604{
605 struct pcpu_chunk *chunk;
606
607 chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
608 if (!chunk)
609 return NULL;
610
611 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
612 if (!chunk->map) {
613 kfree(chunk);
614 return NULL;
615 }
616
617 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
618 chunk->map[chunk->map_used++] = pcpu_unit_size;
619
620 INIT_LIST_HEAD(&chunk->list);
621 chunk->free_size = pcpu_unit_size;
622 chunk->contig_hint = pcpu_unit_size;
623
624 return chunk;
625}
626
627static void pcpu_free_chunk(struct pcpu_chunk *chunk)
628{
629 if (!chunk)
630 return;
631 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
632 kfree(chunk);
633}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to page struct
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
650static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
651static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
652static struct pcpu_chunk *pcpu_create_chunk(void);
653static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
654static struct page *pcpu_addr_to_page(void *addr);
655static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
656
657#ifdef CONFIG_NEED_PER_CPU_KM
658#include "percpu-km.c"
659#else
660#include "percpu-vm.c"
661#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
670static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
671{
	/* is it in the first chunk? */
673 if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
675 if (pcpu_addr_in_reserved_chunk(addr))
676 return pcpu_reserved_chunk;
677 return pcpu_first_chunk;
678 }

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
687 addr += pcpu_unit_offsets[raw_smp_processor_id()];
688 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
689}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
705static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
706{
707 static int warn_limit = 10;
708 struct pcpu_chunk *chunk;
709 const char *err;
710 int slot, off, new_alloc;
711 unsigned long flags;
712
713 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
714 WARN(true, "illegal size (%zu) or align (%zu) for "
715 "percpu allocation\n", size, align);
716 return NULL;
717 }
718
719 mutex_lock(&pcpu_alloc_mutex);
720 spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
723 if (reserved && pcpu_reserved_chunk) {
724 chunk = pcpu_reserved_chunk;
725
726 if (size > chunk->contig_hint) {
727 err = "alloc from reserved chunk failed";
728 goto fail_unlock;
729 }
730
731 while ((new_alloc = pcpu_need_to_extend(chunk))) {
732 spin_unlock_irqrestore(&pcpu_lock, flags);
733 if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
734 err = "failed to extend area map of reserved chunk";
735 goto fail_unlock_mutex;
736 }
737 spin_lock_irqsave(&pcpu_lock, flags);
738 }
739
740 off = pcpu_alloc_area(chunk, size, align);
741 if (off >= 0)
742 goto area_found;
743
744 err = "alloc from reserved chunk failed";
745 goto fail_unlock;
746 }
747
748restart:
	/* search through normal chunks */
750 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
751 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
752 if (size > chunk->contig_hint)
753 continue;
754
755 new_alloc = pcpu_need_to_extend(chunk);
756 if (new_alloc) {
757 spin_unlock_irqrestore(&pcpu_lock, flags);
758 if (pcpu_extend_area_map(chunk,
759 new_alloc) < 0) {
760 err = "failed to extend area map";
761 goto fail_unlock_mutex;
762 }
763 spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart the chunk slot list walking.
				 */
768 goto restart;
769 }
770
771 off = pcpu_alloc_area(chunk, size, align);
772 if (off >= 0)
773 goto area_found;
774 }
775 }

	/* hmm... no space left, create a new chunk */
778 spin_unlock_irqrestore(&pcpu_lock, flags);
779
780 chunk = pcpu_create_chunk();
781 if (!chunk) {
782 err = "failed to allocate new chunk";
783 goto fail_unlock_mutex;
784 }
785
786 spin_lock_irqsave(&pcpu_lock, flags);
787 pcpu_chunk_relocate(chunk, -1);
788 goto restart;
789
790area_found:
791 spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
794 if (pcpu_populate_chunk(chunk, off, size)) {
795 spin_lock_irqsave(&pcpu_lock, flags);
796 pcpu_free_area(chunk, off);
797 err = "failed to populate";
798 goto fail_unlock;
799 }
800
801 mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
804 return __addr_to_pcpu_ptr(chunk->base_addr + off);
805
806fail_unlock:
807 spin_unlock_irqrestore(&pcpu_lock, flags);
808fail_unlock_mutex:
809 mutex_unlock(&pcpu_alloc_mutex);
810 if (warn_limit) {
811 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
812 "%s\n", size, align, err);
813 dump_stack();
814 if (!--warn_limit)
815 pr_info("PERCPU: limit reached, disable warning\n");
816 }
817 return NULL;
818}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 * Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
834void __percpu *__alloc_percpu(size_t size, size_t align)
835{
836 return pcpu_alloc(size, align, false);
837}
838EXPORT_SYMBOL_GPL(__alloc_percpu);
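
/*
 * Example (illustrative only, struct foo is a stand-in): a typical
 * dynamic percpu allocation through this interface looks like
 *
 *	struct foo __percpu *p;
 *
 *	p = __alloc_percpu(sizeof(struct foo), __alignof__(struct foo));
 *	if (p)
 *		per_cpu_ptr(p, cpu)->field = 0;
 *	...
 *	free_percpu(p);
 */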

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from the reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
855void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
856{
857 return pcpu_alloc(size, align, true);
858}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
869static void pcpu_reclaim(struct work_struct *work)
870{
871 LIST_HEAD(todo);
872 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
873 struct pcpu_chunk *chunk, *next;
874
875 mutex_lock(&pcpu_alloc_mutex);
876 spin_lock_irq(&pcpu_lock);
877
878 list_for_each_entry_safe(chunk, next, head, list) {
879 WARN_ON(chunk->immutable);

		/* spare the first one */
882 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
883 continue;
884
885 list_move(&chunk->list, &todo);
886 }
887
888 spin_unlock_irq(&pcpu_lock);
889
890 list_for_each_entry_safe(chunk, next, &todo, list) {
891 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
892 pcpu_destroy_chunk(chunk);
893 }
894
895 mutex_unlock(&pcpu_alloc_mutex);
896}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
907void free_percpu(void __percpu *ptr)
908{
909 void *addr;
910 struct pcpu_chunk *chunk;
911 unsigned long flags;
912 int off;
913
914 if (!ptr)
915 return;
916
917 addr = __pcpu_ptr_to_addr(ptr);
918
919 spin_lock_irqsave(&pcpu_lock, flags);
920
921 chunk = pcpu_chunk_addr_search(addr);
922 off = addr - chunk->base_addr;
923
924 pcpu_free_area(chunk, off);

	/* if there are more than one fully free chunks, wake up grim reaper */
927 if (chunk->free_size == pcpu_unit_size) {
928 struct pcpu_chunk *pos;
929
930 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
931 if (pos != chunk) {
932 schedule_work(&pcpu_reclaim_work);
933 break;
934 }
935 }
936
937 spin_unlock_irqrestore(&pcpu_lock, flags);
938}
939EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to the in-kernel static percpu area.
 * Module static percpu areas are not considered.
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false
 * otherwise.
 */
952bool is_kernel_percpu_address(unsigned long addr)
953{
954 const size_t static_size = __per_cpu_end - __per_cpu_start;
955 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
956 unsigned int cpu;
957
958 for_each_possible_cpu(cpu) {
959 void *start = per_cpu_ptr(base, cpu);
960
961 if ((void *)addr >= start && (void *)addr < start + static_size)
962 return true;
963 }
964 return false;
965}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is a dereferenceable address obtained via one of
 * the percpu access macros, this function translates it into its
 * physical address.  The caller is responsible for ensuring @addr
 * stays valid until this function finishes.
 *
 * RETURNS:
 * The physical address for @addr.
 */
979phys_addr_t per_cpu_ptr_to_phys(void *addr)
980{
981 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
982 bool in_first_chunk = false;
983 unsigned long first_start, first_end;
984 unsigned int cpu;

	/*
	 * The following test on first_start/first_end isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
991 first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
992 first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
993 pcpu_unit_pages);
994 if ((unsigned long)addr >= first_start &&
995 (unsigned long)addr < first_end) {
996 for_each_possible_cpu(cpu) {
997 void *start = per_cpu_ptr(base, cpu);
998
999 if (addr >= start && addr < start + pcpu_unit_size) {
1000 in_first_chunk = true;
1001 break;
1002 }
1003 }
1004 }
1005
1006 if (in_first_chunk) {
1007 if ((unsigned long)addr < VMALLOC_START ||
1008 (unsigned long)addr >= VMALLOC_END)
1009 return __pa(addr);
1010 else
1011 return page_to_phys(vmalloc_to_page(addr));
1012 } else
1013 return page_to_phys(pcpu_addr_to_page(addr));
1014}
1015
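/*
 * Determine the page aligned size of the first chunk from the static,
 * reserved and dynamic sizes.  If *@dyn_sizep is negative (auto), the
 * dynamic size becomes whatever is left of the page aligned sum after
 * the static and reserved areas; a positive *@dyn_sizep is likewise
 * grown to absorb the alignment padding.
 */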
1016static inline size_t pcpu_calc_fc_sizes(size_t static_size,
1017 size_t reserved_size,
1018 ssize_t *dyn_sizep)
1019{
1020 size_t size_sum;
1021
1022 size_sum = PFN_ALIGN(static_size + reserved_size +
1023 (*dyn_sizep >= 0 ? *dyn_sizep : 0));
1024 if (*dyn_sizep != 0)
1025 *dyn_sizep = size_sum - static_size - reserved_size;
1026
1027 return size_sum;
1028}

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize the
 * cpu_map pointers of the other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
1045struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1046 int nr_units)
1047{
1048 struct pcpu_alloc_info *ai;
1049 size_t base_size, ai_size;
1050 void *ptr;
1051 int unit;
1052
1053 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1054 __alignof__(ai->groups[0].cpu_map[0]));
1055 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1056
1057 ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1058 if (!ptr)
1059 return NULL;
1060 ai = ptr;
1061 ptr += base_size;
1062
1063 ai->groups[0].cpu_map = ptr;
1064
1065 for (unit = 0; unit < nr_units; unit++)
1066 ai->groups[0].cpu_map[unit] = NR_CPUS;
1067
1068 ai->nr_groups = nr_groups;
1069 ai->__ai_size = PFN_ALIGN(ai_size);
1070
1071 return ai;
1072}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
1080void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1081{
1082 free_bootmem(__pa(ai), ai->__ai_size);
1083}

/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes (-1 for auto)
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering the needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes in different groups and >=75% usage
 * of the allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, an ERR_PTR value is returned.
 */
1106struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1107 size_t reserved_size, ssize_t dyn_size,
1108 size_t atom_size,
1109 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1110{
1111 static int group_map[NR_CPUS] __initdata;
1112 static int group_cnt[NR_CPUS] __initdata;
1113 const size_t static_size = __per_cpu_end - __per_cpu_start;
1114 int nr_groups = 1, nr_units = 0;
1115 size_t size_sum, min_unit_size, alloc_size;
1116 int upa, max_upa, uninitialized_var(best_upa);
1117 int last_allocs, group, unit;
1118 unsigned int cpu, tcpu;
1119 struct pcpu_alloc_info *ai;
1120 unsigned int *cpu_map;

	/* this function may be called multiple times */
1123 memset(group_map, 0, sizeof(group_map));
1124 memset(group_cnt, 0, sizeof(group_cnt));

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is a multiple of atom_size and is the smallest
	 * size which can accommodate page aligned segments which are
	 * equal to or larger than min_unit_size.
	 */
1132 size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
1133 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1134
1135 alloc_size = roundup(min_unit_size, atom_size);
1136 upa = alloc_size / min_unit_size;
1137 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1138 upa--;
1139 max_upa = upa;

	/* group cpus according to their proximity */
1142 for_each_possible_cpu(cpu) {
1143 group = 0;
1144 next_group:
1145 for_each_possible_cpu(tcpu) {
1146 if (cpu == tcpu)
1147 break;
1148 if (group_map[tcpu] == group && cpu_distance_fn &&
1149 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1150 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1151 group++;
1152 nr_groups = max(nr_groups, group + 1);
1153 goto next_group;
1154 }
1155 }
1156 group_map[cpu] = group;
1157 group_cnt[group]++;
1158 }

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
1165 last_allocs = INT_MAX;
1166 for (upa = max_upa; upa; upa--) {
1167 int allocs = 0, wasted = 0;
1168
1169 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1170 continue;
1171
1172 for (group = 0; group < nr_groups; group++) {
1173 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1174 allocs += this_allocs;
1175 wasted += this_allocs * upa - group_cnt[group];
1176 }

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
1183 if (wasted > num_possible_cpus() / 3)
1184 continue;

		/* and then don't consume more memory */
1187 if (allocs > last_allocs)
1188 break;
1189 last_allocs = allocs;
1190 best_upa = upa;
1191 }
1192 upa = best_upa;

	/* allocate and fill alloc_info */
1195 for (group = 0; group < nr_groups; group++)
1196 nr_units += roundup(group_cnt[group], upa);
1197
1198 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1199 if (!ai)
1200 return ERR_PTR(-ENOMEM);
1201 cpu_map = ai->groups[0].cpu_map;
1202
1203 for (group = 0; group < nr_groups; group++) {
1204 ai->groups[group].cpu_map = cpu_map;
1205 cpu_map += roundup(group_cnt[group], upa);
1206 }
1207
1208 ai->static_size = static_size;
1209 ai->reserved_size = reserved_size;
1210 ai->dyn_size = dyn_size;
1211 ai->unit_size = alloc_size / upa;
1212 ai->atom_size = atom_size;
1213 ai->alloc_size = alloc_size;
1214
1215 for (group = 0, unit = 0; group_cnt[group]; group++) {
1216 struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back to back.  The caller should update this to
		 * reflect the actual allocation.
		 */
1223 gi->base_offset = unit * ai->unit_size;
1224
1225 for_each_possible_cpu(cpu)
1226 if (group_map[cpu] == group)
1227 gi->cpu_map[gi->nr_units++] = cpu;
1228 gi->nr_units = roundup(gi->nr_units, upa);
1229 unit += gi->nr_units;
1230 }
1231 BUG_ON(unit != nr_units);
1232
1233 return ai;
1234}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
1243static void pcpu_dump_alloc_info(const char *lvl,
1244 const struct pcpu_alloc_info *ai)
1245{
1246 int group_width = 1, cpu_width = 1, width;
1247 char empty_str[] = "--------";
1248 int alloc = 0, alloc_end = 0;
1249 int group, v;
1250 int upa, apl;
1251
1252 v = ai->nr_groups;
1253 while (v /= 10)
1254 group_width++;
1255
1256 v = num_possible_cpus();
1257 while (v /= 10)
1258 cpu_width++;
1259 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1260
1261 upa = ai->alloc_size / ai->unit_size;
1262 width = upa * (cpu_width + 1) + group_width + 3;
1263 apl = rounddown_pow_of_two(max(60 / width, 1));
1264
1265 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1266 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1267 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1268
1269 for (group = 0; group < ai->nr_groups; group++) {
1270 const struct pcpu_group_info *gi = &ai->groups[group];
1271 int unit = 0, unit_end = 0;
1272
1273 BUG_ON(gi->nr_units % upa);
1274 for (alloc_end += gi->nr_units / upa;
1275 alloc < alloc_end; alloc++) {
1276 if (!(alloc % apl)) {
1277 printk("\n");
1278 printk("%spcpu-alloc: ", lvl);
1279 }
1280 printk("[%0*d] ", group_width, group);
1281
1282 for (unit_end += upa; unit < unit_end; unit++)
1283 if (gi->cpu_map[unit] != NR_CPUS)
1284 printk("%0*d ", cpu_width,
1285 gi->cpu_map[unit]);
1286 else
1287 printk("%s ", empty_str);
1288 }
1289 }
1290 printk("\n");
1291}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from the arch percpu
 * area setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of the static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * part of the first chunk so that it's available only through
 * reserved percpu allocation, primarily to serve module static percpu
 * areas on architectures with limited relocation offset ranges.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies the unit size, must be aligned to PAGE_SIZE
 * and must be equal to or larger than @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.  @ai->alloc_size is the allocation size and always a
 * multiple of @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe the virtual memory layout of
 * the percpu areas.  Units which should be colocated are put into the
 * same group and dynamic VM areas are allocated according to these
 * groupings.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
1349int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1350 void *base_addr)
1351{
1352 static char cpus_buf[4096] __initdata;
1353 static int smap[2], dmap[2];
1354 size_t dyn_size = ai->dyn_size;
1355 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1356 struct pcpu_chunk *schunk, *dchunk = NULL;
1357 unsigned long *group_offsets;
1358 size_t *group_sizes;
1359 unsigned long *unit_off;
1360 unsigned int cpu;
1361 int *unit_map;
1362 int group, unit, i;
1363
1364 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1365
1366#define PCPU_SETUP_BUG_ON(cond) do { \
1367 if (unlikely(cond)) { \
1368 pr_emerg("PERCPU: failed to initialize, %s", #cond); \
1369 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1370 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1371 BUG(); \
1372 } \
1373} while (0)

	/* sanity checks */
1376 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1377 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1378 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1379 PCPU_SETUP_BUG_ON(!ai->static_size);
1380 PCPU_SETUP_BUG_ON(!base_addr);
1381 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1382 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1383 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1384 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
1387 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1388 group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1389 unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1390 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1391
1392 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1393 unit_map[cpu] = UINT_MAX;
1394 pcpu_first_unit_cpu = NR_CPUS;
1395
1396 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1397 const struct pcpu_group_info *gi = &ai->groups[group];
1398
1399 group_offsets[group] = gi->base_offset;
1400 group_sizes[group] = gi->nr_units * ai->unit_size;
1401
1402 for (i = 0; i < gi->nr_units; i++) {
1403 cpu = gi->cpu_map[i];
1404 if (cpu == NR_CPUS)
1405 continue;
1406
1407 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1408 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1409 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1410
1411 unit_map[cpu] = unit + i;
1412 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1413
1414 if (pcpu_first_unit_cpu == NR_CPUS)
1415 pcpu_first_unit_cpu = cpu;
1416 }
1417 }
1418 pcpu_last_unit_cpu = cpu;
1419 pcpu_nr_units = unit;
1420
1421 for_each_possible_cpu(cpu)
1422 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
1425#undef PCPU_SETUP_BUG_ON
1426 pcpu_dump_alloc_info(KERN_INFO, ai);
1427
1428 pcpu_nr_groups = ai->nr_groups;
1429 pcpu_group_offsets = group_offsets;
1430 pcpu_group_sizes = group_sizes;
1431 pcpu_unit_map = unit_map;
1432 pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
1435 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1436 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1437 pcpu_atom_size = ai->atom_size;
1438 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1439 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
1445 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1446 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1447 for (i = 0; i < pcpu_nr_slots; i++)
1448 INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers the static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it covers
	 * the static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
1457 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1458 INIT_LIST_HEAD(&schunk->list);
1459 schunk->base_addr = base_addr;
1460 schunk->map = smap;
1461 schunk->map_alloc = ARRAY_SIZE(smap);
1462 schunk->immutable = true;
1463 bitmap_fill(schunk->populated, pcpu_unit_pages);
1464
1465 if (ai->reserved_size) {
1466 schunk->free_size = ai->reserved_size;
1467 pcpu_reserved_chunk = schunk;
1468 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1469 } else {
1470 schunk->free_size = dyn_size;
1471 dyn_size = 0;
1472 }
1473 schunk->contig_hint = schunk->free_size;
1474
1475 schunk->map[schunk->map_used++] = -ai->static_size;
1476 if (schunk->free_size)
1477 schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
1480 if (dyn_size) {
1481 dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1482 INIT_LIST_HEAD(&dchunk->list);
1483 dchunk->base_addr = base_addr;
1484 dchunk->map = dmap;
1485 dchunk->map_alloc = ARRAY_SIZE(dmap);
1486 dchunk->immutable = true;
1487 bitmap_fill(dchunk->populated, pcpu_unit_pages);
1488
1489 dchunk->contig_hint = dchunk->free_size = dyn_size;
1490 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1491 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1492 }

	/* link the first chunk in */
1495 pcpu_first_chunk = dchunk ?: schunk;
1496 pcpu_chunk_relocate(pcpu_first_chunk, -1);
1497
1498
1499 pcpu_base_addr = base_addr;
1500 return 0;
1501}
1502
1503const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1504 [PCPU_FC_AUTO] = "auto",
1505 [PCPU_FC_EMBED] = "embed",
1506 [PCPU_FC_PAGE] = "page",
1507};
1508
1509enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1510
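/*
 * Parse the "percpu_alloc=" early parameter which selects the first
 * chunk allocator ("embed" or "page") when the architecture supports
 * the respective option; unrecognized values are reported and ignored.
 */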
1511static int __init percpu_alloc_setup(char *str)
1512{
1513 if (0)
1514 ;
1515#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1516 else if (!strcmp(str, "embed"))
1517 pcpu_chosen_fc = PCPU_FC_EMBED;
1518#endif
1519#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1520 else if (!strcmp(str, "page"))
1521 pcpu_chosen_fc = PCPU_FC_PAGE;
1522#endif
1523 else
1524 pr_warning("PERCPU: unknown allocator %s specified\n", str);
1525
1526 return 0;
1527}
1528early_param("percpu_alloc", percpu_alloc_setup);
1529
1530#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1531 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)

/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes (-1 for auto)
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu memory
 * @free_fn: function to free percpu memory
 *
 * This is a helper to ease setting up an embedded first percpu chunk
 * and can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to set up the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into the
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page sizes.  Note that this can
 * result in very sparse cpu->unit mapping on NUMA machines, thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than the distances
 * between node memory addresses.
 *
 * When @dyn_size is positive, the dynamic area might be larger than
 * specified to fill page alignment.  When @dyn_size is auto, it is
 * just big enough to fill page alignment after the static and
 * reserved areas.  If the needed size is smaller than the allocated
 * unit size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
1567int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
1568 size_t atom_size,
1569 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1570 pcpu_fc_alloc_fn_t alloc_fn,
1571 pcpu_fc_free_fn_t free_fn)
1572{
1573 void *base = (void *)ULONG_MAX;
1574 void **areas = NULL;
1575 struct pcpu_alloc_info *ai;
1576 size_t size_sum, areas_size, max_distance;
1577 int group, i, rc;
1578
1579 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1580 cpu_distance_fn);
1581 if (IS_ERR(ai))
1582 return PTR_ERR(ai);
1583
1584 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1585 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1586
1587 areas = alloc_bootmem_nopanic(areas_size);
1588 if (!areas) {
1589 rc = -ENOMEM;
1590 goto out_free;
1591 }

	/* allocate, copy and determine base address */
1594 for (group = 0; group < ai->nr_groups; group++) {
1595 struct pcpu_group_info *gi = &ai->groups[group];
1596 unsigned int cpu = NR_CPUS;
1597 void *ptr;
1598
1599 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1600 cpu = gi->cpu_map[i];
1601 BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
1604 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1605 if (!ptr) {
1606 rc = -ENOMEM;
1607 goto out_free_areas;
1608 }
1609 areas[group] = ptr;
1610
1611 base = min(ptr, base);
1612
1613 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1614 if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
1616 free_fn(ptr, ai->unit_size);
1617 continue;
1618 }
			/* copy and return the unused part */
1620 memcpy(ptr, __per_cpu_load, ai->static_size);
1621 free_fn(ptr + size_sum, ai->unit_size - size_sum);
1622 }
1623 }

	/* base address is now known, determine group base offsets */
1626 max_distance = 0;
1627 for (group = 0; group < ai->nr_groups; group++) {
1628 ai->groups[group].base_offset = areas[group] - base;
1629 max_distance = max_t(size_t, max_distance,
1630 ai->groups[group].base_offset);
1631 }
1632 max_distance += ai->unit_size;

	/* warn if maximum distance is further than 75% of vmalloc space */
1635 if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1636 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1637 "space 0x%lx\n",
1638 max_distance, VMALLOC_END - VMALLOC_START);
1639#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have a fallback */
1641 rc = -EINVAL;
1642 goto out_free;
1643#endif
1644 }
1645
1646 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1647 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1648 ai->dyn_size, ai->unit_size);
1649
1650 rc = pcpu_setup_first_chunk(ai, base);
1651 goto out_free;
1652
1653out_free_areas:
1654 for (group = 0; group < ai->nr_groups; group++)
1655 free_fn(areas[group],
1656 ai->groups[group].nr_units * ai->unit_size);
1657out_free:
1658 pcpu_free_alloc_info(ai);
1659 if (areas)
1660 free_bootmem(__pa(areas), areas_size);
1661 return rc;
1662}
1663#endif
1664
1665
1666#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up a page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page into the vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
1683int __init pcpu_page_first_chunk(size_t reserved_size,
1684 pcpu_fc_alloc_fn_t alloc_fn,
1685 pcpu_fc_free_fn_t free_fn,
1686 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1687{
1688 static struct vm_struct vm;
1689 struct pcpu_alloc_info *ai;
1690 char psize_str[16];
1691 int unit_pages;
1692 size_t pages_size;
1693 struct page **pages;
1694 int unit, i, j, rc;
1695
1696 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1697
1698 ai = pcpu_build_alloc_info(reserved_size, -1, PAGE_SIZE, NULL);
1699 if (IS_ERR(ai))
1700 return PTR_ERR(ai);
1701 BUG_ON(ai->nr_groups != 1);
1702 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1703
1704 unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
1707 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1708 sizeof(pages[0]));
1709 pages = alloc_bootmem(pages_size);

	/* allocate pages */
1712 j = 0;
1713 for (unit = 0; unit < num_possible_cpus(); unit++)
1714 for (i = 0; i < unit_pages; i++) {
1715 unsigned int cpu = ai->groups[0].cpu_map[unit];
1716 void *ptr;
1717
1718 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1719 if (!ptr) {
1720 pr_warning("PERCPU: failed to allocate %s page "
1721 "for cpu%u\n", psize_str, cpu);
1722 goto enomem;
1723 }
1724 pages[j++] = virt_to_page(ptr);
1725 }

	/* allocate vm area, map the pages and copy static data */
1728 vm.flags = VM_ALLOC;
1729 vm.size = num_possible_cpus() * ai->unit_size;
1730 vm_area_register_early(&vm, PAGE_SIZE);
1731
1732 for (unit = 0; unit < num_possible_cpus(); unit++) {
1733 unsigned long unit_addr =
1734 (unsigned long)vm.addr + unit * ai->unit_size;
1735
1736 for (i = 0; i < unit_pages; i++)
1737 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
1740 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1741 unit_pages);
1742 if (rc < 0)
1743 panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
1754 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1755 }

	/* we're all set, commit */
1758 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1759 unit_pages, psize_str, vm.addr, ai->static_size,
1760 ai->reserved_size, ai->dyn_size);
1761
1762 rc = pcpu_setup_first_chunk(ai, vm.addr);
1763 goto out_free_ar;
1764
1765enomem:
1766 while (--j >= 0)
1767 free_fn(page_address(pages[j]), PAGE_SIZE);
1768 rc = -ENOMEM;
1769out_free_ar:
1770 free_bootmem(__pa(pages), pages_size);
1771 pcpu_free_alloc_info(ai);
1772 return rc;
1773}
1774#endif

/*
 * Generic percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can piggy
 * back on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
1788#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1789unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1790EXPORT_SYMBOL(__per_cpu_offset);
1791
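/*
 * Default bootmem based allocation and free callbacks used by the
 * generic setup_per_cpu_areas() implementation below.
 */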
1792static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1793 size_t align)
1794{
1795 return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1796}
1797
1798static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1799{
1800 free_bootmem(__pa(ptr), size);
1801}
1802
1803void __init setup_per_cpu_areas(void)
1804{
1805 unsigned long delta;
1806 unsigned int cpu;
1807 int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
1813 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1814 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1815 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1816 if (rc < 0)
		panic("Failed to initialize percpu areas.");
1818
1819 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1820 for_each_possible_cpu(cpu)
1821 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1822}
1823#endif
1824