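/*
 * mm/percpu.c - percpu memory allocator
 *
 * Percpu memory is laid out in chunks.  Each chunk consists of
 * num_possible_cpus() units and the unit for each cpu carries that
 * cpu's copy of every percpu area.  Allocation state is kept in a
 * per-chunk offset/size map (pcpu_chunk->map); chunks are sorted
 * into slots by free size and kept in an rb-tree keyed by base
 * address for reverse lookup on free.
 *
 * The first chunk hosts the kernel static percpu area, optionally
 * followed by a reserved region for reserved (e.g. module static)
 * allocations, and is set up by pcpu_setup_first_chunk() from the
 * arch percpu setup path.
 */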
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT	5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC	16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/* optional reserved chunk, only accessible for reserved allocations */
static struct pcpu_chunk *pcpu_reserved_chunk;

/* offset limit of the reserved chunk */
static int pcpu_reserved_chunk_limit;
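
/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks and chunk->page arrays.
 * The latter is a spinlock and protects the index data structures -
 * chunk slots, rbtree, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, free path schedules reclaim work.
 */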
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}
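
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */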
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}
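
/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */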
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}
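
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */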
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &(*p)->rb_left;
		else if (addr > chunk->vm->addr)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}
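
/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * Checks the reserved chunk first and then walks the address rb-tree.
 * @addr must point into an existing chunk.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * The address of the found chunk.
 */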
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	/* is it in the reserved chunk? */
	if (pcpu_reserved_chunk) {
		void *start = pcpu_reserved_chunk->vm->addr;

		if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
	}

	/* nah... search the regular ones */
	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exact match, the parent is the closest node */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the closest chunk starts after @addr, step back one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}
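
/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into address rb tree.
 *
 * CONTEXT:
 * pcpu_lock.
 */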
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}
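
/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend area map of @chunk so that it can accommodate an allocation.
 * A single allocation can split an area into three areas, so this
 * function makes sure that @chunk->map has at least two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */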
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened inbetween, so map_used couldn't have
	 * grown.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;

	/* pcpu_lock was dropped above, tell the caller to restart its scan */
	return 1;
}
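
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * The caller should have made sure that there are enough map slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */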
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
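
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * The caller should have called pcpu_extend_area_map() to make sure
 * @chunk->map has at least two extra slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */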
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}
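
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */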
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}

	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
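
/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */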
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}
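
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 *
 * CONTEXT:
 * pcpu_alloc_mutex.
 */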
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}
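
/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */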
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}
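
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */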
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	/* get_vm_area() takes VM_* flags, not a gfp mask */
	chunk->vm = get_vm_area(pcpu_chunk_size, VM_ALLOC);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}
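
/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */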
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto fail_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			switch (pcpu_extend_area_map(chunk)) {
			case 0:
				break;
			case 1:
				goto restart;	/* pcpu_lock dropped, restart */
			default:
				goto fail_unlock;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irq(&pcpu_lock);

	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto fail_unlock_mutex;

	spin_lock_irq(&pcpu_lock);
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);
	goto restart;

area_found:
	spin_unlock_irq(&pcpu_lock);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irq(&pcpu_lock);
		pcpu_free_area(chunk, off);
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	return __addr_to_pcpu_ptr(chunk->vm->addr + off);

fail_unlock:
	spin_unlock_irq(&pcpu_lock);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	return NULL;
}
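
/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */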
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
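
/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from the
 * reserved percpu area if the arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */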
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}
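
/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */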
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		rb_erase(&chunk->rb_node, &pcpu_addr_root);
		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);
	mutex_unlock(&pcpu_alloc_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
	}
}
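
/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */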
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there are more than one fully free chunks, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);
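
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes, 0 for none
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  NULL return indicates end
 * of pages for the cpu.  Note that @get_page_fn() must return the
 * same number of pages for all cpus.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on archs which can't handle them on module load time.
 *
 * @dyn_size, if non-negative, determines the number of bytes
 * available for dynamic allocation in the first chunk.  Specifying
 * non-negative value makes percpu leave alone the area beyond
 * @static_size + @reserved_size + @dyn_size.
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it.  percpu must not mess
 * with the chunk.  Note that @base_addr with 0 @unit_size or non-NULL
 * @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */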
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t reserved_size,
				     ssize_t dyn_size, ssize_t unit_size,
				     void *base_addr,
				     pcpu_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	size_t size_sum = static_size + reserved_size +
			  (dyn_size >= 0 ? dyn_size : 0);
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned int cpu;
	int nr_pages;
	int err, i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	if (unit_size >= 0) {
		BUG_ON(unit_size < size_sum);
		BUG_ON(unit_size & ~PAGE_MASK);
		BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
	} else
		BUG_ON(base_addr);
	BUG_ON(base_addr && populate_pte_fn);

	if (unit_size >= 0)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(size_sum));

	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->page = schunk->page_ar;

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	pcpu_reserved_chunk_limit = static_size + schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->page = schunk->page_ar;	/* share page map with schunk */

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* allocate vm address */
	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&first_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the first chunks can't
		 * be mapped or unmapped by percpu and are marked
		 * immutable.
		 */
		first_vm.addr = base_addr;
		schunk->immutable = true;
		if (dchunk)
			dchunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(schunk,
								cpu, i));

		err = pcpu_map(schunk, 0, nr_pages);
		if (err)
			panic("failed to setup static percpu area, err=%d\n",
			      err);
	}

	/* link the first chunk in */
	if (!dchunk) {
		pcpu_chunk_relocate(schunk, -1);
		pcpu_chunk_addr_insert(schunk);
	} else {
		pcpu_chunk_relocate(dchunk, -1);
		pcpu_chunk_addr_insert(dchunk);
	}

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}

/*
 * Embedding first chunk setup helper.
 */
static void *pcpue_ptr __initdata;
static size_t pcpue_size __initdata;
static size_t pcpue_unit_size __initdata;

static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpue_size)
		return NULL;

	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
}
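
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes, 0 for none
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * as a contiguous area using bootmem allocator and used as-is without
 * being mapped into vmalloc area.  This enables the first chunk to
 * piggy back on the linear physical mapping which often uses larger
 * page size.
 *
 * The leftover between the page-aligned needed size and the unit size
 * is returned to the bootmem allocator.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access on success, -errno on failure.
 */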
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
				      ssize_t dyn_size, ssize_t unit_size)
{
	unsigned int cpu;

	/* determine parameters and allocate */
	pcpue_size = PFN_ALIGN(static_size + reserved_size +
			       (dyn_size >= 0 ? dyn_size : 0));
	if (dyn_size != 0)
		dyn_size = pcpue_size - static_size - reserved_size;

	if (unit_size >= 0) {
		BUG_ON(unit_size < pcpue_size);
		pcpue_unit_size = unit_size;
	} else
		pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);

	pcpue_ptr = __alloc_bootmem_nopanic(
					num_possible_cpus() * pcpue_unit_size,
					PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!pcpue_ptr)
		return -ENOMEM;

	/* return the leftover and copy */
	for_each_possible_cpu(cpu) {
		void *ptr = pcpue_ptr + cpu * pcpue_unit_size;

		free_bootmem(__pa(ptr + pcpue_size),
			     pcpue_unit_size - pcpue_size);
		memcpy(ptr, __per_cpu_load, static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
		pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);

	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
				      reserved_size, dyn_size,
				      pcpue_unit_size, pcpue_ptr, NULL);
}