1
2
3
4
5
6#include <linux/mm.h>
7#include <linux/module.h>
8
/*
 * Fall back to the compile-time L1 cache line size when the
 * architecture does not provide its own cache_line_size().
 */
#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
#endif
12
13
14
15
16
17
18
19
20
21static void percpu_depopulate(void *__pdata, int cpu)
22{
23 struct percpu_data *pdata = __percpu_disguise(__pdata);
24
25 kfree(pdata->ptrs[cpu]);
26 pdata->ptrs[cpu] = NULL;
27}
28
29
30
31
32
33
/*
 * __percpu_depopulate_mask - free per-cpu objects for all cpus in a mask
 * @__pdata: disguised pointer as returned by __alloc_percpu()
 * @mask: cpus whose private copies are to be freed
 */
static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
{
	int cpu;
	for_each_cpu_mask_nr(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}
40
/* Convenience wrapper: accepts the cpumask by value at the call site. */
#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))
43
44
45
46
47
48
49
50
51
52
53
54
55static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
56{
57 struct percpu_data *pdata = __percpu_disguise(__pdata);
58 int node = cpu_to_node(cpu);
59
60
61
62
63 size = roundup(size, cache_line_size());
64
65 BUG_ON(pdata->ptrs[cpu]);
66 if (node_online(node))
67 pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
68 else
69 pdata->ptrs[cpu] = kzalloc(size, gfp);
70 return pdata->ptrs[cpu];
71}
72
73
74
75
76
77
78
79
80
81
82static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
83 cpumask_t *mask)
84{
85 cpumask_t populated;
86 int cpu;
87
88 cpus_clear(populated);
89 for_each_cpu_mask_nr(cpu, *mask)
90 if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
91 __percpu_depopulate_mask(__pdata, &populated);
92 return -ENOMEM;
93 } else
94 cpu_set(cpu, populated);
95 return 0;
96}
97
/* Convenience wrapper: accepts the cpumask by value at the call site. */
#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
100
101
102
103
104
105
106
107
108
109void *__alloc_percpu(size_t size, size_t align)
110{
111
112
113
114 size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
115 void *pdata = kzalloc(sz, GFP_KERNEL);
116 void *__pdata = __percpu_disguise(pdata);
117
118
119
120
121
122
123 WARN_ON_ONCE(align > SMP_CACHE_BYTES);
124
125 if (unlikely(!pdata))
126 return NULL;
127 if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
128 &cpu_possible_map)))
129 return __pdata;
130 kfree(pdata);
131 return NULL;
132}
133EXPORT_SYMBOL_GPL(__alloc_percpu);
134
135
136
137
138
139
140
141
/*
 * free_percpu - release an object allocated with __alloc_percpu()
 * @__pdata: disguised per-cpu handle; NULL is silently ignored
 *
 * Frees every possible cpu's private copy, then the pointer table
 * itself (recovered by undoing the pointer disguise).
 */
void free_percpu(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
150