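/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * The allocation path is lockless: the per-chunk bitmaps are updated
 * with cmpxchg, so several users can allocate and free from the same
 * pool concurrently without taking the pool lock.  Adding chunks to a
 * pool still takes the pool spinlock, so users relying on lockless
 * operation must preallocate enough memory.
 *
 * On architectures without an NMI-safe cmpxchg implementation the
 * allocator can NOT be used from NMI handlers; such code should depend
 * on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Rough usage sketch (names such as sram_virt, sram_phys and sram_size
 * are illustrative only, not part of this API):
 *
 *	pool = gen_pool_create(5, -1);                  32-byte granularity
 *	gen_pool_add_virt(pool, sram_virt, sram_phys, sram_size, -1);
 *	block = gen_pool_alloc(pool, 256);              returns 0 on failure
 *	gen_pool_free(pool, block, 256);
 *	gen_pool_destroy(pool);
 */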
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>

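/*
 * set_bits_ll/clear_bits_ll - lock-less single-word bitmap helpers.
 *
 * Both update one word of a bitmap with cmpxchg and return -EBUSY,
 * without modifying the word, if the requested bits are not in the
 * expected state (already set, or not all set), so that callers can
 * back off and retry.
 */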
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
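/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map lock-lessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race on the same bit, one of them fails and gets back the number
 * of bits that remain to be set; on success 0 is returned.
 */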
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
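/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users race on the same bit, one of them fails and gets back the number
 * of bits that remain to be cleared; on success 0 is returned.
 */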
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}
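/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */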
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);
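/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */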
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
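/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */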
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
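/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special purpose memory pool.  Verifies that there
 * are no outstanding allocations.
 */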
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		/* any bit still set means an outstanding allocation */
		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
	return;
}
EXPORT_SYMBOL(gen_pool_destroy);
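/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */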
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			/*
			 * Lost a race with another user: undo the bits we
			 * managed to set and search again.
			 */
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
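/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */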
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr < chunk->end_addr) {
			BUG_ON(addr + size > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);
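/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */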
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
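/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */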
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);
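/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */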
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk->end_addr - chunk->start_addr;
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);
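/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */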
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
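/**
 * gen_pool_first_fit - find the first available region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */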
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
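/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * that can satisfy the allocation.
 */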
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			/* remember the smallest free region seen so far */
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);