linux/lib/genalloc.c
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>

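/*
 * A minimal usage sketch of this API (illustrative only: sram_virt,
 * sram_phys and SRAM_SIZE describe a hypothetical ioremapped on-chip
 * SRAM and are not defined in this file).  A min_alloc_order of 5
 * makes each bitmap bit represent a 32-byte granule, and
 * gen_pool_alloc() returns 0 on failure:
 *
 *	struct gen_pool *sram_pool;
 *	unsigned long buf;
 *
 *	sram_pool = gen_pool_create(5, -1);
 *	if (!sram_pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(sram_pool, (unsigned long)sram_virt,
 *			      sram_phys, SRAM_SIZE, -1)) {
 *		gen_pool_destroy(sram_pool);
 *		return -ENOMEM;
 *	}
 *
 *	buf = gen_pool_alloc(sram_pool, 256);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	gen_pool_free(sram_pool, buf, 256);
 */
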
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one user will return the number of remaining
 * bits, otherwise return 0.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one user will return the number of remaining
 * bits, otherwise return 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size;
        atomic_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

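/*
 * Sketch of how an allocation and its physical address might be
 * combined by a caller (illustrative only; sram_pool is the
 * hypothetical pool from the sketch near the top of this file):
 *
 *	unsigned long vaddr;
 *	phys_addr_t paddr;
 *
 *	vaddr = gen_pool_alloc(sram_pool, 256);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	paddr = gen_pool_virt_to_phys(sram_pool, vaddr);
 *	if (paddr == (phys_addr_t)-1)
 *		return -EINVAL;
 *	... program the device with paddr, access the buffer via vaddr ...
 */
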
/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
        return;
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_read(&chunk->avail))
                        continue;

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
                start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
                                pool->data);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:       the generic memory pool
 * @func:       func to call
 * @data:       additional data used by @func
 *
 * Call @func for every chunk of the generic memory pool.  @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

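/*
 * Sketch of an iteration callback (illustrative only; count_chunks and
 * nr_chunks are not defined in this file).  Because @func runs under
 * rcu_read_lock, it must not sleep:
 *
 *	static void count_chunks(struct gen_pool *pool,
 *			struct gen_pool_chunk *chunk, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int nr_chunks = 0;
 *
 *	gen_pool_for_each_chunk(pool, count_chunks, &nr_chunks);
 */
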
/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk->end_addr - chunk->start_addr;
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

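/*
 * Sketch of switching a pool to the best-fit algorithm defined below
 * (illustrative only; sram_pool is the hypothetical pool from the
 * sketch near the top of this file):
 *
 *	gen_pool_set_algo(sram_pool, gen_pool_best_fit, NULL);
 */
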
/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region from which
 * we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                int next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);