linux/lib/genalloc.c
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
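
/*
 * Typical usage, as an illustrative sketch (the SRAM names, addresses
 * and sizes below are made-up, and error handling is abbreviated):
 *
 *	struct gen_pool *pool;
 *	unsigned long buf;
 *
 *	pool = gen_pool_create(5, -1);		// 32-byte granules
 *	gen_pool_add_virt(pool, (unsigned long)sram_va, sram_phys,
 *			  sram_size, -1);
 *
 *	buf = gen_pool_alloc(pool, 256);
 *	if (buf)
 *		gen_pool_free(pool, buf, 256);
 */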

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>

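/*
 * Lockless helpers: atomically set or clear a mask of bits within a
 * single word of the bitmap with cmpxchg, retrying when another user
 * updates the word concurrently.  They return -EBUSY when a requested
 * bit is not in the expected state (already set for set_bits_ll(),
 * already clear for clear_bits_ll()), so callers can back out.
 */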
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if (val & mask_to_set)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

        return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
        unsigned long val, nval;

        nval = *addr;
        do {
                val = nval;
                if ((val & mask_to_clear) != mask_to_clear)
                        return -EBUSY;
                cpu_relax();
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

        return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one of them fails and returns the number of
 * bits still left to set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                        return nr;
                nr -= bits_to_set;
                bits_to_set = BITS_PER_LONG;
                mask_to_set = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_set &= BITMAP_LAST_WORD_MASK(size);
                if (set_bits_ll(p, mask_to_set))
                        return nr;
        }

        return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one of them fails and returns the number of
 * bits still left to clear; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
                nr -= bits_to_clear;
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
                p++;
        }
        if (nr) {
                mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
                if (clear_bits_ll(p, mask_to_clear))
                        return nr;
        }

        return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size;
        atomic_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
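
/*
 * For instance, a driver with a device-local SRAM could hand the region
 * to the pool like this (a hedged sketch: res, dev and the mapping are
 * assumed to come from the driver's probe path):
 *
 *	void __iomem *va = devm_ioremap(dev, res->start,
 *					resource_size(res));
 *
 *	rc = gen_pool_add_virt(pool, (unsigned long)va, res->start,
 *			       resource_size(res), dev_to_node(dev));
 *
 * Recording the physical base here is what later lets
 * gen_pool_virt_to_phys() translate allocations for DMA.
 */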

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
                        break;
                }
        }
        rcu_read_unlock();

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
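
/*
 * Example (illustrative only): translating a pool allocation into a
 * physical address for a descriptor ring.
 *
 *	unsigned long vaddr = gen_pool_alloc(pool, len);
 *	phys_addr_t paddr = gen_pool_virt_to_phys(pool, vaddr);
 *
 * This only works for chunks that were added with a valid @phys via
 * gen_pool_add_virt().
 */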

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_read(&chunk->avail))
                        continue;

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
retry:
                start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
                                pool->data);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                if (remain) {
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                                                 nbits - remain);
                        BUG_ON(remain);
                        goto retry;
                }

                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_sub(size, &chunk->avail);
                break;
        }
        rcu_read_unlock();
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
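
/*
 * The header comment suggests one pool per CPU for scalability.  A
 * minimal sketch of that pattern (names here are hypothetical):
 *
 *	static DEFINE_PER_CPU(struct gen_pool *, chunk_pool);
 *
 *	unsigned long addr =
 *		gen_pool_alloc(get_cpu_var(chunk_pool), size);
 *	put_cpu_var(chunk_pool);
 *
 * Each per-CPU pool must still be created and filled during init with
 * gen_pool_create() plus gen_pool_add_virt().
 */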

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool:       the generic memory pool
 * @func:       func to call
 * @data:       additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
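
/*
 * Example of a chunk visitor (a sketch; the printout is arbitrary):
 *
 *	static void chunk_show(struct gen_pool *pool,
 *			       struct gen_pool_chunk *chunk, void *data)
 *	{
 *		pr_info("chunk %lx-%lx\n", chunk->start_addr,
 *			chunk->end_addr);
 *	}
 *
 *	gen_pool_for_each_chunk(pool, chunk_show, NULL);
 *
 * Remember the callback runs under rcu_read_lock() and so must not
 * sleep.
 */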

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t avail = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_read(&chunk->avail);
        rcu_read_unlock();
        return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        size_t size = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk->end_addr - chunk->start_addr;
        rcu_read_unlock();
        return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
        rcu_read_lock();

        pool->algo = algo;
        if (!pool->algo)
                pool->algo = gen_pool_first_fit;

        pool->data = data;

        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
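
/*
 * For example, to trade allocation speed for less fragmentation:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 *
 * This should be done before the pool sees concurrent allocations,
 * since @algo and @data are not updated atomically as a pair.
 */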

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which the memory can be allocated.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                int next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (len == nr)
                                return start_bit;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
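
/*
 * A custom algorithm only has to match genpool_algo_t.  Sketch of an
 * aligned first-fit, with the alignment order smuggled in through the
 * pool's @data pointer (hypothetical, not part of this file):
 *
 *	static unsigned long first_fit_align(unsigned long *map,
 *			unsigned long size, unsigned long start,
 *			unsigned int nr, void *data)
 *	{
 *		unsigned long align_mask;
 *
 *		align_mask = (1UL << (unsigned long)data) - 1;
 *		return bitmap_find_next_zero_area(map, size, start, nr,
 *						  align_mask);
 *	}
 *
 *	gen_pool_set_algo(pool, first_fit_align, (void *)4);
 *
 * Note the alignment is in allocation granules (bitmap bits), not
 * bytes.
 */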

static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                int nid)
{
        struct gen_pool **ptr, *pool;

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return NULL;

        pool = gen_pool_create(min_alloc_order, nid);
        if (pool) {
                *ptr = pool;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);
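
/*
 * In a driver's probe routine this collapses teardown to nothing, e.g.
 * (a sketch; the 256-byte granularity is an arbitrary choice):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, 8,
 *					    dev_to_node(&pdev->dev));
 *		if (!pool)
 *			return -ENOMEM;
 *		// ... add chunks with gen_pool_add_virt(); no explicit
 *		// gen_pool_destroy() is needed on the error/remove paths
 *		return 0;
 *	}
 */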

/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
        struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
                                        NULL);

        if (!p)
                return NULL;
        return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);

#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;
        pdev = of_find_device_by_node(np_pool);
        of_node_put(np_pool);
        if (!pdev)
                return NULL;
        return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
#endif /* CONFIG_OF */
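
/*
 * Example lookup from a client node (a sketch; the "sram" property and
 * the node layout are made-up):
 *
 *	sram: sram@5c000000 { ... };
 *	codec {
 *		sram = <&sram>;
 *	};
 *
 * and in the client driver:
 *
 *	struct gen_pool *sram_pool =
 *		of_get_named_gen_pool(dev->of_node, "sram", 0);
 *
 * The lookup only succeeds if a pool was registered against the sram
 * platform device, e.g. with devm_gen_pool_create().
 */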