linux/drivers/base/dma-contiguous.c
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *      Marek Szyprowski <m.szyprowski@samsung.com>
 *      Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>

struct cma {
        unsigned long   base_pfn;       /* PFN of the first page in the area */
        unsigned long   count;          /* size of the area in pages */
        unsigned long   *bitmap;        /* per-page allocation bitmap */
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers who want to create a kernel
 * that works correctly on most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static int __init early_cma(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        size_cmdline = memparse(p, &p);
        return 0;
}
early_param("cma", early_cma);
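
/*
 * Illustrative usage (a sketch, not part of the original file): the size
 * parsed above comes from the kernel command line, for example
 *
 *      cma=64M
 *
 * memparse() accepts the usual K/M/G suffixes, so "cma=67108864" and
 * "cma=64M" both request a 64 MiB global area.
 */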

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
        struct memblock_region *reg;
        unsigned long total_pages = 0;

        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                total_pages += memblock_region_memory_end_pfn(reg) -
                               memblock_region_memory_base_pfn(reg);

        return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
        return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
        phys_addr_t selected_size = 0;

        pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

        if (size_cmdline != -1) {
                selected_size = size_cmdline;
        } else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
                selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
                selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
                selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
                selected_size = max(size_bytes, cma_early_percent_memory());
#endif
        }

        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);

                dma_declare_contiguous(NULL, selected_size, 0, limit);
        }
}
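
/*
 * Illustrative caller (a sketch, not part of this file): architecture setup
 * code is expected to invoke the reservation once memblock is up and other
 * early reservations are done, roughly:
 *
 *      void __init arm_memblock_init(...)
 *      {
 *              ...
 *              dma_contiguous_reserve(arm_dma_limit);
 *              ...
 *      }
 *
 * Here arm_dma_limit stands for whatever DMA addressing limit the
 * architecture enforces; passing 0 places no upper bound on the area.
 */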

static DEFINE_MUTEX(cma_mutex);

/*
 * Switch the reserved range's pageblocks to MIGRATE_CMA and hand the pages
 * back to the page allocator; the whole range must lie within a single zone.
 */
static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
        unsigned long pfn = base_pfn;
        unsigned i = count >> pageblock_order;
        struct zone *zone;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;
                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                return -EINVAL;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
        return 0;
}

static __init struct cma *cma_create_area(unsigned long base_pfn,
                                     unsigned long count)
{
        int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
        struct cma *cma;
        int ret = -ENOMEM;

        pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

        cma = kmalloc(sizeof *cma, GFP_KERNEL);
        if (!cma)
                return ERR_PTR(-ENOMEM);

        cma->base_pfn = base_pfn;
        cma->count = count;
        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

        if (!cma->bitmap)
                goto no_mem;

        ret = cma_activate_area(base_pfn, count);
        if (ret)
                goto error;

        pr_debug("%s: returned %p\n", __func__, (void *)cma);
        return cma;

error:
        kfree(cma->bitmap);
no_mem:
        kfree(cma);
        return ERR_PTR(ret);
}

static struct cma_reserved {
        phys_addr_t start;
        unsigned long size;
        struct device *dev;
} cma_reserved[MAX_CMA_AREAS] __initdata;
static unsigned cma_reserved_count __initdata;

static int __init cma_init_reserved_areas(void)
{
        struct cma_reserved *r = cma_reserved;
        unsigned i = cma_reserved_count;

        pr_debug("%s()\n", __func__);

        for (; i; --i, ++r) {
                struct cma *cma;
                cma = cma_create_area(PFN_DOWN(r->start),
                                      r->size >> PAGE_SHIFT);
                if (!IS_ERR(cma))
                        dev_set_cma_area(r->dev, cma);
        }
        return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *                            for a particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory for the specified device. It should be
 * called by board-specific code while the early allocator (memblock or
 * bootmem) is still active.
 */
int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
                                  phys_addr_t base, phys_addr_t limit)
{
        struct cma_reserved *r = &cma_reserved[cma_reserved_count];
        phys_addr_t alignment;

        pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
                 (unsigned long)size, (unsigned long)base,
                 (unsigned long)limit);

        /* Sanity checks */
        if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        /* Sanitise input arguments */
        alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        /* Reserve memory */
        if (base) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        base = -EBUSY;
                        goto err;
                }
        } else {
                /*
                 * Use __memblock_alloc_base() since
                 * memblock_alloc_base() panic()s.
                 */
                phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
                if (!addr) {
                        base = -ENOMEM;
                        goto err;
                } else {
                        base = addr;
                }
        }

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        r->start = base;
        r->size = size;
        r->dev = dev;
        cma_reserved_count++;
        pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
                (unsigned long)base);

        /* Architecture specific contiguous memory fixup. */
        dma_contiguous_early_fixup(base, size);
        return 0;
err:
        pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        return base;
}
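
/*
 * Illustrative caller (a sketch, not part of this file): board or SoC setup
 * code can give one device a private area while memblock is still active,
 * for example:
 *
 *      dma_declare_contiguous(&my_pdev.dev, 16 * SZ_1M, 0, 0);
 *
 * Here my_pdev is a hypothetical platform device; base == 0 lets memblock
 * pick any suitably aligned range and limit == 0 puts no upper bound on it.
 * Allocations for that device then come from this area instead of the
 * default global one.
 */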

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one otherwise. Requires the architecture-specific dev_get_cma_area()
 * helper function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                       unsigned int align)
{
        unsigned long mask, pfn, pageno, start = 0;
        struct cma *cma = dev_get_cma_area(dev);
        struct page *page = NULL;
        int ret;

        if (!cma || !cma->count)
                return NULL;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = (1 << align) - 1;

        mutex_lock(&cma_mutex);

        for (;;) {
                pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
                                                    start, count, mask);
                if (pageno >= cma->count)
                        break;

                pfn = cma->base_pfn + pageno;
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                if (ret == 0) {
                        bitmap_set(cma->bitmap, pageno, count);
                        page = pfn_to_page(pfn);
                        break;
                } else if (ret != -EBUSY) {
                        break;
                }
                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = pageno + mask + 1;
        }

        mutex_unlock(&cma_mutex);
        pr_debug("%s(): returned %p\n", __func__, page);
        return page;
}
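
/*
 * Illustrative usage (a sketch, not part of this file): a DMA mapping
 * backend would allocate a buffer of 'size' bytes (page aligned) roughly
 * like this:
 *
 *      int count = size >> PAGE_SHIFT;
 *      struct page *page;
 *
 *      page = dma_alloc_from_contiguous(dev, count, get_order(size));
 *      if (!page)
 *              return NULL;
 *
 * The pages come straight from the CMA area, so the caller is expected to
 * clear them and set up any mappings or cache maintenance itself.
 */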

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count)
{
        struct cma *cma = dev_get_cma_area(dev);
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        mutex_lock(&cma_mutex);
        bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
        free_contig_range(pfn, count);
        mutex_unlock(&cma_mutex);

        return true;
}
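/*
 * Illustrative usage (a sketch, not part of this file): freeing mirrors the
 * allocation example above; the false return lets callers fall back to the
 * normal page allocator when the pages did not come from a CMA area:
 *
 *      if (!dma_release_from_contiguous(dev, page, count))
 *              __free_pages(page, get_order(size));
 */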