linux/drivers/base/dma-contiguous.c
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *      Marek Szyprowski <m.szyprowski@samsung.com>
 *      Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>

#ifndef SZ_1M
#define SZ_1M (1 << 20)
#endif

struct cma {
        unsigned long   base_pfn;
        unsigned long   count;
        unsigned long   *bitmap;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static long size_cmdline = -1;

static int __init early_cma(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        size_cmdline = memparse(p, &p);
        return 0;
}
early_param("cma", early_cma);
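
/*
 * Example (illustrative, not from the original source): the value of the
 * "cma=" early parameter is parsed with memparse(), so the usual K/M/G
 * suffixes work.  Booting with
 *
 *      cma=64M
 *
 * on the kernel command line requests a 64 MiB global CMA area and
 * overrides the Kconfig-selected default described above.
 */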

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static unsigned long __init __maybe_unused cma_early_percent_memory(void)
{
        struct memblock_region *reg;
        unsigned long total_pages = 0;

        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                total_pages += memblock_region_memory_end_pfn(reg) -
                               memblock_region_memory_base_pfn(reg);

        return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused unsigned long cma_early_percent_memory(void)
{
        return 0;
}

#endif
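
/*
 * Worked example (illustrative): with 512 MiB of memory (131072 pages of
 * 4 KiB) and CONFIG_CMA_SIZE_PERCENTAGE=10, the function above returns
 * (131072 * 10 / 100) << PAGE_SHIFT = 13107 pages, i.e. roughly 51 MiB.
 */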

/**
 * dma_contiguous_reserve() - reserve area for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
        unsigned long selected_size = 0;

        pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

        if (size_cmdline != -1) {
                selected_size = size_cmdline;
        } else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
                selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
                selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
                selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
                selected_size = max(size_bytes, cma_early_percent_memory());
#endif
        }

        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         selected_size / SZ_1M);

                dma_declare_contiguous(NULL, selected_size, 0, limit);
        }
}
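
/*
 * Usage sketch (hypothetical, not part of this file): architecture setup
 * code is expected to call dma_contiguous_reserve() while memblock is
 * still the active allocator, after its own reservations are done, e.g.:
 *
 *      void __init foo_arch_reserve(void)
 *      {
 *              dma_contiguous_reserve(lowmem_limit);
 *      }
 *
 * where foo_arch_reserve() and lowmem_limit are placeholders for the
 * arch-specific hook and the DMA-reachable address limit it passes.
 */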

static DEFINE_MUTEX(cma_mutex);
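
/*
 * cma_activate_area() walks the reserved range one pageblock at a time,
 * checks that every pfn is valid and that the whole range sits in a single
 * zone, and hands each pageblock over to the page allocator as MIGRATE_CMA
 * via init_cma_reserved_pageblock().
 */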

static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
{
        unsigned long pfn = base_pfn;
        unsigned i = count >> pageblock_order;
        struct zone *zone;

        WARN_ON_ONCE(!pfn_valid(pfn));
        zone = page_zone(pfn_to_page(pfn));

        do {
                unsigned j;
                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
                        if (page_zone(pfn_to_page(pfn)) != zone)
                                return -EINVAL;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
        return 0;
}
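
/*
 * cma_create_area() builds the runtime representation of one reserved
 * range: it allocates a struct cma and an allocation bitmap with one bit
 * per page, then activates the underlying pageblocks.  It runs once the
 * slab allocator is available (see cma_init_reserved_areas() below).
 */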

static __init struct cma *cma_create_area(unsigned long base_pfn,
                                     unsigned long count)
{
        int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
        struct cma *cma;
        int ret = -ENOMEM;

        pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);

        cma = kmalloc(sizeof *cma, GFP_KERNEL);
        if (!cma)
                return ERR_PTR(-ENOMEM);

        cma->base_pfn = base_pfn;
        cma->count = count;
        cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

        if (!cma->bitmap)
                goto no_mem;

        ret = cma_activate_area(base_pfn, count);
        if (ret)
                goto error;

        pr_debug("%s: returned %p\n", __func__, (void *)cma);
        return cma;

error:
        kfree(cma->bitmap);
no_mem:
        kfree(cma);
        return ERR_PTR(ret);
}

static struct cma_reserved {
        phys_addr_t start;
        unsigned long size;
        struct device *dev;
} cma_reserved[MAX_CMA_AREAS] __initdata;
static unsigned cma_reserved_count __initdata;
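
/*
 * Early reservations are only recorded in cma_reserved[]; turning them
 * into usable CMA areas needs kmalloc() and a working page allocator, so
 * that second stage is deferred to a core_initcall.
 */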

static int __init cma_init_reserved_areas(void)
{
        struct cma_reserved *r = cma_reserved;
        unsigned i = cma_reserved_count;

        pr_debug("%s()\n", __func__);

        for (; i; --i, ++r) {
                struct cma *cma;
                cma = cma_create_area(PFN_DOWN(r->start),
                                      r->size >> PAGE_SHIFT);
                if (!IS_ERR(cma))
                        dev_set_cma_area(r->dev, cma);
        }
        return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_declare_contiguous() - reserve area for contiguous memory handling
 *                            for a particular device
 * @dev:   Pointer to device structure.
 * @size:  Size of the reserved memory.
 * @base:  Start address of the reserved memory (optional, 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory for the specified device. It should be
 * called by board specific code while the early allocator (memblock or
 * bootmem) is still active.
 */
int __init dma_declare_contiguous(struct device *dev, unsigned long size,
                                  phys_addr_t base, phys_addr_t limit)
{
        struct cma_reserved *r = &cma_reserved[cma_reserved_count];
        unsigned long alignment;

        pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
                 (unsigned long)size, (unsigned long)base,
                 (unsigned long)limit);

        /* Sanity checks */
        if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
                pr_err("Not enough slots for CMA reserved regions!\n");
                return -ENOSPC;
        }

        if (!size)
                return -EINVAL;

        /* Sanitise input arguments */
        alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
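        /*
         * Illustration: with 4 KiB pages and the default MAX_ORDER of 11,
         * MAX_ORDER - 1 == 10 usually exceeds pageblock_order, so base,
         * size and limit end up aligned to 4 KiB << 10 == 4 MiB.
         */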
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);

        /* Reserve memory */
        if (base) {
                if (memblock_is_region_reserved(base, size) ||
                    memblock_reserve(base, size) < 0) {
                        base = -EBUSY;
                        goto err;
                }
        } else {
                /*
                 * Use __memblock_alloc_base() since
                 * memblock_alloc_base() panic()s.
                 */
                phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
                if (!addr) {
                        base = -ENOMEM;
                        goto err;
                } else if (addr + size > ~(unsigned long)0) {
                        memblock_free(addr, size);
                        base = -EINVAL;
                        goto err;
                } else {
                        base = addr;
                }
        }

        /*
         * Each reserved area must be initialised later, when more kernel
         * subsystems (like slab allocator) are available.
         */
        r->start = base;
        r->size = size;
        r->dev = dev;
        cma_reserved_count++;
        pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
                (unsigned long)base);

        /* Architecture specific contiguous memory fixup. */
        dma_contiguous_early_fixup(base, size);
        return 0;
err:
        pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
        return base;
}
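
/*
 * Usage sketch (hypothetical board code, not part of this file): board
 * setup code can carve out a private CMA area for a single device while
 * memblock is still active, for example reserving 32 MiB, placed anywhere
 * (base 0) with no upper address limit (limit 0), for a hypothetical
 * foo_camera platform device:
 *
 *      static struct platform_device foo_camera;
 *
 *      void __init foo_board_reserve(void)
 *      {
 *              dma_declare_contiguous(&foo_camera.dev, 32 * SZ_1M, 0, 0);
 *      }
 */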

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
                                       unsigned int align)
{
        unsigned long mask, pfn, pageno, start = 0;
        struct cma *cma = dev_get_cma_area(dev);
        int ret;

        if (!cma || !cma->count)
                return NULL;

        if (align > CONFIG_CMA_ALIGNMENT)
                align = CONFIG_CMA_ALIGNMENT;

        pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
                 count, align);

        if (!count)
                return NULL;

        mask = (1 << align) - 1;

        mutex_lock(&cma_mutex);

        for (;;) {
                pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
                                                    start, count, mask);
                if (pageno >= cma->count) {
                        ret = -ENOMEM;
                        goto error;
                }

                pfn = cma->base_pfn + pageno;
                ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
                if (ret == 0) {
                        bitmap_set(cma->bitmap, pageno, count);
                        break;
                } else if (ret != -EBUSY) {
                        goto error;
                }
                pr_debug("%s(): memory range at %p is busy, retrying\n",
                         __func__, pfn_to_page(pfn));
                /* try again with a bit different memory target */
                start = pageno + mask + 1;
        }

        mutex_unlock(&cma_mutex);

        pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
        return pfn_to_page(pfn);
error:
        mutex_unlock(&cma_mutex);
        return NULL;
}
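
/*
 * Usage sketch (illustrative, not part of this file): callers typically
 * ask for whole pages with the alignment given as a page order, e.g.
 * 16 pages (64 KiB with 4 KiB pages) aligned to 2^4 pages:
 *
 *      struct page *page = dma_alloc_from_contiguous(dev, 16, 4);
 *
 * A NULL return means the request could not be satisfied from the CMA
 * area and the caller should fall back to another allocator.
 */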

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count)
{
        struct cma *cma = dev_get_cma_area(dev);
        unsigned long pfn;

        if (!cma || !pages)
                return false;

        pr_debug("%s(page %p)\n", __func__, (void *)pages);

        pfn = page_to_pfn(pages);

        if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
                return false;

        VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

        mutex_lock(&cma_mutex);
        bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
        free_contig_range(pfn, count);
        mutex_unlock(&cma_mutex);

        return true;
}
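
/*
 * Usage sketch (illustrative, not part of this file): pages obtained from
 * dma_alloc_from_contiguous() are handed back with the same page count;
 * the return value tells the caller whether the pages came from a CMA
 * area at all, so a fallback path can free them by other means, e.g.:
 *
 *      if (!dma_release_from_contiguous(dev, page, 16))
 *              __free_pages(page, get_order(16 << PAGE_SHIFT));
 */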