/* linux/sound/core/memalloc.c */
   1/*
   2 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
   3 *                   Takashi Iwai <tiwai@suse.de>
   4 * 
   5 *  Generic memory allocators
   6 *
   7 *
   8 *   This program is free software; you can redistribute it and/or modify
   9 *   it under the terms of the GNU General Public License as published by
  10 *   the Free Software Foundation; either version 2 of the License, or
  11 *   (at your option) any later version.
  12 *
  13 *   This program is distributed in the hope that it will be useful,
  14 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 *   GNU General Public License for more details.
  17 *
  18 *   You should have received a copy of the GNU General Public License
  19 *   along with this program; if not, write to the Free Software
  20 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  21 *
  22 */
  23
  24#include <linux/module.h>
  25#include <linux/proc_fs.h>
  26#include <linux/init.h>
  27#include <linux/pci.h>
  28#include <linux/slab.h>
  29#include <linux/mm.h>
  30#include <linux/seq_file.h>
  31#include <asm/uaccess.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/moduleparam.h>
  34#include <linux/mutex.h>
  35#include <sound/memalloc.h>
  36
  37
/* module metadata for the standalone snd-page-alloc module */
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");


/*
 */
  45
/* protects mem_list_head and all struct snd_mem_list entries */
static DEFINE_MUTEX(list_mutex);
/* list of reserved (preserved) buffers; entries are struct snd_mem_list */
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
	struct snd_dma_buffer buffer;	/* the preserved buffer itself */
	unsigned int id;		/* owner id, e.g. snd_dma_pci_buf_id() */
	struct list_head list;		/* link in mem_list_head */
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
  58
  59/*
  60 *
  61 *  Generic memory allocators
  62 *
  63 */
  64
  65static long snd_allocated_pages; /* holding the number of allocated pages */
  66
  67static inline void inc_snd_pages(int order)
  68{
  69        snd_allocated_pages += 1 << order;
  70}
  71
  72static inline void dec_snd_pages(int order)
  73{
  74        snd_allocated_pages -= 1 << order;
  75}
  76
  77/**
  78 * snd_malloc_pages - allocate pages with the given size
  79 * @size: the size to allocate in bytes
  80 * @gfp_flags: the allocation conditions, GFP_XXX
  81 *
  82 * Allocates the physically contiguous pages with the given size.
  83 *
  84 * Returns the pointer of the buffer, or NULL if no enoguh memory.
  85 */
  86void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
  87{
  88        int pg;
  89        void *res;
  90
  91        if (WARN_ON(!size))
  92                return NULL;
  93        if (WARN_ON(!gfp_flags))
  94                return NULL;
  95        gfp_flags |= __GFP_COMP;        /* compound page lets parts be mapped */
  96        pg = get_order(size);
  97        if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
  98                inc_snd_pages(pg);
  99        return res;
 100}
 101
 102/**
 103 * snd_free_pages - release the pages
 104 * @ptr: the buffer pointer to release
 105 * @size: the allocated buffer size
 106 *
 107 * Releases the buffer allocated via snd_malloc_pages().
 108 */
 109void snd_free_pages(void *ptr, size_t size)
 110{
 111        int pg;
 112
 113        if (ptr == NULL)
 114                return;
 115        pg = get_order(size);
 116        dec_snd_pages(pg);
 117        free_pages((unsigned long) ptr, pg);
 118}
 119
 120/*
 121 *
 122 *  Bus-specific memory allocators
 123 *
 124 */
 125
 126#ifdef CONFIG_HAS_DMA
 127/* allocate the coherent DMA pages */
 128static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
 129{
 130        int pg;
 131        void *res;
 132        gfp_t gfp_flags;
 133
 134        if (WARN_ON(!dma))
 135                return NULL;
 136        pg = get_order(size);
 137        gfp_flags = GFP_KERNEL
 138                | __GFP_COMP    /* compound page lets parts be mapped */
 139                | __GFP_NORETRY /* don't trigger OOM-killer */
 140                | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
 141        res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
 142        if (res != NULL)
 143                inc_snd_pages(pg);
 144
 145        return res;
 146}
 147
 148/* free the coherent DMA pages */
 149static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
 150                               dma_addr_t dma)
 151{
 152        int pg;
 153
 154        if (ptr == NULL)
 155                return;
 156        pg = get_order(size);
 157        dec_snd_pages(pg);
 158        dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
 159}
 160#endif /* CONFIG_HAS_DMA */
 161
 162/*
 163 *
 164 *  ALSA generic memory management
 165 *
 166 */
 167
 168
/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type, SNDRV_DMA_TYPE_XXX
 * @device: the device pointer; for SNDRV_DMA_TYPE_CONTINUOUS this actually
 *	carries the GFP_XXX allocation flags cast to a pointer, not a device
 * @size: the buffer size to allocate in bytes
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;	/* stays 0 unless the allocation succeeds */
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		/* here "device" carries the GFP flags (see kernel-doc above) */
		dmab->area = snd_malloc_pages(size, (unsigned long)device);
		dmab->addr = 0;	/* no DMA address for plain pages */
		break;
#ifdef CONFIG_HAS_DMA
	case SNDRV_DMA_TYPE_DEV:
		dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		/* fills dmab itself; dmab->area stays NULL on failure */
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
#endif
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
		dmab->area = NULL;
		dmab->addr = 0;
		return -ENXIO;
	}
	if (! dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
 217
 218/**
 219 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 220 * @type: the DMA buffer type
 221 * @device: the device pointer
 222 * @size: the buffer size to allocate
 223 * @dmab: buffer allocation record to store the allocated data
 224 *
 225 * Calls the memory-allocator function for the corresponding
 226 * buffer type.  When no space is left, this function reduces the size and
 227 * tries to allocate again.  The size actually allocated is stored in
 228 * res_size argument.
 229 * 
 230 * Returns zero if the buffer with the given size is allocated successfuly,
 231 * other a negative value at error.
 232 */
 233int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
 234                                 struct snd_dma_buffer *dmab)
 235{
 236        int err;
 237
 238        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
 239                size_t aligned_size;
 240                if (err != -ENOMEM)
 241                        return err;
 242                if (size <= PAGE_SIZE)
 243                        return -ENOMEM;
 244                aligned_size = PAGE_SIZE << get_order(size);
 245                if (size != aligned_size)
 246                        size = aligned_size;
 247                else
 248                        size >>= 1;
 249        }
 250        if (! dmab->area)
 251                return -ENOMEM;
 252        return 0;
 253}
 254
 255
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages(), dispatching
 * on the type stored in dmab->dev.type at allocation time.
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		snd_free_pages(dmab->area, dmab->bytes);
		break;
#ifdef CONFIG_HAS_DMA
	case SNDRV_DMA_TYPE_DEV:
		snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
		break;
	case SNDRV_DMA_TYPE_DEV_SG:
		snd_free_sgbuf_pages(dmab);
		break;
#endif
	default:
		printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}
 280
 281
 282/**
 283 * snd_dma_get_reserved - get the reserved buffer for the given device
 284 * @dmab: the buffer allocation record to store
 285 * @id: the buffer id
 286 *
 287 * Looks for the reserved-buffer list and re-uses if the same buffer
 288 * is found in the list.  When the buffer is found, it's removed from the free list.
 289 *
 290 * Returns the size of buffer if the buffer is found, or zero if not found.
 291 */
 292size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
 293{
 294        struct snd_mem_list *mem;
 295
 296        if (WARN_ON(!dmab))
 297                return 0;
 298
 299        mutex_lock(&list_mutex);
 300        list_for_each_entry(mem, &mem_list_head, list) {
 301                if (mem->id == id &&
 302                    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
 303                     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
 304                        struct device *dev = dmab->dev.dev;
 305                        list_del(&mem->list);
 306                        *dmab = mem->buffer;
 307                        if (dmab->dev.dev == NULL)
 308                                dmab->dev.dev = dev;
 309                        kfree(mem);
 310                        mutex_unlock(&list_mutex);
 311                        return dmab->bytes;
 312                }
 313        }
 314        mutex_unlock(&list_mutex);
 315        return 0;
 316}
 317
 318/**
 319 * snd_dma_reserve_buf - reserve the buffer
 320 * @dmab: the buffer to reserve
 321 * @id: the buffer id
 322 *
 323 * Reserves the given buffer as a reserved buffer.
 324 * 
 325 * Returns zero if successful, or a negative code at error.
 326 */
 327int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
 328{
 329        struct snd_mem_list *mem;
 330
 331        if (WARN_ON(!dmab))
 332                return -EINVAL;
 333        mem = kmalloc(sizeof(*mem), GFP_KERNEL);
 334        if (! mem)
 335                return -ENOMEM;
 336        mutex_lock(&list_mutex);
 337        mem->buffer = *dmab;
 338        mem->id = id;
 339        list_add_tail(&mem->list, &mem_list_head);
 340        mutex_unlock(&list_mutex);
 341        return 0;
 342}
 343
 344/*
 345 * purge all reserved buffers
 346 */
 347static void free_all_reserved_pages(void)
 348{
 349        struct list_head *p;
 350        struct snd_mem_list *mem;
 351
 352        mutex_lock(&list_mutex);
 353        while (! list_empty(&mem_list_head)) {
 354                p = mem_list_head.next;
 355                mem = list_entry(p, struct snd_mem_list, list);
 356                list_del(p);
 357                snd_dma_free_pages(&mem->buffer);
 358                kfree(mem);
 359        }
 360        mutex_unlock(&list_mutex);
 361}
 362
 363
 364#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
/* proc file showing allocation status; writable for pre-allocation (PCI) */
#define SND_MEM_PROC_FILE       "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;
 370
 371static int snd_mem_proc_read(struct seq_file *seq, void *offset)
 372{
 373        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
 374        struct snd_mem_list *mem;
 375        int devno;
 376        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
 377
 378        mutex_lock(&list_mutex);
 379        seq_printf(seq, "pages  : %li bytes (%li pages per %likB)\n",
 380                   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
 381        devno = 0;
 382        list_for_each_entry(mem, &mem_list_head, list) {
 383                devno++;
 384                seq_printf(seq, "buffer %d : ID %08x : type %s\n",
 385                           devno, mem->id, types[mem->buffer.dev.type]);
 386                seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
 387                           (unsigned long)mem->buffer.addr,
 388                           (int)mem->buffer.bytes);
 389        }
 390        mutex_unlock(&list_mutex);
 391        return 0;
 392}
 393
/* open callback: snd_mem_proc_read() renders the whole file in one pass */
static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, snd_mem_proc_read, NULL);
}
 398
/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
/* pull the next whitespace-separated token from the buffer */
#define gettoken(bufp) strsep(bufp, " \t\n")

/*
 * Handle writes to /proc/driver/snd-page-alloc.
 *
 * Commands:
 *   "add <vendor> <device> <mask> <size> <buffers>"
 *	pre-allocate DMA buffers for every matching PCI device (or, when
 *	no device is present, without a device pointer) and put them on
 *	the reserved-buffer list;
 *   "erase"
 *	free all reserved buffers.
 *
 * Lines starting with '#' are ignored.  Parse errors are logged but the
 * function still returns @count so the writer never loops on a short write.
 */
static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
				  size_t count, loff_t * ppos)
{
	char buf[128];
	char *token, *p;

	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, buffer, count))
		return -EFAULT;
	buf[count] = '\0';

	p = buf;
	token = gettoken(&p);
	if (! token || *token == '#')
		return count;
	if (strcmp(token, "add") == 0) {
		char *endp;
		int vendor, device, size, buffers;
		long mask;
		int i, alloced;
		struct pci_dev *pci;

		/* parse: vendor id, device id, DMA mask, buffer size
		 * (64kB..16MB, suffixes via memparse), buffer count (1..4) */
		if ((token = gettoken(&p)) == NULL ||
		    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (device = simple_strtol(token, NULL, 0)) <= 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (mask = simple_strtol(token, NULL, 0)) < 0 ||
		    (token = gettoken(&p)) == NULL ||
		    (size = memparse(token, &endp)) < 64*1024 ||
		    size > 16*1024*1024 /* too big */ ||
		    (token = gettoken(&p)) == NULL ||
		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
		    buffers > 4) {
			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
			return count;
		}
		vendor &= 0xffff;
		device &= 0xffff;

		alloced = 0;
		pci = NULL;
		/* allocate for every PCI device matching vendor:device */
		while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
			/* apply a non-trivial DMA mask before allocating */
			if (mask > 0 && mask < 0xffffffff) {
				if (pci_set_dma_mask(pci, mask) < 0 ||
				    pci_set_consistent_dma_mask(pci, mask) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
					pci_dev_put(pci);
					return count;
				}
			}
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					pci_dev_put(pci);
					return count;
				}
				snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
			}
			alloced++;
		}
		if (! alloced) {
			/* no matching device present: allocate without a
			 * device and key the reservation on vendor/device */
			for (i = 0; i < buffers; i++) {
				struct snd_dma_buffer dmab;
				memset(&dmab, 0, sizeof(dmab));
				/* FIXME: We can allocate only in ZONE_DMA
				 * without a device pointer!
				 */
				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
							size, &dmab) < 0) {
					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
					break;
				}
				snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
			}
		}
	} else if (strcmp(token, "erase") == 0)
		/* FIXME: need for releasing each buffer chunk? */
		free_all_reserved_pages();
	else
		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
	return count;
}
#endif /* CONFIG_PCI */
 491
/* proc file operations: reading dumps the allocation status via seq_file,
 * writing (compiled in only with PCI) pre-allocates reserved buffers */
static const struct file_operations snd_mem_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= snd_mem_proc_open,
	.read		= seq_read,
#ifdef CONFIG_PCI
	.write		= snd_mem_proc_write,
#endif
	.llseek		= seq_lseek,
	.release	= single_release,
};
 502
 503#endif /* CONFIG_PROC_FS */
 504
/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
	/* failure is non-fatal: the allocator works without the proc file */
	snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
				   &snd_mem_proc_fops);
#endif
	return 0;
}
 517
 518static void __exit snd_mem_exit(void)
 519{
 520        remove_proc_entry(SND_MEM_PROC_FILE, NULL);
 521        free_all_reserved_pages();
 522        if (snd_allocated_pages > 0)
 523                printk(KERN_ERR "snd-malloc: Memory leak?  pages not freed = %li\n", snd_allocated_pages);
 524}
 525
 526
module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports, used by the ALSA core and drivers
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);
 543