linux/sound/core/memalloc.c
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
#ifdef CONFIG_SBUS
#include <asm/sbus.h>
#endif


MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");


/*
 */

void *snd_malloc_sgbuf_pages(struct device *device,
                             size_t size, struct snd_dma_buffer *dmab,
                             size_t *res_size);
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab);

/*
 */

static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);

/* buffer preservation list */
struct snd_mem_list {
        struct snd_dma_buffer buffer;
        unsigned int id;
        struct list_head list;
};

/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1

#ifdef CONFIG_SND_DEBUG
#define __ASTRING__(x) #x
#define snd_assert(expr, args...) do {\
        if (!(expr)) {\
                printk(KERN_ERR "snd-malloc: BUG? (%s) (called from %p)\n", __ASTRING__(expr), __builtin_return_address(0));\
                args;\
        }\
} while (0)
#else
#define snd_assert(expr, args...) /**/
#endif

/*
 *
 *  Generic memory allocators
 *
 */

static long snd_allocated_pages; /* holds the number of allocated pages */

static inline void inc_snd_pages(int order)
{
        snd_allocated_pages += 1 << order;
}

static inline void dec_snd_pages(int order)
{
        snd_allocated_pages -= 1 << order;
}

/**
 * snd_malloc_pages - allocate pages with the given size
 * @size: the size to allocate in bytes
 * @gfp_flags: the allocation conditions, GFP_XXX
 *
 * Allocates physically contiguous pages of the given size.
 *
 * Returns the pointer of the buffer, or NULL if not enough memory is available.
 */
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
        int pg;
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(gfp_flags != 0, return NULL);
        gfp_flags |= __GFP_COMP;        /* compound page lets parts be mapped */
        pg = get_order(size);
        if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
                inc_snd_pages(pg);
        return res;
}

/**
 * snd_free_pages - release the pages
 * @ptr: the buffer pointer to release
 * @size: the allocated buffer size
 *
 * Releases the buffer allocated via snd_malloc_pages().
 */
void snd_free_pages(void *ptr, size_t size)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        free_pages((unsigned long) ptr, pg);
}
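
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * needs a small, physically contiguous scratch buffer could pair the two
 * helpers above like this.  The 8 KB size and GFP_KERNEL flags are arbitrary
 * example values.
 *
 *      void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...use the buffer...
 *      snd_free_pages(buf, 8192);
 *
 * The same size must be passed to snd_free_pages() so that the computed page
 * order (and hence the allocated-pages counter) matches the allocation.
 */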

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
        int pg;
        void *res;
        gfp_t gfp_flags;

        snd_assert(size > 0, return NULL);
        snd_assert(dma != NULL, return NULL);
        pg = get_order(size);
        gfp_flags = GFP_KERNEL
                | __GFP_COMP    /* compound page lets parts be mapped */
                | __GFP_NORETRY /* don't trigger OOM-killer */
                | __GFP_NOWARN; /* no stack trace print - this call is non-critical */
        res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
        if (res != NULL)
                inc_snd_pages(pg);

        return res;
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
                               dma_addr_t dma)
{
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */

#ifdef CONFIG_SBUS

static void *snd_malloc_sbus_pages(struct device *dev, size_t size,
                                   dma_addr_t *dma_addr)
{
        struct sbus_dev *sdev = (struct sbus_dev *)dev;
        int pg;
        void *res;

        snd_assert(size > 0, return NULL);
        snd_assert(dma_addr != NULL, return NULL);
        pg = get_order(size);
        res = sbus_alloc_consistent(sdev, PAGE_SIZE * (1 << pg), dma_addr);
        if (res != NULL)
                inc_snd_pages(pg);
        return res;
}

static void snd_free_sbus_pages(struct device *dev, size_t size,
                                void *ptr, dma_addr_t dma_addr)
{
        struct sbus_dev *sdev = (struct sbus_dev *)dev;
        int pg;

        if (ptr == NULL)
                return;
        pg = get_order(size);
        dec_snd_pages(pg);
        sbus_free_consistent(sdev, PAGE_SIZE * (1 << pg), ptr, dma_addr);
}

#endif /* CONFIG_SBUS */

/*
 *
 *  ALSA generic memory management
 *
 */


/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
                        struct snd_dma_buffer *dmab)
{
        snd_assert(size > 0, return -ENXIO);
        snd_assert(dmab != NULL, return -ENXIO);

        dmab->dev.type = type;
        dmab->dev.dev = device;
        dmab->bytes = 0;
        switch (type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                dmab->area = snd_malloc_pages(size, (unsigned long)device);
                dmab->addr = 0;
                break;
#ifdef CONFIG_SBUS
        case SNDRV_DMA_TYPE_SBUS:
                dmab->area = snd_malloc_sbus_pages(device, size, &dmab->addr);
                break;
#endif
#ifdef CONFIG_HAS_DMA
        case SNDRV_DMA_TYPE_DEV:
                dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_malloc_sgbuf_pages(device, size, dmab, NULL);
                break;
#endif
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
                dmab->area = NULL;
                dmab->addr = 0;
                return -ENXIO;
        }
        if (! dmab->area)
                return -ENOMEM;
        dmab->bytes = size;
        return 0;
}
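
/*
 * Illustrative sketch (not part of the original file): for
 * SNDRV_DMA_TYPE_CONTINUOUS the "device" argument actually carries the GFP
 * flags, as the cast in the switch above shows.  Callers usually wrap the
 * flags with the snd_dma_continuous_data() macro from <sound/memalloc.h>;
 * the 32 KB size here is just an example value.
 *
 *      struct snd_dma_buffer dmab;
 *      int err;
 *
 *      err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS,
 *                                snd_dma_continuous_data(GFP_KERNEL),
 *                                32 * 1024, &dmab);
 *      if (err < 0)
 *              return err;
 *      ...dmab.area points to the buffer; dmab.addr stays 0 for this type...
 *      snd_dma_free_pages(&dmab);
 */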

/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Returns zero if the buffer with the given size is allocated successfully,
 * or a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
                                 struct snd_dma_buffer *dmab)
{
        int err;

        snd_assert(size > 0, return -ENXIO);
        snd_assert(dmab != NULL, return -ENXIO);

        while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
                if (err != -ENOMEM)
                        return err;
                size >>= 1;
                if (size <= PAGE_SIZE)
                        return -ENOMEM;
        }
        if (! dmab->area)
                return -ENOMEM;
        return 0;
}
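
/*
 * Illustrative sketch (not part of the original file): because the fallback
 * variant may shrink the request, callers should read back dmab.bytes to see
 * how much was really obtained.  Assuming `dev` is the card's struct device
 * pointer; the 512 KB request is an arbitrary example.
 *
 *      struct snd_dma_buffer dmab;
 *
 *      if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
 *                                       512 * 1024, &dmab) < 0)
 *              return -ENOMEM;
 *      printk(KERN_DEBUG "got %zu bytes\n", dmab.bytes);
 */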


/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
        switch (dmab->dev.type) {
        case SNDRV_DMA_TYPE_CONTINUOUS:
                snd_free_pages(dmab->area, dmab->bytes);
                break;
#ifdef CONFIG_SBUS
        case SNDRV_DMA_TYPE_SBUS:
                snd_free_sbus_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
#endif
#ifdef CONFIG_HAS_DMA
        case SNDRV_DMA_TYPE_DEV:
                snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
                break;
        case SNDRV_DMA_TYPE_DEV_SG:
                snd_free_sgbuf_pages(dmab);
                break;
#endif
        default:
                printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
        }
}
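
/*
 * Illustrative sketch (not part of the original file): a PCI driver would
 * normally allocate a coherent DMA buffer and later release it through the
 * same record, e.g. (assuming `pci` is a valid struct pci_dev pointer from
 * the probe routine; 64 KB is an example size):
 *
 *      struct snd_dma_buffer dmab;
 *
 *      if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *                              64 * 1024, &dmab) < 0)
 *              return -ENOMEM;
 *      ...program dmab.addr into the hardware, access dmab.area from the CPU...
 *      snd_dma_free_pages(&dmab);
 */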


/**
 * snd_dma_get_reserved_buf - get the reserved buffer for the given device
 * @dmab: the buffer allocation record to store the found buffer
 * @id: the buffer id
 *
 * Searches the reserved-buffer list and re-uses the buffer if a matching one
 * is found.  A matched buffer is removed from the list.
 *
 * Returns the size of the buffer if it is found, or zero if not found.
 */
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        snd_assert(dmab, return 0);

        mutex_lock(&list_mutex);
        list_for_each_entry(mem, &mem_list_head, list) {
                if (mem->id == id &&
                    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
                     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
                        struct device *dev = dmab->dev.dev;
                        list_del(&mem->list);
                        *dmab = mem->buffer;
                        if (dmab->dev.dev == NULL)
                                dmab->dev.dev = dev;
                        kfree(mem);
                        mutex_unlock(&list_mutex);
                        return dmab->bytes;
                }
        }
        mutex_unlock(&list_mutex);
        return 0;
}

/**
 * snd_dma_reserve_buf - reserve the buffer
 * @dmab: the buffer to reserve
 * @id: the buffer id
 *
 * Adds the given buffer to the reserved-buffer list.
 *
 * Returns zero if successful, or a negative code on error.
 */
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
        struct snd_mem_list *mem;

        snd_assert(dmab, return -EINVAL);
        mem = kmalloc(sizeof(*mem), GFP_KERNEL);
        if (! mem)
                return -ENOMEM;
        mutex_lock(&list_mutex);
        mem->buffer = *dmab;
        mem->id = id;
        list_add_tail(&mem->list, &mem_list_head);
        mutex_unlock(&list_mutex);
        return 0;
}
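
/*
 * Illustrative sketch (not part of the original file): the get/reserve pair
 * above lets a driver keep a large buffer across module reloads, similar to
 * what the ALSA PCM preallocation helpers do.  Assuming `pci` is the driver's
 * struct pci_dev and `size` the wanted buffer size:
 *
 *      struct snd_dma_buffer dmab;
 *      unsigned int id = snd_dma_pci_buf_id(pci);
 *
 *      memset(&dmab, 0, sizeof(dmab));
 *      dmab.dev.type = SNDRV_DMA_TYPE_DEV;
 *      dmab.dev.dev = snd_dma_pci_data(pci);
 *      if (!snd_dma_get_reserved_buf(&dmab, id) &&
 *          snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 *                              size, &dmab) < 0)
 *              return -ENOMEM;
 *      ...
 *      At removal time the buffer can be handed back with
 *      snd_dma_reserve_buf(&dmab, id) instead of snd_dma_free_pages(&dmab).
 */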

/*
 * purge all reserved buffers
 */
static void free_all_reserved_pages(void)
{
        struct list_head *p;
        struct snd_mem_list *mem;

        mutex_lock(&list_mutex);
        while (! list_empty(&mem_list_head)) {
                p = mem_list_head.next;
                mem = list_entry(p, struct snd_mem_list, list);
                list_del(p);
                snd_dma_free_pages(&mem->buffer);
                kfree(mem);
        }
        mutex_unlock(&list_mutex);
}


#ifdef CONFIG_PROC_FS
/*
 * proc file interface
 */
#define SND_MEM_PROC_FILE       "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;

static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
        long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
        struct snd_mem_list *mem;
        int devno;
        static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG", "SBUS" };

        mutex_lock(&list_mutex);
        seq_printf(seq, "pages  : %li bytes (%li pages per %likB)\n",
                   pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
        devno = 0;
        list_for_each_entry(mem, &mem_list_head, list) {
                devno++;
                seq_printf(seq, "buffer %d : ID %08x : type %s\n",
                           devno, mem->id, types[mem->buffer.dev.type]);
                seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
                           (unsigned long)mem->buffer.addr,
                           (int)mem->buffer.bytes);
        }
        mutex_unlock(&list_mutex);
        return 0;
}
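
/*
 * Illustrative note (not part of the original file): reading the proc file
 * simply dumps the page counter and the reserved-buffer list above.  Output
 * of "cat /proc/driver/snd-page-alloc" might look like this (the values are
 * made up, for a 4 KB page size and one reserved 64 KB buffer):
 *
 *      pages  : 65536 bytes (16 pages per 4kB)
 *      buffer 1 : ID 11020002 : type DEV
 *        addr = 0x3f000000, size = 65536 bytes
 */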

static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, snd_mem_proc_read, NULL);
}

/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")

static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
                                  size_t count, loff_t * ppos)
{
        char buf[128];
        char *token, *p;

        if (count > sizeof(buf) - 1)
                return -EINVAL;
        if (copy_from_user(buf, buffer, count))
                return -EFAULT;
        buf[count] = '\0';

        p = buf;
        token = gettoken(&p);
        if (! token || *token == '#')
                return count;
        if (strcmp(token, "add") == 0) {
                char *endp;
                int vendor, device, size, buffers;
                long mask;
                int i, alloced;
                struct pci_dev *pci;

                if ((token = gettoken(&p)) == NULL ||
                    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (device = simple_strtol(token, NULL, 0)) <= 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (mask = simple_strtol(token, NULL, 0)) < 0 ||
                    (token = gettoken(&p)) == NULL ||
                    (size = memparse(token, &endp)) < 64*1024 ||
                    size > 16*1024*1024 /* too big */ ||
                    (token = gettoken(&p)) == NULL ||
                    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
                    buffers > 4) {
                        printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
                        return count;
                }
                vendor &= 0xffff;
                device &= 0xffff;

                alloced = 0;
                pci = NULL;
                while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
                        if (mask > 0 && mask < 0xffffffff) {
                                if (pci_set_dma_mask(pci, mask) < 0 ||
                                    pci_set_consistent_dma_mask(pci, mask) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
                                        pci_dev_put(pci);
                                        return count;
                                }
                        }
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        pci_dev_put(pci);
                                        return count;
                                }
                                snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
                        }
                        alloced++;
                }
                if (! alloced) {
                        for (i = 0; i < buffers; i++) {
                                struct snd_dma_buffer dmab;
                                memset(&dmab, 0, sizeof(dmab));
                                /* FIXME: We can allocate only in ZONE_DMA
                                 * without a device pointer!
                                 */
                                if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
                                                        size, &dmab) < 0) {
                                        printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
                                        break;
                                }
                                snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
                        }
                }
        } else if (strcmp(token, "erase") == 0)
                /* FIXME: need for releasing each buffer chunk? */
                free_all_reserved_pages();
        else
                printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
        return count;
}
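
/*
 * Illustrative note (not part of the original file): with the parser above,
 * buffers can be pre-allocated from user space by writing a line of the form
 * "add <vendor> <device> <dma-mask> <size> <count>".  The PCI IDs below are
 * only an example; a mask of 0 keeps the device's default DMA mask:
 *
 *      echo "add 0x1102 0x0002 0 2M 2" > /proc/driver/snd-page-alloc
 *
 * All reserved buffers can be released again with:
 *
 *      echo erase > /proc/driver/snd-page-alloc
 */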
#endif /* CONFIG_PCI */

static const struct file_operations snd_mem_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = snd_mem_proc_open,
        .read           = seq_read,
#ifdef CONFIG_PCI
        .write          = snd_mem_proc_write,
#endif
        .llseek         = seq_lseek,
        .release        = single_release,
};

#endif /* CONFIG_PROC_FS */

/*
 * module entry
 */

static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
        snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
                                   &snd_mem_proc_fops);
#endif
        return 0;
}

static void __exit snd_mem_exit(void)
{
        remove_proc_entry(SND_MEM_PROC_FILE, NULL);
        free_all_reserved_pages();
        if (snd_allocated_pages > 0)
                printk(KERN_ERR "snd-malloc: Memory leak?  pages not freed = %li\n", snd_allocated_pages);
}


module_init(snd_mem_init)
module_exit(snd_mem_exit)


/*
 * exports
 */
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);

EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);

EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);