/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/bootmem.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long start, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(start, end);

	if (unlikely(err))
		return err;
	return nr;
}
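
/*
 * Illustrative sketch (not part of the original file): how a caller with a
 * freshly reserved [addr, addr + size) range might back it with pages via
 * vmap_page_range(). "my_pages" is a hypothetical array holding
 * size >> PAGE_SHIFT allocated pages.
 */
#if 0
	int ret;

	ret = vmap_page_range(addr, addr + size, PAGE_KERNEL, my_pages);
	if (ret < 0)
		return ret;	/* -EBUSY or -ENOMEM from the walkers above */
	/* ret is the number of ptes installed, i.e. size >> PAGE_SHIFT */
#endif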

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
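
/*
 * Illustrative sketch (not part of the original file): walking a vmalloc
 * buffer page by page with vmalloc_to_page().
 */
#if 0
	void *buf = vmalloc(8 * PAGE_SIZE);
	unsigned long off;

	for (off = 0; buf && off < 8 * PAGE_SIZE; off += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + off);
		/* page is the physical page backing buf + off */
	}
	vfree(buf);
#endif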


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}
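
/*
 * Illustrative sketch (not part of the original file): reserving and then
 * releasing a 4-page region of KVA with alloc_vmap_area()/free_vmap_area().
 */
#if 0
	struct vmap_area *va;

	va = alloc_vmap_area(4 * PAGE_SIZE, PAGE_SIZE,
			     VMALLOC_START, VMALLOC_END, -1, GFP_KERNEL);
	if (IS_ERR(va))
		return PTR_ERR(va);
	/* [va->va_start, va->va_end) is now reserved (but not yet mapped) */
	free_vmap_area(va);
#endif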

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
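
/*
 * Worked example (not part of the original file): with 4 online CPUs and
 * 4K pages, fls(4) == 3 and 32MB / PAGE_SIZE == 8192, so purging kicks in
 * once roughly 3 * 8192 == 24576 pages (96MB) of vmap space have been
 * lazily freed.
 */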

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE	(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,		\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
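
/*
 * Worked example (not part of the original file): on a 32-bit kernel with
 * NR_CPUS == 4 and 4K pages, VMALLOC_PAGES == 32768, so the scaled value is
 * 32768 / 4 / 16 == 512 bits. That lies inside the clamp range
 * [VMAP_BBMAP_BITS_MIN, VMAP_BBMAP_BITS_MAX] == [64, 1024], giving
 * VMAP_BBMAP_BITS == 512 and a VMAP_BLOCK_SIZE of 2MB per vmap block.
 */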

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
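
/*
 * Example (not part of the original file): every address inside the same
 * VMAP_BLOCK_SIZE-aligned block yields the same index, so a later vb_free()
 * of any address within a block finds that block's entry in the radix tree.
 */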

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
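
/*
 * Illustrative sketch (not part of the original file): the vm_map_ram()/
 * vm_unmap_ram() pair, which is the intended fast path for short-lived
 * mappings of a few pages.
 */
#if 0
	struct page *pages[4];
	void *mem;

	/* ... fill pages[] with four allocated pages ... */
	mem = vm_map_ram(pages, 4, -1, PAGE_KERNEL);
	if (!mem)
		return -ENOMEM;
	/* the four pages are now virtually contiguous at mem */
	vm_unmap_ram(mem, 4);	/* count must match the vm_map_ram() call */
#endif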

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = alloc_bootmem(sizeof(struct vmap_area));
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}
	vmap_initialized = true;
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
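
/*
 * Illustrative sketch (not part of the original file): map_vm_area()
 * advances the caller's page-array cursor by however many pages it
 * consumed, which is why it takes a struct page ***. "my_pages" and
 * "nr_pages" are hypothetical.
 */
#if 0
	struct vm_struct *area = get_vm_area(nr_pages << PAGE_SHIFT, VM_MAP);
	struct page **cursor = my_pages;

	if (!area)
		return -ENOMEM;
	if (map_vm_area(area, PAGE_KERNEL, &cursor)) {
		vunmap(area->addr);
		return -ENOMEM;
	}
	/* cursor now points nr_pages entries past my_pages */
#endif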

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       void *caller)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
				  caller);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
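
/*
 * Illustrative sketch (not part of the original file): mapping two pages
 * with vmap() and tearing the mapping down with vunmap(). Note that
 * vunmap() releases only the virtual mapping; the pages themselves still
 * belong to the caller.
 */
#if 0
	struct page *pages[2];
	void *addr;

	pages[0] = alloc_page(GFP_KERNEL);
	pages[1] = alloc_page(GFP_KERNEL);
	addr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (addr) {
		/* ... 2 * PAGE_SIZE of virtually contiguous space ... */
		vunmap(addr);
	}
	__free_page(pages[0]);
	__free_page(pages[1]);
#endif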

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
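
/*
 * Illustrative sketch (not part of the original file): the classic
 * vmalloc()/vfree() pairing for a buffer too large or too fragmented
 * for kmalloc().
 */
#if 0
	void *buf = vmalloc(64 * 1024);

	if (!buf)
		return -ENOMEM;
	/* ... 64K of virtually contiguous, physically scattered memory ... */
	vfree(buf);
#endif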
1419
1420/**
1421 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1422 * @size: allocation size
1423 *
1424 * The resulting memory area is zeroed so it can be mapped to userspace
1425 * without leaking data.
1426 */
1427void *vmalloc_user(unsigned long size)
1428{
1429        struct vm_struct *area;
1430        void *ret;
1431
1432        ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1433                             PAGE_KERNEL, -1, __builtin_return_address(0));
1434        if (ret) {
1435                area = find_vm_area(ret);
1436                area->flags |= VM_USERMAP;
1437        }
1438        return ret;
1439}
1440EXPORT_SYMBOL(vmalloc_user);
1441
1442/**
1443 *      vmalloc_node  -  allocate memory on a specific node
1444 *      @size:          allocation size
1445 *      @node:          numa node
1446 *
1447 *      Allocate enough pages to cover @size from the page level
1448 *      allocator and map them into contiguous kernel virtual space.
1449 *
1450 *      For tight control over page level allocator and protection flags
1451 *      use __vmalloc() instead.
1452 */
1453void *vmalloc_node(unsigned long size, int node)
1454{
1455        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1456                                        node, __builtin_return_address(0));
1457}
1458EXPORT_SYMBOL(vmalloc_node);
1459
1460#ifndef PAGE_KERNEL_EXEC
1461# define PAGE_KERNEL_EXEC PAGE_KERNEL
1462#endif
1463
1464/**
1465 *      vmalloc_exec  -  allocate virtually contiguous, executable memory
1466 *      @size:          allocation size
1467 *
1468 *      Kernel-internal function to allocate enough pages to cover @size
1469 *      the page level allocator and map them into contiguous and
1470 *      executable kernel virtual space.
1471 *
1472 *      For tight control over page level allocator and protection flags
1473 *      use __vmalloc() instead.
1474 */
1475
1476void *vmalloc_exec(unsigned long size)
1477{
1478        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1479                              -1, __builtin_return_address(0));
1480}
1481
1482#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1483#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1484#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1485#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1486#else
1487#define GFP_VMALLOC32 GFP_KERNEL
1488#endif
1489
1490/**
1491 *      vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
1492 *      @size:          allocation size
1493 *
1494 *      Allocate enough 32bit PA addressable pages to cover @size from the
1495 *      page level allocator and map them into contiguous kernel virtual space.
1496 */
1497void *vmalloc_32(unsigned long size)
1498{
1499        return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
1500                              -1, __builtin_return_address(0));
1501}
1502EXPORT_SYMBOL(vmalloc_32);
1503
1504/**
1505 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
1506 *      @size:          allocation size
1507 *
1508 * The resulting memory area is 32bit addressable and zeroed so it can be
1509 * mapped to userspace without leaking data.
1510 */
1511void *vmalloc_32_user(unsigned long size)
1512{
1513        struct vm_struct *area;
1514        void *ret;
1515
1516        ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1517                             -1, __builtin_return_address(0));
1518        if (ret) {
1519                area = find_vm_area(ret);
1520                area->flags |= VM_USERMAP;
1521        }
1522        return ret;
1523}
1524EXPORT_SYMBOL(vmalloc_32_user);
1525
1526long vread(char *buf, char *addr, unsigned long count)
1527{
1528        struct vm_struct *tmp;
1529        char *vaddr, *buf_start = buf;
1530        unsigned long n;
1531
1532        /* Don't allow overflow */
1533        if ((unsigned long) addr + count < count)
1534                count = -(unsigned long) addr;
1535
1536        read_lock(&vmlist_lock);
1537        for (tmp = vmlist; tmp; tmp = tmp->next) {
1538                vaddr = (char *) tmp->addr;
1539                if (addr >= vaddr + tmp->size - PAGE_SIZE)
1540                        continue;
1541                while (addr < vaddr) {
1542                        if (count == 0)
1543                                goto finished;
1544                        *buf = '\0';
1545                        buf++;
1546                        addr++;
1547                        count--;
1548                }
1549                n = vaddr + tmp->size - PAGE_SIZE - addr;
1550                do {
1551                        if (count == 0)
1552                                goto finished;
1553                        *buf = *addr;
1554                        buf++;
1555                        addr++;
1556                        count--;
1557                } while (--n > 0);
1558        }
1559finished:
1560        read_unlock(&vmlist_lock);
1561        return buf - buf_start;
1562}
1563
1564long vwrite(char *buf, char *addr, unsigned long count)
1565{
1566        struct vm_struct *tmp;
1567        char *vaddr, *buf_start = buf;
1568        unsigned long n;
1569
1570        /* Don't allow overflow */
1571        if ((unsigned long) addr + count < count)
1572                count = -(unsigned long) addr;
1573
1574        read_lock(&vmlist_lock);
1575        for (tmp = vmlist; tmp; tmp = tmp->next) {
1576                vaddr = (char *) tmp->addr;
1577                if (addr >= vaddr + tmp->size - PAGE_SIZE)
1578                        continue;
1579                while (addr < vaddr) {
1580                        if (count == 0)
1581                                goto finished;
1582                        buf++;
1583                        addr++;
1584                        count--;
1585                }
1586                n = vaddr + tmp->size - PAGE_SIZE - addr;
1587                do {
1588                        if (count == 0)
1589                                goto finished;
1590                        *addr = *buf;
1591                        buf++;
1592                        addr++;
1593                        count--;
1594                } while (--n > 0);
1595        }
1596finished:
1597        read_unlock(&vmlist_lock);
1598        return buf - buf_start;
1599}
1600
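/*
 * Illustrative sketch of the pair above, roughly how a /dev/kmem-style
 * reader pulls bytes out of vmalloc space ("kaddr" stands for some
 * hypothetical kernel virtual address of interest):
 *
 *	char tmp[64];
 *	long got = vread(tmp, (char *)kaddr, sizeof(tmp));
 *
 * On return 'got' bytes of tmp are valid; any holes crossed on the way
 * read back as zeroes.
 */
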
1601/**
1602 *      remap_vmalloc_range  -  map vmalloc pages to userspace
1603 *      @vma:           vma to cover (map full range of vma)
1604 *      @addr:          vmalloc memory
1605 *      @pgoff:         number of pages into addr before first page to map
1606 *
1607 *      Returns:        0 for success, -Exxx on failure
1608 *
1609 *      This function checks that addr is a valid vmalloc'ed area, and
1610 *      that it is big enough to cover the vma. Returns failure if
1611 *      those criteria aren't met.
1612 *
1613 *      Similar to remap_pfn_range() (see mm/memory.c)
1614 */
1615int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1616                                                unsigned long pgoff)
1617{
1618        struct vm_struct *area;
1619        unsigned long uaddr = vma->vm_start;
1620        unsigned long usize = vma->vm_end - vma->vm_start;
1621
1622        if ((PAGE_SIZE-1) & (unsigned long)addr)
1623                return -EINVAL;
1624
1625        area = find_vm_area(addr);
1626        if (!area)
1627                return -EINVAL;
1628
1629        if (!(area->flags & VM_USERMAP))
1630                return -EINVAL;
1631
1632        if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
1633                return -EINVAL;
1634
1635        addr += pgoff << PAGE_SHIFT;
1636        do {
1637                struct page *page = vmalloc_to_page(addr);
1638                int ret;
1639
1640                ret = vm_insert_page(vma, uaddr, page);
1641                if (ret)
1642                        return ret;
1643
1644                uaddr += PAGE_SIZE;
1645                addr += PAGE_SIZE;
1646                usize -= PAGE_SIZE;
1647        } while (usize > 0);
1648
1649        /* Keep "things" like memory migration away from these pages; the VM flags need a cleanup... */
1650        vma->vm_flags |= VM_RESERVED;
1651
1652        return 0;
1653}
1654EXPORT_SYMBOL(remap_vmalloc_range);
1655
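/*
 * Illustrative sketch of the intended pairing with vmalloc_32_user():
 * a driver's mmap handler exporting a VM_USERMAP buffer to userspace
 * ("mydev_buf" is a hypothetical pointer obtained from
 * vmalloc_32_user() at probe time):
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
 *	}
 */
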
1656/*
1657 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
1658 * have one.
1659 */
1660void __attribute__((weak)) vmalloc_sync_all(void)
1661{
1662}
1663
1664
1665static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
1666{
1667        /* Deliberately a no-op: apply_to_page_range() does all the hard work. */
1668        return 0;
1669}
1670
1671/**
1672 *      alloc_vm_area - allocate a range of kernel address space
1673 *      @size:          size of the area
1674 *
1675 *      Returns:        NULL on failure, vm_struct on success
1676 *
1677 *      This function reserves a range of kernel address space, and
1678 *      allocates pagetables to map that range.  No actual mappings
1679 *      are created.  If the kernel address space is not shared
1680 *      between processes, it syncs the pagetable across all
1681 *      processes.
1682 */
1683struct vm_struct *alloc_vm_area(size_t size)
1684{
1685        struct vm_struct *area;
1686
1687        area = get_vm_area_caller(size, VM_IOREMAP,
1688                                __builtin_return_address(0));
1689        if (area == NULL)
1690                return NULL;
1691
1692        /*
1693         * This ensures that page tables are constructed for this region
1694         * of kernel virtual address space and mapped into init_mm.
1695         */
1696        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
1697                                area->size, f, NULL)) {
1698                free_vm_area(area);
1699                return NULL;
1700        }
1701
1702        /* Make sure the pagetables are constructed in process kernel
1703         * mappings. */
1704        vmalloc_sync_all();
1705
1706        return area;
1707}
1708EXPORT_SYMBOL_GPL(alloc_vm_area);
1709
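/*
 * Illustrative sketch of an alloc_vm_area() user: reserve a page of
 * kernel address space so that a third party (a hypervisor, say) can
 * install the actual mapping later, then hand the range back:
 *
 *	struct vm_struct *vm = alloc_vm_area(PAGE_SIZE);
 *	if (!vm)
 *		return -ENOMEM;
 *	... have the mapping installed at vm->addr ...
 *	free_vm_area(vm);
 */
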
1710void free_vm_area(struct vm_struct *area)
1711{
1712        struct vm_struct *ret;
1713        ret = remove_vm_area(area->addr);
1714        BUG_ON(ret != area);
1715        kfree(area);
1716}
1717EXPORT_SYMBOL_GPL(free_vm_area);
1718
1719
1720#ifdef CONFIG_PROC_FS
1721static void *s_start(struct seq_file *m, loff_t *pos)
1722{
1723        loff_t n = *pos;
1724        struct vm_struct *v;
1725
1726        read_lock(&vmlist_lock);
1727        v = vmlist;
1728        while (n > 0 && v) {
1729                n--;
1730                v = v->next;
1731        }
1732        if (!n)
1733                return v;
1734
1735        return NULL;
1737}
1738
1739static void *s_next(struct seq_file *m, void *p, loff_t *pos)
1740{
1741        struct vm_struct *v = p;
1742
1743        ++*pos;
1744        return v->next;
1745}
1746
1747static void s_stop(struct seq_file *m, void *p)
1748{
1749        read_unlock(&vmlist_lock);
1750}
1751
1752static void show_numa_info(struct seq_file *m, struct vm_struct *v)
1753{
1754        if (NUMA_BUILD) {
1755                unsigned int nr, *counters = m->private;
1756
1757                if (!counters)
1758                        return;
1759
1760                memset(counters, 0, nr_node_ids * sizeof(unsigned int));
1761
1762                for (nr = 0; nr < v->nr_pages; nr++)
1763                        counters[page_to_nid(v->pages[nr])]++;
1764
1765                for_each_node_state(nr, N_HIGH_MEMORY)
1766                        if (counters[nr])
1767                                seq_printf(m, " N%u=%u", nr, counters[nr]);
1768        }
1769}
1770
1771static int s_show(struct seq_file *m, void *p)
1772{
1773        struct vm_struct *v = p;
1774
1775        seq_printf(m, "0x%p-0x%p %7ld",
1776                v->addr, v->addr + v->size, v->size);
1777
1778        if (v->caller) {
1779                char buff[KSYM_SYMBOL_LEN];
1780
1781                seq_putc(m, ' ');
1782                sprint_symbol(buff, (unsigned long)v->caller);
1783                seq_puts(m, buff);
1784        }
1785
1786        if (v->nr_pages)
1787                seq_printf(m, " pages=%d", v->nr_pages);
1788
1789        if (v->phys_addr)
1790                seq_printf(m, " phys=%lx", v->phys_addr);
1791
1792        if (v->flags & VM_IOREMAP)
1793                seq_printf(m, " ioremap");
1794
1795        if (v->flags & VM_ALLOC)
1796                seq_printf(m, " vmalloc");
1797
1798        if (v->flags & VM_MAP)
1799                seq_printf(m, " vmap");
1800
1801        if (v->flags & VM_USERMAP)
1802                seq_printf(m, " user");
1803
1804        if (v->flags & VM_VPAGES)
1805                seq_printf(m, " vpages");
1806
1807        show_numa_info(m, v);
1808        seq_putc(m, '\n');
1809        return 0;
1810}
1811
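/*
 * A /proc/vmallocinfo line produced by s_show() looks roughly like this
 * (the range, size, caller and node counts are made-up values):
 *
 *	0xf8800000-0xf8a00000 2097152 module_alloc+0x4a/0x60 pages=511 vmalloc N0=511
 */
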
1812static const struct seq_operations vmalloc_op = {
1813        .start = s_start,
1814        .next = s_next,
1815        .stop = s_stop,
1816        .show = s_show,
1817};
1818
1819static int vmalloc_open(struct inode *inode, struct file *file)
1820{
1821        unsigned int *ptr = NULL;
1822        int ret;
1823
1824        if (NUMA_BUILD)
1825                ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
1826        ret = seq_open(file, &vmalloc_op);
1827        if (!ret) {
1828                struct seq_file *m = file->private_data;
1829                m->private = ptr;
1830        } else
1831                kfree(ptr);
1832        return ret;
1833}
1834
1835static const struct file_operations proc_vmalloc_operations = {
1836        .open           = vmalloc_open,
1837        .read           = seq_read,
1838        .llseek         = seq_lseek,
1839        .release        = seq_release_private,
1840};
1841
1842static int __init proc_vmalloc_init(void)
1843{
1844        proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
1845        return 0;
1846}
1847module_init(proc_vmalloc_init);
1848#endif
1849