linux/mm/vmalloc.c
   1/*
   2 *  linux/mm/vmalloc.c
   3 *
   4 *  Copyright (C) 1993  Linus Torvalds
   5 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   6 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   7 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   8 *  Numa awareness, Christoph Lameter, SGI, June 2005
   9 */
  10
  11#include <linux/vmalloc.h>
  12#include <linux/mm.h>
  13#include <linux/module.h>
  14#include <linux/highmem.h>
  15#include <linux/slab.h>
  16#include <linux/spinlock.h>
  17#include <linux/interrupt.h>
  18#include <linux/proc_fs.h>
  19#include <linux/seq_file.h>
  20#include <linux/debugobjects.h>
  21#include <linux/kallsyms.h>
  22#include <linux/list.h>
  23#include <linux/rbtree.h>
  24#include <linux/radix-tree.h>
  25#include <linux/rcupdate.h>
  26#include <linux/bootmem.h>
  27#include <linux/pfn.h>
  28
  29#include <asm/atomic.h>
  30#include <asm/uaccess.h>
  31#include <asm/tlbflush.h>
  32
  33
  34/*** Page table manipulation functions ***/
  35
  36static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
  37{
  38        pte_t *pte;
  39
  40        pte = pte_offset_kernel(pmd, addr);
  41        do {
  42                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
  43                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
  44        } while (pte++, addr += PAGE_SIZE, addr != end);
  45}
  46
  47static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
  48{
  49        pmd_t *pmd;
  50        unsigned long next;
  51
  52        pmd = pmd_offset(pud, addr);
  53        do {
  54                next = pmd_addr_end(addr, end);
  55                if (pmd_none_or_clear_bad(pmd))
  56                        continue;
  57                vunmap_pte_range(pmd, addr, next);
  58        } while (pmd++, addr = next, addr != end);
  59}
  60
  61static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
  62{
  63        pud_t *pud;
  64        unsigned long next;
  65
  66        pud = pud_offset(pgd, addr);
  67        do {
  68                next = pud_addr_end(addr, end);
  69                if (pud_none_or_clear_bad(pud))
  70                        continue;
  71                vunmap_pmd_range(pud, addr, next);
  72        } while (pud++, addr = next, addr != end);
  73}
  74
  75static void vunmap_page_range(unsigned long addr, unsigned long end)
  76{
  77        pgd_t *pgd;
  78        unsigned long next;
  79
  80        BUG_ON(addr >= end);
  81        pgd = pgd_offset_k(addr);
  82        do {
  83                next = pgd_addr_end(addr, end);
  84                if (pgd_none_or_clear_bad(pgd))
  85                        continue;
  86                vunmap_pud_range(pgd, addr, next);
  87        } while (pgd++, addr = next, addr != end);
  88}
  89
  90static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
  91                unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  92{
  93        pte_t *pte;
  94
  95        /*
  96         * nr is a running index into the array which helps higher level
  97         * callers keep track of where we're up to.
  98         */
  99
 100        pte = pte_alloc_kernel(pmd, addr);
 101        if (!pte)
 102                return -ENOMEM;
 103        do {
 104                struct page *page = pages[*nr];
 105
 106                if (WARN_ON(!pte_none(*pte)))
 107                        return -EBUSY;
 108                if (WARN_ON(!page))
 109                        return -ENOMEM;
 110                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 111                (*nr)++;
 112        } while (pte++, addr += PAGE_SIZE, addr != end);
 113        return 0;
 114}
 115
 116static int vmap_pmd_range(pud_t *pud, unsigned long addr,
 117                unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 118{
 119        pmd_t *pmd;
 120        unsigned long next;
 121
 122        pmd = pmd_alloc(&init_mm, pud, addr);
 123        if (!pmd)
 124                return -ENOMEM;
 125        do {
 126                next = pmd_addr_end(addr, end);
 127                if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
 128                        return -ENOMEM;
 129        } while (pmd++, addr = next, addr != end);
 130        return 0;
 131}
 132
 133static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
 134                unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 135{
 136        pud_t *pud;
 137        unsigned long next;
 138
 139        pud = pud_alloc(&init_mm, pgd, addr);
 140        if (!pud)
 141                return -ENOMEM;
 142        do {
 143                next = pud_addr_end(addr, end);
 144                if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
 145                        return -ENOMEM;
 146        } while (pud++, addr = next, addr != end);
 147        return 0;
 148}
 149
 150/*
 151 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 152 * will have pfns corresponding to the "pages" array.
 153 *
 154 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 155 */
 156static int vmap_page_range_noflush(unsigned long start, unsigned long end,
 157                                   pgprot_t prot, struct page **pages)
 158{
 159        pgd_t *pgd;
 160        unsigned long next;
 161        unsigned long addr = start;
 162        int err = 0;
 163        int nr = 0;
 164
 165        BUG_ON(addr >= end);
 166        pgd = pgd_offset_k(addr);
 167        do {
 168                next = pgd_addr_end(addr, end);
 169                err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
 170                if (err)
 171                        break;
 172        } while (pgd++, addr = next, addr != end);
 173
 174        if (unlikely(err))
 175                return err;
 176        return nr;
 177}
 178
 179static int vmap_page_range(unsigned long start, unsigned long end,
 180                           pgprot_t prot, struct page **pages)
 181{
 182        int ret;
 183
 184        ret = vmap_page_range_noflush(start, end, prot, pages);
 185        flush_cache_vmap(start, end);
 186        return ret;
 187}
 188
 189static inline int is_vmalloc_or_module_addr(const void *x)
 190{
 191        /*
 192         * ARM, x86-64 and sparc64 put modules in a special place,
 193         * and fall back on vmalloc() if that fails. Others
 194         * just put them in the vmalloc space.
 195         */
 196#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
 197        unsigned long addr = (unsigned long)x;
 198        if (addr >= MODULES_VADDR && addr < MODULES_END)
 199                return 1;
 200#endif
 201        return is_vmalloc_addr(x);
 202}
 203
 204/*
 205 * Walk a vmap address to the struct page it maps.
 206 */
 207struct page *vmalloc_to_page(const void *vmalloc_addr)
 208{
 209        unsigned long addr = (unsigned long) vmalloc_addr;
 210        struct page *page = NULL;
 211        pgd_t *pgd = pgd_offset_k(addr);
 212
 213        /*
 214         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
 215         * architectures that do not vmalloc module space
 216         */
 217        VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 218
 219        if (!pgd_none(*pgd)) {
 220                pud_t *pud = pud_offset(pgd, addr);
 221                if (!pud_none(*pud)) {
 222                        pmd_t *pmd = pmd_offset(pud, addr);
 223                        if (!pmd_none(*pmd)) {
 224                                pte_t *ptep, pte;
 225
 226                                ptep = pte_offset_map(pmd, addr);
 227                                pte = *ptep;
 228                                if (pte_present(pte))
 229                                        page = pte_page(pte);
 230                                pte_unmap(ptep);
 231                        }
 232                }
 233        }
 234        return page;
 235}
 236EXPORT_SYMBOL(vmalloc_to_page);
 237
 238/*
 239 * Map a vmalloc()-space virtual address to the physical page frame number.
 240 */
 241unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 242{
 243        return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 244}
 245EXPORT_SYMBOL(vmalloc_to_pfn);
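
/*
 * Illustrative example (editorial addition, not from the original file): a
 * caller that needs the struct page behind every page of a vmalloc()ed
 * buffer can walk it with vmalloc_to_page(); "buf" and "size" are
 * hypothetical names here.
 *
 *	unsigned long off;
 *	struct page *page;
 *
 *	for (off = 0; off < size; off += PAGE_SIZE) {
 *		page = vmalloc_to_page(buf + off);
 *		if (!page)
 *			break;
 *		(use the page, e.g. add it to a scatterlist)
 *	}
 */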
 246
 247
 248/*** Global kva allocator ***/
 249
 250#define VM_LAZY_FREE    0x01
 251#define VM_LAZY_FREEING 0x02
 252#define VM_VM_AREA      0x04
 253
 254struct vmap_area {
 255        unsigned long va_start;
 256        unsigned long va_end;
 257        unsigned long flags;
 258        struct rb_node rb_node;         /* address sorted rbtree */
 259        struct list_head list;          /* address sorted list */
 260        struct list_head purge_list;    /* "lazy purge" list */
 261        void *private;
 262        struct rcu_head rcu_head;
 263};
 264
 265static DEFINE_SPINLOCK(vmap_area_lock);
 266static struct rb_root vmap_area_root = RB_ROOT;
 267static LIST_HEAD(vmap_area_list);
 268
 269static struct vmap_area *__find_vmap_area(unsigned long addr)
 270{
 271        struct rb_node *n = vmap_area_root.rb_node;
 272
 273        while (n) {
 274                struct vmap_area *va;
 275
 276                va = rb_entry(n, struct vmap_area, rb_node);
 277                if (addr < va->va_start)
 278                        n = n->rb_left;
 279                else if (addr > va->va_start)
 280                        n = n->rb_right;
 281                else
 282                        return va;
 283        }
 284
 285        return NULL;
 286}
 287
 288static void __insert_vmap_area(struct vmap_area *va)
 289{
 290        struct rb_node **p = &vmap_area_root.rb_node;
 291        struct rb_node *parent = NULL;
 292        struct rb_node *tmp;
 293
 294        while (*p) {
 295                struct vmap_area *tmp;
 296
 297                parent = *p;
 298                tmp = rb_entry(parent, struct vmap_area, rb_node);
 299                if (va->va_start < tmp->va_end)
 300                        p = &(*p)->rb_left;
 301                else if (va->va_end > tmp->va_start)
 302                        p = &(*p)->rb_right;
 303                else
 304                        BUG();
 305        }
 306
 307        rb_link_node(&va->rb_node, parent, p);
 308        rb_insert_color(&va->rb_node, &vmap_area_root);
 309
 310        /* address-sort this list so it is usable like the vmlist */
 311        tmp = rb_prev(&va->rb_node);
 312        if (tmp) {
 313                struct vmap_area *prev;
 314                prev = rb_entry(tmp, struct vmap_area, rb_node);
 315                list_add_rcu(&va->list, &prev->list);
 316        } else
 317                list_add_rcu(&va->list, &vmap_area_list);
 318}
 319
 320static void purge_vmap_area_lazy(void);
 321
 322/*
 323 * Allocate a region of KVA of the specified size and alignment, within the
 324 * vstart and vend.
 325 */
 326static struct vmap_area *alloc_vmap_area(unsigned long size,
 327                                unsigned long align,
 328                                unsigned long vstart, unsigned long vend,
 329                                int node, gfp_t gfp_mask)
 330{
 331        struct vmap_area *va;
 332        struct rb_node *n;
 333        unsigned long addr;
 334        int purged = 0;
 335
 336        BUG_ON(!size);
 337        BUG_ON(size & ~PAGE_MASK);
 338
 339        va = kmalloc_node(sizeof(struct vmap_area),
 340                        gfp_mask & GFP_RECLAIM_MASK, node);
 341        if (unlikely(!va))
 342                return ERR_PTR(-ENOMEM);
 343
 344retry:
 345        addr = ALIGN(vstart, align);
 346
 347        spin_lock(&vmap_area_lock);
 348        if (addr + size - 1 < addr)
 349                goto overflow;
 350
 351        /* XXX: could have a last_hole cache */
 352        n = vmap_area_root.rb_node;
 353        if (n) {
 354                struct vmap_area *first = NULL;
 355
 356                do {
 357                        struct vmap_area *tmp;
 358                        tmp = rb_entry(n, struct vmap_area, rb_node);
 359                        if (tmp->va_end >= addr) {
 360                                if (!first && tmp->va_start < addr + size)
 361                                        first = tmp;
 362                                n = n->rb_left;
 363                        } else {
 364                                first = tmp;
 365                                n = n->rb_right;
 366                        }
 367                } while (n);
 368
 369                if (!first)
 370                        goto found;
 371
 372                if (first->va_end < addr) {
 373                        n = rb_next(&first->rb_node);
 374                        if (n)
 375                                first = rb_entry(n, struct vmap_area, rb_node);
 376                        else
 377                                goto found;
 378                }
 379
 380                while (addr + size > first->va_start && addr + size <= vend) {
 381                        addr = ALIGN(first->va_end + PAGE_SIZE, align);
 382                        if (addr + size - 1 < addr)
 383                                goto overflow;
 384
 385                        n = rb_next(&first->rb_node);
 386                        if (n)
 387                                first = rb_entry(n, struct vmap_area, rb_node);
 388                        else
 389                                goto found;
 390                }
 391        }
 392found:
 393        if (addr + size > vend) {
 394overflow:
 395                spin_unlock(&vmap_area_lock);
 396                if (!purged) {
 397                        purge_vmap_area_lazy();
 398                        purged = 1;
 399                        goto retry;
 400                }
 401                if (printk_ratelimit())
 402                        printk(KERN_WARNING
 403                                "vmap allocation for size %lu failed: "
 404                                "use vmalloc=<size> to increase size.\n", size);
 405                kfree(va);
 406                return ERR_PTR(-EBUSY);
 407        }
 408
 409        BUG_ON(addr & (align-1));
 410
 411        va->va_start = addr;
 412        va->va_end = addr + size;
 413        va->flags = 0;
 414        __insert_vmap_area(va);
 415        spin_unlock(&vmap_area_lock);
 416
 417        return va;
 418}
 419
 420static void rcu_free_va(struct rcu_head *head)
 421{
 422        struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);
 423
 424        kfree(va);
 425}
 426
 427static void __free_vmap_area(struct vmap_area *va)
 428{
 429        BUG_ON(RB_EMPTY_NODE(&va->rb_node));
 430        rb_erase(&va->rb_node, &vmap_area_root);
 431        RB_CLEAR_NODE(&va->rb_node);
 432        list_del_rcu(&va->list);
 433
 434        call_rcu(&va->rcu_head, rcu_free_va);
 435}
 436
 437/*
 438 * Free a region of KVA allocated by alloc_vmap_area
 439 */
 440static void free_vmap_area(struct vmap_area *va)
 441{
 442        spin_lock(&vmap_area_lock);
 443        __free_vmap_area(va);
 444        spin_unlock(&vmap_area_lock);
 445}
 446
 447/*
 448 * Clear the pagetable entries of a given vmap_area
 449 */
 450static void unmap_vmap_area(struct vmap_area *va)
 451{
 452        vunmap_page_range(va->va_start, va->va_end);
 453}
 454
 455static void vmap_debug_free_range(unsigned long start, unsigned long end)
 456{
 457        /*
 458         * Unmap page tables and force a TLB flush immediately if
 459         * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
 460         * bugs similarly to those in linear kernel virtual address
 461         * space after a page has been freed.
 462         *
 463         * All the lazy freeing logic is still retained, in order to
 464         * minimise intrusiveness of this debugging feature.
 465         *
 466         * This is going to be *slow* (linear kernel virtual address
 467         * debugging doesn't do a broadcast TLB flush so it is a lot
 468         * faster).
 469         */
 470#ifdef CONFIG_DEBUG_PAGEALLOC
 471        vunmap_page_range(start, end);
 472        flush_tlb_kernel_range(start, end);
 473#endif
 474}
 475
 476/*
 477 * lazy_max_pages is the maximum amount of virtual address space we gather up
 478 * before attempting to purge with a TLB flush.
 479 *
 480 * There is a tradeoff here: a larger number will cover more kernel page tables
 481 * and take slightly longer to purge, but it will linearly reduce the number of
 482 * global TLB flushes that must be performed. It would seem natural to scale
 483 * this number up linearly with the number of CPUs (because vmapping activity
 484 * could also scale linearly with the number of CPUs), however it is likely
 485 * that in practice, workloads might be constrained in other ways that mean
 486 * vmap activity will not scale linearly with CPUs. Also, I want to be
 487 * conservative and not introduce a big latency on huge systems, so go with
 488 * a less aggressive log scale. It will still be an improvement over the old
 489 * code, and it will be simple to change the scale factor if we find that it
 490 * becomes a problem on bigger systems.
 491 */
 492static unsigned long lazy_max_pages(void)
 493{
 494        unsigned int log;
 495
 496        log = fls(num_online_cpus());
 497
 498        return log * (32UL * 1024 * 1024 / PAGE_SIZE);
 499}
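
/*
 * Worked example (editorial addition): with 4KB pages, 32MB corresponds to
 * 8192 pages.  On a 4-CPU machine fls(4) == 3, so lazy_max_pages() returns
 * 3 * 8192 == 24576 pages, i.e. roughly 96MB of lazily freed vmap space can
 * accumulate before a purge (and its global TLB flush) is forced.
 */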
 500
 501static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 502
 503/*
 504 * Purges all lazily-freed vmap areas.
 505 *
 506 * If sync is 0 then don't purge if there is already a purge in progress.
 507 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 508 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 509 * their own TLB flushing).
 510 * Returns with *start = min(*start, lowest purged address)
 511 *              *end = max(*end, highest purged address)
 512 */
 513static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 514                                        int sync, int force_flush)
 515{
 516        static DEFINE_SPINLOCK(purge_lock);
 517        LIST_HEAD(valist);
 518        struct vmap_area *va;
 519        struct vmap_area *n_va;
 520        int nr = 0;
 521
 522        /*
 523         * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
 524         * should not expect such behaviour. This just simplifies locking for
 525         * the case that isn't actually used at the moment anyway.
 526         */
 527        if (!sync && !force_flush) {
 528                if (!spin_trylock(&purge_lock))
 529                        return;
 530        } else
 531                spin_lock(&purge_lock);
 532
 533        rcu_read_lock();
 534        list_for_each_entry_rcu(va, &vmap_area_list, list) {
 535                if (va->flags & VM_LAZY_FREE) {
 536                        if (va->va_start < *start)
 537                                *start = va->va_start;
 538                        if (va->va_end > *end)
 539                                *end = va->va_end;
 540                        nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
 541                        unmap_vmap_area(va);
 542                        list_add_tail(&va->purge_list, &valist);
 543                        va->flags |= VM_LAZY_FREEING;
 544                        va->flags &= ~VM_LAZY_FREE;
 545                }
 546        }
 547        rcu_read_unlock();
 548
 549        if (nr) {
 550                BUG_ON(nr > atomic_read(&vmap_lazy_nr));
 551                atomic_sub(nr, &vmap_lazy_nr);
 552        }
 553
 554        if (nr || force_flush)
 555                flush_tlb_kernel_range(*start, *end);
 556
 557        if (nr) {
 558                spin_lock(&vmap_area_lock);
 559                list_for_each_entry_safe(va, n_va, &valist, purge_list)
 560                        __free_vmap_area(va);
 561                spin_unlock(&vmap_area_lock);
 562        }
 563        spin_unlock(&purge_lock);
 564}
 565
 566/*
 567 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 568 * is already purging.
 569 */
 570static void try_purge_vmap_area_lazy(void)
 571{
 572        unsigned long start = ULONG_MAX, end = 0;
 573
 574        __purge_vmap_area_lazy(&start, &end, 0, 0);
 575}
 576
 577/*
 578 * Kick off a purge of the outstanding lazy areas.
 579 */
 580static void purge_vmap_area_lazy(void)
 581{
 582        unsigned long start = ULONG_MAX, end = 0;
 583
 584        __purge_vmap_area_lazy(&start, &end, 1, 0);
 585}
 586
 587/*
 588 * Free and unmap a vmap area; the caller must already have called
 589 * flush_cache_vunmap() for the correct range.
 590 */
 591static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 592{
 593        va->flags |= VM_LAZY_FREE;
 594        atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 595        if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
 596                try_purge_vmap_area_lazy();
 597}
 598
 599/*
 600 * Free and unmap a vmap area
 601 */
 602static void free_unmap_vmap_area(struct vmap_area *va)
 603{
 604        flush_cache_vunmap(va->va_start, va->va_end);
 605        free_unmap_vmap_area_noflush(va);
 606}
 607
 608static struct vmap_area *find_vmap_area(unsigned long addr)
 609{
 610        struct vmap_area *va;
 611
 612        spin_lock(&vmap_area_lock);
 613        va = __find_vmap_area(addr);
 614        spin_unlock(&vmap_area_lock);
 615
 616        return va;
 617}
 618
 619static void free_unmap_vmap_area_addr(unsigned long addr)
 620{
 621        struct vmap_area *va;
 622
 623        va = find_vmap_area(addr);
 624        BUG_ON(!va);
 625        free_unmap_vmap_area(va);
 626}
 627
 628
 629/*** Per cpu kva allocator ***/
 630
 631/*
 632 * vmap space is limited especially on 32 bit architectures. Ensure there is
 633 * room for at least 16 percpu vmap blocks per CPU.
 634 */
 635/*
 636 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 637 * to #define VMALLOC_SPACE             (VMALLOC_END-VMALLOC_START). Guess
 638 * instead (we just need a rough idea)
 639 */
 640#if BITS_PER_LONG == 32
 641#define VMALLOC_SPACE           (128UL*1024*1024)
 642#else
 643#define VMALLOC_SPACE           (128UL*1024*1024*1024)
 644#endif
 645
 646#define VMALLOC_PAGES           (VMALLOC_SPACE / PAGE_SIZE)
 647#define VMAP_MAX_ALLOC          BITS_PER_LONG   /* 256K with 4K pages */
 648#define VMAP_BBMAP_BITS_MAX     1024    /* 4MB with 4K pages */
 649#define VMAP_BBMAP_BITS_MIN     (VMAP_MAX_ALLOC*2)
 650#define VMAP_MIN(x, y)          ((x) < (y) ? (x) : (y)) /* can't use min() */
 651#define VMAP_MAX(x, y)          ((x) > (y) ? (x) : (y)) /* can't use max() */
 652#define VMAP_BBMAP_BITS         VMAP_MIN(VMAP_BBMAP_BITS_MAX,           \
 653                                        VMAP_MAX(VMAP_BBMAP_BITS_MIN,   \
 654                                                VMALLOC_PAGES / NR_CPUS / 16))
 655
 656#define VMAP_BLOCK_SIZE         (VMAP_BBMAP_BITS * PAGE_SIZE)
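
/*
 * Worked example (editorial addition), assuming a 32-bit kernel with 4KB
 * pages and NR_CPUS == 4: VMALLOC_SPACE is guessed at 128MB, so
 * VMALLOC_PAGES == 32768 and VMALLOC_PAGES / NR_CPUS / 16 == 512.  That
 * value lies between VMAP_BBMAP_BITS_MIN (2 * 32 == 64) and
 * VMAP_BBMAP_BITS_MAX (1024), so VMAP_BBMAP_BITS == 512 and
 * VMAP_BLOCK_SIZE == 2MB.
 */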
 657
 658static bool vmap_initialized __read_mostly = false;
 659
 660struct vmap_block_queue {
 661        spinlock_t lock;
 662        struct list_head free;
 663        struct list_head dirty;
 664        unsigned int nr_dirty;
 665};
 666
 667struct vmap_block {
 668        spinlock_t lock;
 669        struct vmap_area *va;
 670        struct vmap_block_queue *vbq;
 671        unsigned long free, dirty;
 672        DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
 673        DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
 674        union {
 675                struct list_head free_list;
 676                struct rcu_head rcu_head;
 677        };
 678};
 679
 680/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
 681static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
 682
 683/*
 684 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 685 * in the free path. Could get rid of this if we change the API to return a
 686 * "cookie" from alloc, to be passed to free. But no big deal yet.
 687 */
 688static DEFINE_SPINLOCK(vmap_block_tree_lock);
 689static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
 690
 691/*
 692 * We should probably have a fallback mechanism to allocate virtual memory
 693 * out of partially filled vmap blocks. However vmap block sizing should be
 694 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 695 * big problem.
 696 */
 697
 698static unsigned long addr_to_vb_idx(unsigned long addr)
 699{
 700        addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
 701        addr /= VMAP_BLOCK_SIZE;
 702        return addr;
 703}
 704
 705static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 706{
 707        struct vmap_block_queue *vbq;
 708        struct vmap_block *vb;
 709        struct vmap_area *va;
 710        unsigned long vb_idx;
 711        int node, err;
 712
 713        node = numa_node_id();
 714
 715        vb = kmalloc_node(sizeof(struct vmap_block),
 716                        gfp_mask & GFP_RECLAIM_MASK, node);
 717        if (unlikely(!vb))
 718                return ERR_PTR(-ENOMEM);
 719
 720        va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 721                                        VMALLOC_START, VMALLOC_END,
 722                                        node, gfp_mask);
 723        if (unlikely(IS_ERR(va))) {
 724                kfree(vb);
 725                return ERR_PTR(PTR_ERR(va));
 726        }
 727
 728        err = radix_tree_preload(gfp_mask);
 729        if (unlikely(err)) {
 730                kfree(vb);
 731                free_vmap_area(va);
 732                return ERR_PTR(err);
 733        }
 734
 735        spin_lock_init(&vb->lock);
 736        vb->va = va;
 737        vb->free = VMAP_BBMAP_BITS;
 738        vb->dirty = 0;
 739        bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
 740        bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
 741        INIT_LIST_HEAD(&vb->free_list);
 742
 743        vb_idx = addr_to_vb_idx(va->va_start);
 744        spin_lock(&vmap_block_tree_lock);
 745        err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
 746        spin_unlock(&vmap_block_tree_lock);
 747        BUG_ON(err);
 748        radix_tree_preload_end();
 749
 750        vbq = &get_cpu_var(vmap_block_queue);
 751        vb->vbq = vbq;
 752        spin_lock(&vbq->lock);
 753        list_add(&vb->free_list, &vbq->free);
 754        spin_unlock(&vbq->lock);
 755        put_cpu_var(vmap_block_queue);
 756
 757        return vb;
 758}
 759
 760static void rcu_free_vb(struct rcu_head *head)
 761{
 762        struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);
 763
 764        kfree(vb);
 765}
 766
 767static void free_vmap_block(struct vmap_block *vb)
 768{
 769        struct vmap_block *tmp;
 770        unsigned long vb_idx;
 771
 772        BUG_ON(!list_empty(&vb->free_list));
 773
 774        vb_idx = addr_to_vb_idx(vb->va->va_start);
 775        spin_lock(&vmap_block_tree_lock);
 776        tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
 777        spin_unlock(&vmap_block_tree_lock);
 778        BUG_ON(tmp != vb);
 779
 780        free_unmap_vmap_area_noflush(vb->va);
 781        call_rcu(&vb->rcu_head, rcu_free_vb);
 782}
 783
 784static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 785{
 786        struct vmap_block_queue *vbq;
 787        struct vmap_block *vb;
 788        unsigned long addr = 0;
 789        unsigned int order;
 790
 791        BUG_ON(size & ~PAGE_MASK);
 792        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
 793        order = get_order(size);
 794
 795again:
 796        rcu_read_lock();
 797        vbq = &get_cpu_var(vmap_block_queue);
 798        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 799                int i;
 800
 801                spin_lock(&vb->lock);
 802                i = bitmap_find_free_region(vb->alloc_map,
 803                                                VMAP_BBMAP_BITS, order);
 804
 805                if (i >= 0) {
 806                        addr = vb->va->va_start + (i << PAGE_SHIFT);
 807                        BUG_ON(addr_to_vb_idx(addr) !=
 808                                        addr_to_vb_idx(vb->va->va_start));
 809                        vb->free -= 1UL << order;
 810                        if (vb->free == 0) {
 811                                spin_lock(&vbq->lock);
 812                                list_del_init(&vb->free_list);
 813                                spin_unlock(&vbq->lock);
 814                        }
 815                        spin_unlock(&vb->lock);
 816                        break;
 817                }
 818                spin_unlock(&vb->lock);
 819        }
 820        put_cpu_var(vmap_block_queue);
 821        rcu_read_unlock();
 822
 823        if (!addr) {
 824                vb = new_vmap_block(gfp_mask);
 825                if (IS_ERR(vb))
 826                        return vb;
 827                goto again;
 828        }
 829
 830        return (void *)addr;
 831}
 832
 833static void vb_free(const void *addr, unsigned long size)
 834{
 835        unsigned long offset;
 836        unsigned long vb_idx;
 837        unsigned int order;
 838        struct vmap_block *vb;
 839
 840        BUG_ON(size & ~PAGE_MASK);
 841        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
 842
 843        flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
 844
 845        order = get_order(size);
 846
 847        offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
 848
 849        vb_idx = addr_to_vb_idx((unsigned long)addr);
 850        rcu_read_lock();
 851        vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
 852        rcu_read_unlock();
 853        BUG_ON(!vb);
 854
 855        spin_lock(&vb->lock);
 856        bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
 857
 858        vb->dirty += 1UL << order;
 859        if (vb->dirty == VMAP_BBMAP_BITS) {
 860                BUG_ON(vb->free || !list_empty(&vb->free_list));
 861                spin_unlock(&vb->lock);
 862                free_vmap_block(vb);
 863        } else
 864                spin_unlock(&vb->lock);
 865}
 866
 867/**
 868 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 869 *
 870 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 871 * to amortize TLB flushing overheads. What this means is that any page you
 872 * have now may, in a former life, have been mapped into a kernel virtual
 873 * address by the vmap layer, and so there might be some CPUs with TLB entries
 874 * still referencing that page (in addition to the regular 1:1 kernel mapping).
 875 *
 876 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 877 * be sure that none of the pages we have control over will have any aliases
 878 * from the vmap layer.
 879 */
 880void vm_unmap_aliases(void)
 881{
 882        unsigned long start = ULONG_MAX, end = 0;
 883        int cpu;
 884        int flush = 0;
 885
 886        if (unlikely(!vmap_initialized))
 887                return;
 888
 889        for_each_possible_cpu(cpu) {
 890                struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 891                struct vmap_block *vb;
 892
 893                rcu_read_lock();
 894                list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 895                        int i;
 896
 897                        spin_lock(&vb->lock);
 898                        i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
 899                        while (i < VMAP_BBMAP_BITS) {
 900                                unsigned long s, e;
 901                                int j;
 902                                j = find_next_zero_bit(vb->dirty_map,
 903                                        VMAP_BBMAP_BITS, i);
 904
 905                                s = vb->va->va_start + (i << PAGE_SHIFT);
 906                                e = vb->va->va_start + (j << PAGE_SHIFT);
 907                                vunmap_page_range(s, e);
 908                                flush = 1;
 909
 910                                if (s < start)
 911                                        start = s;
 912                                if (e > end)
 913                                        end = e;
 914
 915                                i = j;
 916                                i = find_next_bit(vb->dirty_map,
 917                                                        VMAP_BBMAP_BITS, i);
 918                        }
 919                        spin_unlock(&vb->lock);
 920                }
 921                rcu_read_unlock();
 922        }
 923
 924        __purge_vmap_area_lazy(&start, &end, 1, flush);
 925}
 926EXPORT_SYMBOL_GPL(vm_unmap_aliases);
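
/*
 * Illustrative sketch (editorial addition): code that is about to change the
 * attributes of pages it owns, or hand them over to another entity, can drop
 * any lazily kept vmap aliases first:
 *
 *	vm_unmap_aliases();
 *	(from this point only mappings the caller knows about reference the
 *	 pages, until something vmaps them again)
 */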
 927
 928/**
 929 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 930 * @mem: the pointer returned by vm_map_ram
 931 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 932 */
 933void vm_unmap_ram(const void *mem, unsigned int count)
 934{
 935        unsigned long size = count << PAGE_SHIFT;
 936        unsigned long addr = (unsigned long)mem;
 937
 938        BUG_ON(!addr);
 939        BUG_ON(addr < VMALLOC_START);
 940        BUG_ON(addr > VMALLOC_END);
 941        BUG_ON(addr & (PAGE_SIZE-1));
 942
 943        debug_check_no_locks_freed(mem, size);
 944        vmap_debug_free_range(addr, addr+size);
 945
 946        if (likely(count <= VMAP_MAX_ALLOC))
 947                vb_free(mem, size);
 948        else
 949                free_unmap_vmap_area_addr(addr);
 950}
 951EXPORT_SYMBOL(vm_unmap_ram);
 952
 953/**
 954 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 955 * @pages: an array of pointers to the pages to be mapped
 956 * @count: number of pages
 957 * @node: prefer to allocate data structures on this node
 958 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 959 *
 960 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 961 */
 962void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 963{
 964        unsigned long size = count << PAGE_SHIFT;
 965        unsigned long addr;
 966        void *mem;
 967
 968        if (likely(count <= VMAP_MAX_ALLOC)) {
 969                mem = vb_alloc(size, GFP_KERNEL);
 970                if (IS_ERR(mem))
 971                        return NULL;
 972                addr = (unsigned long)mem;
 973        } else {
 974                struct vmap_area *va;
 975                va = alloc_vmap_area(size, PAGE_SIZE,
 976                                VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
 977                if (IS_ERR(va))
 978                        return NULL;
 979
 980                addr = va->va_start;
 981                mem = (void *)addr;
 982        }
 983        if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
 984                vm_unmap_ram(mem, count);
 985                return NULL;
 986        }
 987        return mem;
 988}
 989EXPORT_SYMBOL(vm_map_ram);
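
/*
 * Illustrative sketch (editorial addition): a typical vm_map_ram() /
 * vm_unmap_ram() pair for a short-lived mapping of "nr" pages held in a
 * hypothetical "pages" array:
 *
 *	void *buf;
 *
 *	buf = vm_map_ram(pages, nr, -1, PAGE_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	(access the nr pages contiguously through buf)
 *	vm_unmap_ram(buf, nr);
 */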
 990
 991/**
 992 * vm_area_register_early - register vmap area early during boot
 993 * @vm: vm_struct to register
 994 * @align: requested alignment
 995 *
 996 * This function is used to register kernel vm area before
 997 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 998 * proper values on entry and other fields should be zero.  On return,
 999 * vm->addr contains the allocated address.
1000 *
1001 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1002 */
1003void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1004{
1005        static size_t vm_init_off __initdata;
1006        unsigned long addr;
1007
1008        addr = ALIGN(VMALLOC_START + vm_init_off, align);
1009        vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1010
1011        vm->addr = (void *)addr;
1012
1013        vm->next = vmlist;
1014        vmlist = vm;
1015}
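
/*
 * Illustrative sketch (editorial addition): a boot-time caller fills in the
 * size and flags of a statically allocated vm_struct and lets this function
 * pick the address; "early_vm" and "size" are hypothetical:
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.flags = VM_ALLOC;
 *	early_vm.size = size;
 *	vm_area_register_early(&early_vm, PAGE_SIZE);
 *	(early_vm.addr now holds the reserved address)
 */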
1016
1017void __init vmalloc_init(void)
1018{
1019        struct vmap_area *va;
1020        struct vm_struct *tmp;
1021        int i;
1022
1023        for_each_possible_cpu(i) {
1024                struct vmap_block_queue *vbq;
1025
1026                vbq = &per_cpu(vmap_block_queue, i);
1027                spin_lock_init(&vbq->lock);
1028                INIT_LIST_HEAD(&vbq->free);
1029                INIT_LIST_HEAD(&vbq->dirty);
1030                vbq->nr_dirty = 0;
1031        }
1032
1033        /* Import existing vmlist entries. */
1034        for (tmp = vmlist; tmp; tmp = tmp->next) {
1035                va = alloc_bootmem(sizeof(struct vmap_area));
1036                va->flags = tmp->flags | VM_VM_AREA;
1037                va->va_start = (unsigned long)tmp->addr;
1038                va->va_end = va->va_start + tmp->size;
1039                __insert_vmap_area(va);
1040        }
1041        vmap_initialized = true;
1042}
1043
1044/**
1045 * map_kernel_range_noflush - map kernel VM area with the specified pages
1046 * @addr: start of the VM area to map
1047 * @size: size of the VM area to map
1048 * @prot: page protection flags to use
1049 * @pages: pages to map
1050 *
1051 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
1052 * specify should have been allocated using get_vm_area() and its
1053 * friends.
1054 *
1055 * NOTE:
1056 * This function does NOT do any cache flushing.  The caller is
1057 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1058 * before calling this function.
1059 *
1060 * RETURNS:
1061 * The number of pages mapped on success, -errno on failure.
1062 */
1063int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1064                             pgprot_t prot, struct page **pages)
1065{
1066        return vmap_page_range_noflush(addr, addr + size, prot, pages);
1067}
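
/*
 * Illustrative sketch (editorial addition): pairing this with get_vm_area()
 * and flushing by hand, in the same way vmap_page_range() above flushes
 * after establishing the mappings; "nr" and "pages" are hypothetical and
 * error handling is omitted:
 *
 *	struct vm_struct *area = get_vm_area(nr << PAGE_SHIFT, VM_MAP);
 *	unsigned long addr = (unsigned long)area->addr;
 *
 *	if (map_kernel_range_noflush(addr, nr << PAGE_SHIFT,
 *				     PAGE_KERNEL, pages) < 0)
 *		goto fail;
 *	flush_cache_vmap(addr, addr + (nr << PAGE_SHIFT));
 */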
1068
1069/**
1070 * unmap_kernel_range_noflush - unmap kernel VM area
1071 * @addr: start of the VM area to unmap
1072 * @size: size of the VM area to unmap
1073 *
1074 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
1075 * specify should have been allocated using get_vm_area() and its
1076 * friends.
1077 *
1078 * NOTE:
1079 * This function does NOT do any cache flushing.  The caller is
1080 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1081 * before calling this function and flush_tlb_kernel_range() after.
1082 */
1083void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1084{
1085        vunmap_page_range(addr, addr + size);
1086}
1087
1088/**
1089 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1090 * @addr: start of the VM area to unmap
1091 * @size: size of the VM area to unmap
1092 *
1093 * Similar to unmap_kernel_range_noflush(), but flushes the cache before
1094 * the unmapping and the TLB after.
1095 */
1096void unmap_kernel_range(unsigned long addr, unsigned long size)
1097{
1098        unsigned long end = addr + size;
1099
1100        flush_cache_vunmap(addr, end);
1101        vunmap_page_range(addr, end);
1102        flush_tlb_kernel_range(addr, end);
1103}
1104
1105int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
1106{
1107        unsigned long addr = (unsigned long)area->addr;
1108        unsigned long end = addr + area->size - PAGE_SIZE;
1109        int err;
1110
1111        err = vmap_page_range(addr, end, prot, *pages);
1112        if (err > 0) {
1113                *pages += err;
1114                err = 0;
1115        }
1116
1117        return err;
1118}
1119EXPORT_SYMBOL_GPL(map_vm_area);
1120
1121/*** Old vmalloc interfaces ***/
1122DEFINE_RWLOCK(vmlist_lock);
1123struct vm_struct *vmlist;
1124
1125static struct vm_struct *__get_vm_area_node(unsigned long size,
1126                unsigned long flags, unsigned long start, unsigned long end,
1127                int node, gfp_t gfp_mask, void *caller)
1128{
1129        struct vmap_area *va;
1130        struct vm_struct *area;
1131        struct vm_struct *tmp, **p;
1132        unsigned long align = 1;
1133
1134        BUG_ON(in_interrupt());
1135        if (flags & VM_IOREMAP) {
1136                int bit = fls(size);
1137
1138                if (bit > IOREMAP_MAX_ORDER)
1139                        bit = IOREMAP_MAX_ORDER;
1140                else if (bit < PAGE_SHIFT)
1141                        bit = PAGE_SHIFT;
1142
1143                align = 1ul << bit;
1144        }
1145
1146        size = PAGE_ALIGN(size);
1147        if (unlikely(!size))
1148                return NULL;
1149
1150        area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1151        if (unlikely(!area))
1152                return NULL;
1153
1154        /*
1155         * We always allocate a guard page.
1156         */
1157        size += PAGE_SIZE;
1158
1159        va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1160        if (IS_ERR(va)) {
1161                kfree(area);
1162                return NULL;
1163        }
1164
1165        area->flags = flags;
1166        area->addr = (void *)va->va_start;
1167        area->size = size;
1168        area->pages = NULL;
1169        area->nr_pages = 0;
1170        area->phys_addr = 0;
1171        area->caller = caller;
1172        va->private = area;
1173        va->flags |= VM_VM_AREA;
1174
1175        write_lock(&vmlist_lock);
1176        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1177                if (tmp->addr >= area->addr)
1178                        break;
1179        }
1180        area->next = *p;
1181        *p = area;
1182        write_unlock(&vmlist_lock);
1183
1184        return area;
1185}
1186
1187struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1188                                unsigned long start, unsigned long end)
1189{
1190        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
1191                                                __builtin_return_address(0));
1192}
1193EXPORT_SYMBOL_GPL(__get_vm_area);
1194
1195struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1196                                       unsigned long start, unsigned long end,
1197                                       void *caller)
1198{
1199        return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
1200                                  caller);
1201}
1202
1203/**
1204 *      get_vm_area  -  reserve a contiguous kernel virtual area
1205 *      @size:          size of the area
1206 *      @flags:         %VM_IOREMAP for I/O mappings or VM_ALLOC
1207 *
1208 *      Search for an area of @size in the kernel virtual mapping area,
1209 *      and reserve it for our purposes.  Returns the area descriptor
1210 *      on success or %NULL on failure.
1211 */
1212struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1213{
1214        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
1215                                -1, GFP_KERNEL, __builtin_return_address(0));
1216}
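
/*
 * Illustrative sketch (editorial addition): the usual pattern is to reserve
 * an area here and then populate it, e.g. with map_vm_area() as vmap()
 * below does; note that map_vm_area() advances the pages pointer it is
 * given:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_MAP);
 *
 *	if (!area)
 *		return NULL;
 *	if (map_vm_area(area, PAGE_KERNEL, &pages))
 *		vunmap(area->addr);	(this is what vmap() does on error)
 */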
1217
1218struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1219                                void *caller)
1220{
1221        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
1222                                                -1, GFP_KERNEL, caller);
1223}
1224
1225struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
1226                                   int node, gfp_t gfp_mask)
1227{
1228        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
1229                                  gfp_mask, __builtin_return_address(0));
1230}
1231
1232static struct vm_struct *find_vm_area(const void *addr)
1233{
1234        struct vmap_area *va;
1235
1236        va = find_vmap_area((unsigned long)addr);
1237        if (va && va->flags & VM_VM_AREA)
1238                return va->private;
1239
1240        return NULL;
1241}
1242
1243/**
1244 *      remove_vm_area  -  find and remove a contiguous kernel virtual area
1245 *      @addr:          base address
1246 *
1247 *      Search for the kernel VM area starting at @addr, and remove it.
1248 *      This function returns the found VM area, but using it is NOT safe
1249 *      on SMP machines, except for its size or flags.
1250 */
1251struct vm_struct *remove_vm_area(const void *addr)
1252{
1253        struct vmap_area *va;
1254
1255        va = find_vmap_area((unsigned long)addr);
1256        if (va && va->flags & VM_VM_AREA) {
1257                struct vm_struct *vm = va->private;
1258                struct vm_struct *tmp, **p;
1259
1260                vmap_debug_free_range(va->va_start, va->va_end);
1261                free_unmap_vmap_area(va);
1262                vm->size -= PAGE_SIZE;
1263
1264                write_lock(&vmlist_lock);
1265                for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
1266                        ;
1267                *p = tmp->next;
1268                write_unlock(&vmlist_lock);
1269
1270                return vm;
1271        }
1272        return NULL;
1273}
1274
1275static void __vunmap(const void *addr, int deallocate_pages)
1276{
1277        struct vm_struct *area;
1278
1279        if (!addr)
1280                return;
1281
1282        if ((PAGE_SIZE-1) & (unsigned long)addr) {
1283                WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
1284                return;
1285        }
1286
1287        area = remove_vm_area(addr);
1288        if (unlikely(!area)) {
1289                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1290                                addr);
1291                return;
1292        }
1293
1294        debug_check_no_locks_freed(addr, area->size);
1295        debug_check_no_obj_freed(addr, area->size);
1296
1297        if (deallocate_pages) {
1298                int i;
1299
1300                for (i = 0; i < area->nr_pages; i++) {
1301                        struct page *page = area->pages[i];
1302
1303                        BUG_ON(!page);
1304                        __free_page(page);
1305                }
1306
1307                if (area->flags & VM_VPAGES)
1308                        vfree(area->pages);
1309                else
1310                        kfree(area->pages);
1311        }
1312
1313        kfree(area);
1314        return;
1315}
1316
1317/**
1318 *      vfree  -  release memory allocated by vmalloc()
1319 *      @addr:          memory base address
1320 *
1321 *      Free the virtually contiguous memory area starting at @addr, as
1322 *      obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
1323 *      NULL, no operation is performed.
1324 *
1325 *      Must not be called in interrupt context.
1326 */
1327void vfree(const void *addr)
1328{
1329        BUG_ON(in_interrupt());
1330        __vunmap(addr, 1);
1331}
1332EXPORT_SYMBOL(vfree);
1333
1334/**
1335 *      vunmap  -  release virtual mapping obtained by vmap()
1336 *      @addr:          memory base address
1337 *
1338 *      Free the virtually contiguous memory area starting at @addr,
1339 *      which was created from the page array passed to vmap().
1340 *
1341 *      Must not be called in interrupt context.
1342 */
1343void vunmap(const void *addr)
1344{
1345        BUG_ON(in_interrupt());
1346        might_sleep();
1347        __vunmap(addr, 0);
1348}
1349EXPORT_SYMBOL(vunmap);
1350
1351/**
1352 *      vmap  -  map an array of pages into virtually contiguous space
1353 *      @pages:         array of page pointers
1354 *      @count:         number of pages to map
1355 *      @flags:         vm_area->flags
1356 *      @prot:          page protection for the mapping
1357 *
1358 *      Maps @count pages from @pages into contiguous kernel virtual
1359 *      space.
1360 */
1361void *vmap(struct page **pages, unsigned int count,
1362                unsigned long flags, pgprot_t prot)
1363{
1364        struct vm_struct *area;
1365
1366        might_sleep();
1367
1368        if (count > num_physpages)
1369                return NULL;
1370
1371        area = get_vm_area_caller((count << PAGE_SHIFT), flags,
1372                                        __builtin_return_address(0));
1373        if (!area)
1374                return NULL;
1375
1376        if (map_vm_area(area, prot, &pages)) {
1377                vunmap(area->addr);
1378                return NULL;
1379        }
1380
1381        return area->addr;
1382}
1383EXPORT_SYMBOL(vmap);
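
/*
 * Illustrative sketch (editorial addition): making two separately allocated
 * pages appear virtually contiguous; "p0" and "p1" are hypothetical pages:
 *
 *	struct page *pg[2] = { p0, p1 };
 *	void *va;
 *
 *	va = vmap(pg, 2, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	(va and va + PAGE_SIZE now address p0 and p1 respectively)
 *	vunmap(va);
 */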
1384
1385static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
1386                            int node, void *caller);
1387static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1388                                 pgprot_t prot, int node, void *caller)
1389{
1390        struct page **pages;
1391        unsigned int nr_pages, array_size, i;
1392
1393        nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
1394        array_size = (nr_pages * sizeof(struct page *));
1395
1396        area->nr_pages = nr_pages;
1397        /* Please note that the recursion is strictly bounded. */
1398        if (array_size > PAGE_SIZE) {
1399                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
1400                                PAGE_KERNEL, node, caller);
1401                area->flags |= VM_VPAGES;
1402        } else {
1403                pages = kmalloc_node(array_size,
1404                                (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
1405                                node);
1406        }
1407        area->pages = pages;
1408        area->caller = caller;
1409        if (!area->pages) {
1410                remove_vm_area(area->addr);
1411                kfree(area);
1412                return NULL;
1413        }
1414
1415        for (i = 0; i < area->nr_pages; i++) {
1416                struct page *page;
1417
1418                if (node < 0)
1419                        page = alloc_page(gfp_mask);
1420                else
1421                        page = alloc_pages_node(node, gfp_mask, 0);
1422
1423                if (unlikely(!page)) {
1424                        /* Successfully allocated i pages, free them in __vunmap() */
1425                        area->nr_pages = i;
1426                        goto fail;
1427                }
1428                area->pages[i] = page;
1429        }
1430
1431        if (map_vm_area(area, prot, &pages))
1432                goto fail;
1433        return area->addr;
1434
1435fail:
1436        vfree(area->addr);
1437        return NULL;
1438}
1439
1440void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
1441{
1442        return __vmalloc_area_node(area, gfp_mask, prot, -1,
1443                                        __builtin_return_address(0));
1444}
1445
1446/**
1447 *      __vmalloc_node  -  allocate virtually contiguous memory
1448 *      @size:          allocation size
1449 *      @gfp_mask:      flags for the page level allocator
1450 *      @prot:          protection mask for the allocated pages
1451 *      @node:          node to use for allocation or -1
1452 *      @caller:        caller's return address
1453 *
1454 *      Allocate enough pages to cover @size from the page level
1455 *      allocator with @gfp_mask flags.  Map them into contiguous
1456 *      kernel virtual space, using a pagetable protection of @prot.
1457 */
1458static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
1459                                                int node, void *caller)
1460{
1461        struct vm_struct *area;
1462
1463        size = PAGE_ALIGN(size);
1464        if (!size || (size >> PAGE_SHIFT) > num_physpages)
1465                return NULL;
1466
1467        area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
1468                                                node, gfp_mask, caller);
1469
1470        if (!area)
1471                return NULL;
1472
1473        return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
1474}
1475
1476void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1477{
1478        return __vmalloc_node(size, gfp_mask, prot, -1,
1479                                __builtin_return_address(0));
1480}
1481EXPORT_SYMBOL(__vmalloc);
1482
1483/**
1484 *      vmalloc  -  allocate virtually contiguous memory
1485 *      @size:          allocation size
1486 *      Allocate enough pages to cover @size from the page level
1487 *      allocator and map them into contiguous kernel virtual space.
1488 *
1489 *      For tight control over page level allocator and protection flags
1490 *      use __vmalloc() instead.
1491 */
1492void *vmalloc(unsigned long size)
1493{
1494        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1495                                        -1, __builtin_return_address(0));
1496}
1497EXPORT_SYMBOL(vmalloc);
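
/*
 * Illustrative sketch (editorial addition): vmalloc() is the usual choice
 * for large buffers that only need to be virtually contiguous; "nbytes" is
 * hypothetical:
 *
 *	buf = vmalloc(nbytes);
 *	if (!buf)
 *		return -ENOMEM;
 *	(use the buffer)
 *	vfree(buf);
 */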
1498
1499/**
1500 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1501 * @size: allocation size
1502 *
1503 * The resulting memory area is zeroed so it can be mapped to userspace
1504 * without leaking data.
1505 */
1506void *vmalloc_user(unsigned long size)
1507{
1508        struct vm_struct *area;
1509        void *ret;
1510
1511        ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1512                             PAGE_KERNEL, -1, __builtin_return_address(0));
1513        if (ret) {
1514                area = find_vm_area(ret);
1515                area->flags |= VM_USERMAP;
1516        }
1517        return ret;
1518}
1519EXPORT_SYMBOL(vmalloc_user);
1520
1521/**
1522 *      vmalloc_node  -  allocate memory on a specific node
1523 *      @size:          allocation size
1524 *      @node:          numa node
1525 *
1526 *      Allocate enough pages to cover @size from the page level
1527 *      allocator and map them into contiguous kernel virtual space.
1528 *
1529 *      For tight control over page level allocator and protection flags
1530 *      use __vmalloc() instead.
1531 */
1532void *vmalloc_node(unsigned long size, int node)
1533{
1534        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1535                                        node, __builtin_return_address(0));
1536}
1537EXPORT_SYMBOL(vmalloc_node);
1538
1539#ifndef PAGE_KERNEL_EXEC
1540# define PAGE_KERNEL_EXEC PAGE_KERNEL
1541#endif
1542
1543/**
1544 *      vmalloc_exec  -  allocate virtually contiguous, executable memory
1545 *      @size:          allocation size
1546 *
1547 *      Kernel-internal function to allocate enough pages to cover @size from
1548 *      the page level allocator and map them into contiguous and
1549 *      executable kernel virtual space.
1550 *
1551 *      For tight control over page level allocator and protection flags
1552 *      use __vmalloc() instead.
1553 */
1554
1555void *vmalloc_exec(unsigned long size)
1556{
1557        return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1558                              -1, __builtin_return_address(0));
1559}
1560
1561#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1562#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
1563#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1564#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
1565#else
1566#define GFP_VMALLOC32 GFP_KERNEL
1567#endif
1568
1569/**
1570 *      vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
1571 *      @size:          allocation size
1572 *
1573 *      Allocate enough 32bit PA addressable pages to cover @size from the
1574 *      page level allocator and map them into contiguous kernel virtual space.
1575 */
1576void *vmalloc_32(unsigned long size)
1577{
1578        return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
1579                              -1, __builtin_return_address(0));
1580}
1581EXPORT_SYMBOL(vmalloc_32);
1582
1583/**
1584 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
1585 *      @size:          allocation size
1586 *
1587 * The resulting memory area is 32bit addressable and zeroed so it can be
1588 * mapped to userspace without leaking data.
1589 */
1590void *vmalloc_32_user(unsigned long size)
1591{
1592        struct vm_struct *area;
1593        void *ret;
1594
1595        ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1596                             -1, __builtin_return_address(0));
1597        if (ret) {
1598                area = find_vm_area(ret);
1599                area->flags |= VM_USERMAP;
1600        }
1601        return ret;
1602}
1603EXPORT_SYMBOL(vmalloc_32_user);
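
/*
 * Usage mirrors vmalloc_user() above, just with the 32bit physical
 * address guarantee of vmalloc_32(); e.g. (hypothetical):
 *
 *	demo_buf = vmalloc_32_user(16 * PAGE_SIZE);
 */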
1604
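/*
 * vread() - copy @count bytes starting at the vmalloc address @addr
 * into the kernel buffer @buf.
 *
 * Walks the vmlist under vmlist_lock.  Bytes that fall into a hole
 * between vm areas, or into an area's trailing guard page, are returned
 * as zeroes; anything past the last vm area is not copied at all.
 * Returns the number of bytes placed in @buf.  Used e.g. by the
 * /dev/kmem read path.
 */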
1605long vread(char *buf, char *addr, unsigned long count)
1606{
1607        struct vm_struct *tmp;
1608        char *vaddr, *buf_start = buf;
1609        unsigned long n;
1610
1611        /* Don't allow overflow */
1612        if ((unsigned long) addr + count < count)
1613                count = -(unsigned long) addr;
1614
1615        read_lock(&vmlist_lock);
1616        for (tmp = vmlist; tmp; tmp = tmp->next) {
1617                vaddr = (char *) tmp->addr;
1618                if (addr >= vaddr + tmp->size - PAGE_SIZE)
1619                        continue;
1620                while (addr < vaddr) {
1621                        if (count == 0)
1622                                goto finished;
1623                        *buf = '\0';
1624                        buf++;
1625                        addr++;
1626                        count--;
1627                }
1628                n = vaddr + tmp->size - PAGE_SIZE - addr;
1629                do {
1630                        if (count == 0)
1631                                goto finished;
1632                        *buf = *addr;
1633                        buf++;
1634                        addr++;
1635                        count--;
1636                } while (--n > 0);
1637        }
1638finished:
1639        read_unlock(&vmlist_lock);
1640        return buf - buf_start;
1641}
1642
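/*
 * vwrite() - copy @count bytes from the kernel buffer @buf to the
 * vmalloc address @addr.
 *
 * The counterpart of vread(): walks the vmlist under vmlist_lock and
 * copies into the parts of [addr, addr + count) that are backed by a
 * vm area, silently skipping holes and each area's guard page.  Returns
 * how far @buf was advanced; bytes aimed at holes are skipped but still
 * counted.
 */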
1643long vwrite(char *buf, char *addr, unsigned long count)
1644{
1645        struct vm_struct *tmp;
1646        char *vaddr, *buf_start = buf;
1647        unsigned long n;
1648
1649        /* Don't allow overflow */
1650        if ((unsigned long) addr + count < count)
1651                count = -(unsigned long) addr;
1652
1653        read_lock(&vmlist_lock);
1654        for (tmp = vmlist; tmp; tmp = tmp->next) {
1655                vaddr = (char *) tmp->addr;
1656                if (addr >= vaddr + tmp->size - PAGE_SIZE)
1657                        continue;
1658                while (addr < vaddr) {
1659                        if (count == 0)
1660                                goto finished;
1661                        buf++;
1662                        addr++;
1663                        count--;
1664                }
1665                n = vaddr + tmp->size - PAGE_SIZE - addr;
1666                do {
1667                        if (count == 0)
1668                                goto finished;
1669                        *addr = *buf;
1670                        buf++;
1671                        addr++;
1672                        count--;
1673                } while (--n > 0);
1674        }
1675finished:
1676        read_unlock(&vmlist_lock);
1677        return buf - buf_start;
1678}
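
/*
 * Illustrative sketch (not part of the upstream file): a /dev/kmem
 * style read handler could bounce vmalloc'ed memory to userspace via
 * vread().  The name demo_read_vmalloc is made up; kmalloc() and
 * copy_to_user() are the usual helpers.
 *
 *	static ssize_t demo_read_vmalloc(char __user *ubuf, char *kaddr,
 *					 size_t len)
 *	{
 *		char *tmp = kmalloc(len, GFP_KERNEL);
 *		long copied;
 *
 *		if (!tmp)
 *			return -ENOMEM;
 *		copied = vread(tmp, kaddr, len);
 *		if (copied > 0 && copy_to_user(ubuf, tmp, copied))
 *			copied = -EFAULT;
 *		kfree(tmp);
 *		return copied;
 *	}
 */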
1679
1680/**
1681 *      remap_vmalloc_range  -  map vmalloc pages to userspace
1682 *      @vma:           vma to cover (map full range of vma)
1683 *      @addr:          vmalloc memory
1684 *      @pgoff:         number of pages into addr before first page to map
1685 *
1686 *      Returns:        0 for success, -Exxx on failure
1687 *
1688 *      This function checks that @addr is a valid vmalloc'ed area and
1689 *      that it is big enough to cover the vma. It returns failure if
1690 *      either criterion is not met.
1691 *
1692 *      Similar to remap_pfn_range() (see mm/memory.c)
1693 */
1694int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1695                                                unsigned long pgoff)
1696{
1697        struct vm_struct *area;
1698        unsigned long uaddr = vma->vm_start;
1699        unsigned long usize = vma->vm_end - vma->vm_start;
1700
1701        if ((PAGE_SIZE-1) & (unsigned long)addr)
1702                return -EINVAL;
1703
1704        area = find_vm_area(addr);
1705        if (!area)
1706                return -EINVAL;
1707
1708        if (!(area->flags & VM_USERMAP))
1709                return -EINVAL;
1710
1711        if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
1712                return -EINVAL;
1713
1714        addr += pgoff << PAGE_SHIFT;
1715        do {
1716                struct page *page = vmalloc_to_page(addr);
1717                int ret;
1718
1719                ret = vm_insert_page(vma, uaddr, page);
1720                if (ret)
1721                        return ret;
1722
1723                uaddr += PAGE_SIZE;
1724                addr += PAGE_SIZE;
1725                usize -= PAGE_SIZE;
1726        } while (usize > 0);
1727
1728        /* Keep things like page migration away from these pages; the VM flags could use a cleanup. */
1729        vma->vm_flags |= VM_RESERVED;
1730
1731        return 0;
1732}
1733EXPORT_SYMBOL(remap_vmalloc_range);
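
/*
 * Illustrative sketch (not part of the upstream file): the typical
 * caller is a driver's ->mmap handler, mapping a buffer that was
 * allocated with vmalloc_user() or vmalloc_32_user() (so that
 * VM_USERMAP is set, which remap_vmalloc_range() insists on).  The
 * demo_* names refer to the hypothetical buffer sketched earlier.
 *
 *	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, demo_buf, vma->vm_pgoff);
 *	}
 */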
1734
1735/*
1736 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
1737 * have one.
1738 */
1739void __attribute__((weak)) vmalloc_sync_all(void)
1740{
1741}
1742
1743
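/*
 * Callback for apply_to_page_range() below.  It is deliberately empty:
 * the walk itself allocates the page tables for the range, which is all
 * alloc_vm_area() needs; no ptes are actually installed here.
 */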
1744static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
1745{
1746        /* apply_to_page_range() does all the hard work. */
1747        return 0;
1748}
1749
1750/**
1751 *      alloc_vm_area - allocate a range of kernel address space
1752 *      @size:          size of the area
1753 *
1754 *      Returns:        NULL on failure, vm_struct on success
1755 *
1756 *      This function reserves a range of kernel address space, and
1757 *      allocates pagetables to map that range.  No actual mappings
1758 *      are created.  If the kernel address space is not shared
1759 *      between processes, it syncs the page tables across all
1760 *      processes.
1761 */
1762struct vm_struct *alloc_vm_area(size_t size)
1763{
1764        struct vm_struct *area;
1765
1766        area = get_vm_area_caller(size, VM_IOREMAP,
1767                                __builtin_return_address(0));
1768        if (area == NULL)
1769                return NULL;
1770
1771        /*
1772         * This ensures that page tables are constructed for this region
1773         * of kernel virtual address space and mapped into init_mm.
1774         */
1775        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
1776                                area->size, f, NULL)) {
1777                free_vm_area(area);
1778                return NULL;
1779        }
1780
1781        /* Make sure the page tables are also constructed in the kernel
1782           mappings of every process. */
1783        vmalloc_sync_all();
1784
1785        return area;
1786}
1787EXPORT_SYMBOL_GPL(alloc_vm_area);
1788
1789void free_vm_area(struct vm_struct *area)
1790{
1791        struct vm_struct *ret;
1792        ret = remove_vm_area(area->addr);
1793        BUG_ON(ret != area);
1794        kfree(area);
1795}
1796EXPORT_SYMBOL_GPL(free_vm_area);
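
/*
 * Illustrative sketch (not part of the upstream file): a caller such as
 * the Xen grant-table code reserves address space with alloc_vm_area(),
 * installs its own mappings into [area->addr, area->addr + size), and
 * finally tears everything down with free_vm_area().  The demo_* names
 * are made up and the mapping step itself is caller specific.
 *
 *	static struct vm_struct *demo_area;
 *
 *	static int demo_reserve(void)
 *	{
 *		demo_area = alloc_vm_area(PAGE_SIZE);
 *		return demo_area ? 0 : -ENOMEM;
 *	}
 *
 *	static void demo_release(void)
 *	{
 *		free_vm_area(demo_area);
 *	}
 */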
1797
1798
1799#ifdef CONFIG_PROC_FS
1800static void *s_start(struct seq_file *m, loff_t *pos)
1801{
1802        loff_t n = *pos;
1803        struct vm_struct *v;
1804
1805        read_lock(&vmlist_lock);
1806        v = vmlist;
1807        while (n > 0 && v) {
1808                n--;
1809                v = v->next;
1810        }
1811        if (!n)
1812                return v;
1813
1814        return NULL;
1815
1816}
1817
1818static void *s_next(struct seq_file *m, void *p, loff_t *pos)
1819{
1820        struct vm_struct *v = p;
1821
1822        ++*pos;
1823        return v->next;
1824}
1825
1826static void s_stop(struct seq_file *m, void *p)
1827{
1828        read_unlock(&vmlist_lock);
1829}
1830
1831static void show_numa_info(struct seq_file *m, struct vm_struct *v)
1832{
1833        if (NUMA_BUILD) {
1834                unsigned int nr, *counters = m->private;
1835
1836                if (!counters)
1837                        return;
1838
1839                memset(counters, 0, nr_node_ids * sizeof(unsigned int));
1840
1841                for (nr = 0; nr < v->nr_pages; nr++)
1842                        counters[page_to_nid(v->pages[nr])]++;
1843
1844                for_each_node_state(nr, N_HIGH_MEMORY)
1845                        if (counters[nr])
1846                                seq_printf(m, " N%u=%u", nr, counters[nr]);
1847        }
1848}
1849
1850static int s_show(struct seq_file *m, void *p)
1851{
1852        struct vm_struct *v = p;
1853
1854        seq_printf(m, "0x%p-0x%p %7ld",
1855                v->addr, v->addr + v->size, v->size);
1856
1857        if (v->caller) {
1858                char buff[KSYM_SYMBOL_LEN];
1859
1860                seq_putc(m, ' ');
1861                sprint_symbol(buff, (unsigned long)v->caller);
1862                seq_puts(m, buff);
1863        }
1864
1865        if (v->nr_pages)
1866                seq_printf(m, " pages=%d", v->nr_pages);
1867
1868        if (v->phys_addr)
1869                seq_printf(m, " phys=%lx", v->phys_addr);
1870
1871        if (v->flags & VM_IOREMAP)
1872                seq_printf(m, " ioremap");
1873
1874        if (v->flags & VM_ALLOC)
1875                seq_printf(m, " vmalloc");
1876
1877        if (v->flags & VM_MAP)
1878                seq_printf(m, " vmap");
1879
1880        if (v->flags & VM_USERMAP)
1881                seq_printf(m, " user");
1882
1883        if (v->flags & VM_VPAGES)
1884                seq_printf(m, " vpages");
1885
1886        show_numa_info(m, v);
1887        seq_putc(m, '\n');
1888        return 0;
1889}
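
/*
 * With the fields printed above, a /proc/vmallocinfo line looks roughly
 * like this (addresses, sizes and offsets are made up):
 *
 *	0xf8a00000-0xf8a05000   20480 module_alloc_update_bounds+0x14/0x5c pages=4 vmalloc N0=4
 *
 * i.e. start-end, size in bytes, the caller, the number of backing
 * pages, the type flags and, on NUMA builds, the per-node page counts.
 */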
1890
1891static const struct seq_operations vmalloc_op = {
1892        .start = s_start,
1893        .next = s_next,
1894        .stop = s_stop,
1895        .show = s_show,
1896};
1897
1898static int vmalloc_open(struct inode *inode, struct file *file)
1899{
1900        unsigned int *ptr = NULL;
1901        int ret;
1902
1903        if (NUMA_BUILD)
1904                ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
1905        ret = seq_open(file, &vmalloc_op);
1906        if (!ret) {
1907                struct seq_file *m = file->private_data;
1908                m->private = ptr;
1909        } else
1910                kfree(ptr);
1911        return ret;
1912}
1913
1914static const struct file_operations proc_vmalloc_operations = {
1915        .open           = vmalloc_open,
1916        .read           = seq_read,
1917        .llseek         = seq_lseek,
1918        .release        = seq_release_private,
1919};
1920
1921static int __init proc_vmalloc_init(void)
1922{
1923        proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
1924        return 0;
1925}
1926module_init(proc_vmalloc_init);
1927#endif
1928
1929