linux/mm/vmalloc.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  Copyright (C) 1993  Linus Torvalds
   4 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   5 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   6 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   7 *  Numa awareness, Christoph Lameter, SGI, June 2005
   8 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
   9 */
  10
  11#include <linux/vmalloc.h>
  12#include <linux/mm.h>
  13#include <linux/module.h>
  14#include <linux/highmem.h>
  15#include <linux/sched/signal.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/interrupt.h>
  19#include <linux/proc_fs.h>
  20#include <linux/seq_file.h>
  21#include <linux/set_memory.h>
  22#include <linux/debugobjects.h>
  23#include <linux/kallsyms.h>
  24#include <linux/list.h>
  25#include <linux/notifier.h>
  26#include <linux/rbtree.h>
  27#include <linux/xarray.h>
  28#include <linux/rcupdate.h>
  29#include <linux/pfn.h>
  30#include <linux/kmemleak.h>
  31#include <linux/atomic.h>
  32#include <linux/compiler.h>
  33#include <linux/llist.h>
  34#include <linux/bitops.h>
  35#include <linux/rbtree_augmented.h>
  36#include <linux/overflow.h>
  37#include <linux/pgtable.h>
  38#include <linux/uaccess.h>
  39#include <asm/tlbflush.h>
  40#include <asm/shmparam.h>
  41
  42#include "internal.h"
  43#include "pgalloc-track.h"
  44
  45#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
  46static bool __ro_after_init vmap_allow_huge = true;
  47
  48static int __init set_nohugevmalloc(char *str)
  49{
  50        vmap_allow_huge = false;
  51        return 0;
  52}
  53early_param("nohugevmalloc", set_nohugevmalloc);
  54#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
  55static const bool vmap_allow_huge = false;
  56#endif  /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
  57
  58bool is_vmalloc_addr(const void *x)
  59{
  60        unsigned long addr = (unsigned long)x;
  61
  62        return addr >= VMALLOC_START && addr < VMALLOC_END;
  63}
  64EXPORT_SYMBOL(is_vmalloc_addr);
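     /*
      * A minimal usage sketch (hypothetical caller): releasing a buffer that
      * may have come from either kmalloc() or vmalloc(), similar in spirit
      * to what kvfree() does.
      *
      *	static void example_release(void *buf)
      *	{
      *		if (is_vmalloc_addr(buf))
      *			vfree(buf);
      *		else
      *			kfree(buf);
      *	}
      */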
  65
  66struct vfree_deferred {
  67        struct llist_head list;
  68        struct work_struct wq;
  69};
  70static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
  71
  72static void __vunmap(const void *, int);
  73
  74static void free_work(struct work_struct *w)
  75{
  76        struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
  77        struct llist_node *t, *llnode;
  78
  79        llist_for_each_safe(llnode, t, llist_del_all(&p->list))
  80                __vunmap((void *)llnode, 1);
  81}
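     /*
      * A sketch of how a deferred free can be queued from atomic context,
      * assuming the caller reuses the start of the area being freed as the
      * llist_node, which is what free_work() above expects
      * (example_vfree_atomic is a hypothetical name):
      *
      *	static void example_vfree_atomic(const void *addr)
      *	{
      *		struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
      *
      *		if (llist_add((struct llist_node *)addr, &p->list))
      *			schedule_work(&p->wq);
      *	}
      */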
  82
  83/*** Page table manipulation functions ***/
  84static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  85                        phys_addr_t phys_addr, pgprot_t prot,
  86                        pgtbl_mod_mask *mask)
  87{
  88        pte_t *pte;
  89        u64 pfn;
  90
  91        pfn = phys_addr >> PAGE_SHIFT;
  92        pte = pte_alloc_kernel_track(pmd, addr, mask);
  93        if (!pte)
  94                return -ENOMEM;
  95        do {
  96                BUG_ON(!pte_none(*pte));
  97                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
  98                pfn++;
  99        } while (pte++, addr += PAGE_SIZE, addr != end);
 100        *mask |= PGTBL_PTE_MODIFIED;
 101        return 0;
 102}
 103
 104static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
 105                        phys_addr_t phys_addr, pgprot_t prot,
 106                        unsigned int max_page_shift)
 107{
 108        if (max_page_shift < PMD_SHIFT)
 109                return 0;
 110
 111        if (!arch_vmap_pmd_supported(prot))
 112                return 0;
 113
 114        if ((end - addr) != PMD_SIZE)
 115                return 0;
 116
 117        if (!IS_ALIGNED(addr, PMD_SIZE))
 118                return 0;
 119
 120        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
 121                return 0;
 122
 123        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
 124                return 0;
 125
 126        return pmd_set_huge(pmd, phys_addr, prot);
 127}
 128
 129static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 130                        phys_addr_t phys_addr, pgprot_t prot,
 131                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
 132{
 133        pmd_t *pmd;
 134        unsigned long next;
 135
 136        pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
 137        if (!pmd)
 138                return -ENOMEM;
 139        do {
 140                next = pmd_addr_end(addr, end);
 141
 142                if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
 143                                        max_page_shift)) {
 144                        *mask |= PGTBL_PMD_MODIFIED;
 145                        continue;
 146                }
 147
 148                if (vmap_pte_range(pmd, addr, next, phys_addr, prot, mask))
 149                        return -ENOMEM;
 150        } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
 151        return 0;
 152}
 153
 154static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
 155                        phys_addr_t phys_addr, pgprot_t prot,
 156                        unsigned int max_page_shift)
 157{
 158        if (max_page_shift < PUD_SHIFT)
 159                return 0;
 160
 161        if (!arch_vmap_pud_supported(prot))
 162                return 0;
 163
 164        if ((end - addr) != PUD_SIZE)
 165                return 0;
 166
 167        if (!IS_ALIGNED(addr, PUD_SIZE))
 168                return 0;
 169
 170        if (!IS_ALIGNED(phys_addr, PUD_SIZE))
 171                return 0;
 172
 173        if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
 174                return 0;
 175
 176        return pud_set_huge(pud, phys_addr, prot);
 177}
 178
 179static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 180                        phys_addr_t phys_addr, pgprot_t prot,
 181                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
 182{
 183        pud_t *pud;
 184        unsigned long next;
 185
 186        pud = pud_alloc_track(&init_mm, p4d, addr, mask);
 187        if (!pud)
 188                return -ENOMEM;
 189        do {
 190                next = pud_addr_end(addr, end);
 191
 192                if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
 193                                        max_page_shift)) {
 194                        *mask |= PGTBL_PUD_MODIFIED;
 195                        continue;
 196                }
 197
 198                if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
 199                                        max_page_shift, mask))
 200                        return -ENOMEM;
 201        } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
 202        return 0;
 203}
 204
 205static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
 206                        phys_addr_t phys_addr, pgprot_t prot,
 207                        unsigned int max_page_shift)
 208{
 209        if (max_page_shift < P4D_SHIFT)
 210                return 0;
 211
 212        if (!arch_vmap_p4d_supported(prot))
 213                return 0;
 214
 215        if ((end - addr) != P4D_SIZE)
 216                return 0;
 217
 218        if (!IS_ALIGNED(addr, P4D_SIZE))
 219                return 0;
 220
 221        if (!IS_ALIGNED(phys_addr, P4D_SIZE))
 222                return 0;
 223
 224        if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
 225                return 0;
 226
 227        return p4d_set_huge(p4d, phys_addr, prot);
 228}
 229
 230static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 231                        phys_addr_t phys_addr, pgprot_t prot,
 232                        unsigned int max_page_shift, pgtbl_mod_mask *mask)
 233{
 234        p4d_t *p4d;
 235        unsigned long next;
 236
 237        p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
 238        if (!p4d)
 239                return -ENOMEM;
 240        do {
 241                next = p4d_addr_end(addr, end);
 242
 243                if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
 244                                        max_page_shift)) {
 245                        *mask |= PGTBL_P4D_MODIFIED;
 246                        continue;
 247                }
 248
 249                if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
 250                                        max_page_shift, mask))
 251                        return -ENOMEM;
 252        } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
 253        return 0;
 254}
 255
 256static int vmap_range_noflush(unsigned long addr, unsigned long end,
 257                        phys_addr_t phys_addr, pgprot_t prot,
 258                        unsigned int max_page_shift)
 259{
 260        pgd_t *pgd;
 261        unsigned long start;
 262        unsigned long next;
 263        int err;
 264        pgtbl_mod_mask mask = 0;
 265
 266        might_sleep();
 267        BUG_ON(addr >= end);
 268
 269        start = addr;
 270        pgd = pgd_offset_k(addr);
 271        do {
 272                next = pgd_addr_end(addr, end);
 273                err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
 274                                        max_page_shift, &mask);
 275                if (err)
 276                        break;
 277        } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
 278
 279        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 280                arch_sync_kernel_mappings(start, end);
 281
 282        return err;
 283}
 284
 285int vmap_range(unsigned long addr, unsigned long end,
 286                        phys_addr_t phys_addr, pgprot_t prot,
 287                        unsigned int max_page_shift)
 288{
 289        int err;
 290
 291        err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
 292        flush_cache_vmap(addr, end);
 293
 294        return err;
 295}
 296
 297static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 298                             pgtbl_mod_mask *mask)
 299{
 300        pte_t *pte;
 301
 302        pte = pte_offset_kernel(pmd, addr);
 303        do {
 304                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
 305                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
 306        } while (pte++, addr += PAGE_SIZE, addr != end);
 307        *mask |= PGTBL_PTE_MODIFIED;
 308}
 309
 310static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 311                             pgtbl_mod_mask *mask)
 312{
 313        pmd_t *pmd;
 314        unsigned long next;
 315        int cleared;
 316
 317        pmd = pmd_offset(pud, addr);
 318        do {
 319                next = pmd_addr_end(addr, end);
 320
 321                cleared = pmd_clear_huge(pmd);
 322                if (cleared || pmd_bad(*pmd))
 323                        *mask |= PGTBL_PMD_MODIFIED;
 324
 325                if (cleared)
 326                        continue;
 327                if (pmd_none_or_clear_bad(pmd))
 328                        continue;
 329                vunmap_pte_range(pmd, addr, next, mask);
 330
 331                cond_resched();
 332        } while (pmd++, addr = next, addr != end);
 333}
 334
 335static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 336                             pgtbl_mod_mask *mask)
 337{
 338        pud_t *pud;
 339        unsigned long next;
 340        int cleared;
 341
 342        pud = pud_offset(p4d, addr);
 343        do {
 344                next = pud_addr_end(addr, end);
 345
 346                cleared = pud_clear_huge(pud);
 347                if (cleared || pud_bad(*pud))
 348                        *mask |= PGTBL_PUD_MODIFIED;
 349
 350                if (cleared)
 351                        continue;
 352                if (pud_none_or_clear_bad(pud))
 353                        continue;
 354                vunmap_pmd_range(pud, addr, next, mask);
 355        } while (pud++, addr = next, addr != end);
 356}
 357
 358static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 359                             pgtbl_mod_mask *mask)
 360{
 361        p4d_t *p4d;
 362        unsigned long next;
 363        int cleared;
 364
 365        p4d = p4d_offset(pgd, addr);
 366        do {
 367                next = p4d_addr_end(addr, end);
 368
 369                cleared = p4d_clear_huge(p4d);
 370                if (cleared || p4d_bad(*p4d))
 371                        *mask |= PGTBL_P4D_MODIFIED;
 372
 373                if (cleared)
 374                        continue;
 375                if (p4d_none_or_clear_bad(p4d))
 376                        continue;
 377                vunmap_pud_range(p4d, addr, next, mask);
 378        } while (p4d++, addr = next, addr != end);
 379}
 380
 381/*
 382 * vunmap_range_noflush is similar to vunmap_range, but does not
 383 * flush caches or TLBs.
 384 *
  385 * The caller is responsible for calling flush_cache_vunmap() before calling
 386 * this function, and flush_tlb_kernel_range after it has returned
 387 * successfully (and before the addresses are expected to cause a page fault
 388 * or be re-mapped for something else, if TLB flushes are being delayed or
 389 * coalesced).
 390 *
 391 * This is an internal function only. Do not use outside mm/.
 392 */
 393void vunmap_range_noflush(unsigned long start, unsigned long end)
 394{
 395        unsigned long next;
 396        pgd_t *pgd;
 397        unsigned long addr = start;
 398        pgtbl_mod_mask mask = 0;
 399
 400        BUG_ON(addr >= end);
 401        pgd = pgd_offset_k(addr);
 402        do {
 403                next = pgd_addr_end(addr, end);
 404                if (pgd_bad(*pgd))
 405                        mask |= PGTBL_PGD_MODIFIED;
 406                if (pgd_none_or_clear_bad(pgd))
 407                        continue;
 408                vunmap_p4d_range(pgd, addr, next, &mask);
 409        } while (pgd++, addr = next, addr != end);
 410
 411        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 412                arch_sync_kernel_mappings(start, end);
 413}
 414
 415/**
 416 * vunmap_range - unmap kernel virtual addresses
 417 * @addr: start of the VM area to unmap
 418 * @end: end of the VM area to unmap (non-inclusive)
 419 *
 420 * Clears any present PTEs in the virtual address range, flushes TLBs and
 421 * caches. Any subsequent access to the address before it has been re-mapped
 422 * is a kernel bug.
 423 */
 424void vunmap_range(unsigned long addr, unsigned long end)
 425{
 426        flush_cache_vunmap(addr, end);
 427        vunmap_range_noflush(addr, end);
 428        flush_tlb_kernel_range(addr, end);
 429}
 430
 431static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 432                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 433                pgtbl_mod_mask *mask)
 434{
 435        pte_t *pte;
 436
 437        /*
 438         * nr is a running index into the array which helps higher level
 439         * callers keep track of where we're up to.
 440         */
 441
 442        pte = pte_alloc_kernel_track(pmd, addr, mask);
 443        if (!pte)
 444                return -ENOMEM;
 445        do {
 446                struct page *page = pages[*nr];
 447
 448                if (WARN_ON(!pte_none(*pte)))
 449                        return -EBUSY;
 450                if (WARN_ON(!page))
 451                        return -ENOMEM;
 452                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 453                (*nr)++;
 454        } while (pte++, addr += PAGE_SIZE, addr != end);
 455        *mask |= PGTBL_PTE_MODIFIED;
 456        return 0;
 457}
 458
 459static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
 460                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 461                pgtbl_mod_mask *mask)
 462{
 463        pmd_t *pmd;
 464        unsigned long next;
 465
 466        pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
 467        if (!pmd)
 468                return -ENOMEM;
 469        do {
 470                next = pmd_addr_end(addr, end);
 471                if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
 472                        return -ENOMEM;
 473        } while (pmd++, addr = next, addr != end);
 474        return 0;
 475}
 476
 477static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
 478                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 479                pgtbl_mod_mask *mask)
 480{
 481        pud_t *pud;
 482        unsigned long next;
 483
 484        pud = pud_alloc_track(&init_mm, p4d, addr, mask);
 485        if (!pud)
 486                return -ENOMEM;
 487        do {
 488                next = pud_addr_end(addr, end);
 489                if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
 490                        return -ENOMEM;
 491        } while (pud++, addr = next, addr != end);
 492        return 0;
 493}
 494
 495static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
 496                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 497                pgtbl_mod_mask *mask)
 498{
 499        p4d_t *p4d;
 500        unsigned long next;
 501
 502        p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
 503        if (!p4d)
 504                return -ENOMEM;
 505        do {
 506                next = p4d_addr_end(addr, end);
 507                if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
 508                        return -ENOMEM;
 509        } while (p4d++, addr = next, addr != end);
 510        return 0;
 511}
 512
 513static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
 514                pgprot_t prot, struct page **pages)
 515{
 516        unsigned long start = addr;
 517        pgd_t *pgd;
 518        unsigned long next;
 519        int err = 0;
 520        int nr = 0;
 521        pgtbl_mod_mask mask = 0;
 522
 523        BUG_ON(addr >= end);
 524        pgd = pgd_offset_k(addr);
 525        do {
 526                next = pgd_addr_end(addr, end);
 527                if (pgd_bad(*pgd))
 528                        mask |= PGTBL_PGD_MODIFIED;
 529                err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
 530                if (err)
 531                        return err;
 532        } while (pgd++, addr = next, addr != end);
 533
 534        if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 535                arch_sync_kernel_mappings(start, end);
 536
 537        return 0;
 538}
 539
 540/*
 541 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 542 * flush caches.
 543 *
 544 * The caller is responsible for calling flush_cache_vmap() after this
 545 * function returns successfully and before the addresses are accessed.
 546 *
 547 * This is an internal function only. Do not use outside mm/.
 548 */
 549int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 550                pgprot_t prot, struct page **pages, unsigned int page_shift)
 551{
 552        unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
 553
 554        WARN_ON(page_shift < PAGE_SHIFT);
 555
 556        if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
 557                        page_shift == PAGE_SHIFT)
 558                return vmap_small_pages_range_noflush(addr, end, prot, pages);
 559
 560        for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
 561                int err;
 562
 563                err = vmap_range_noflush(addr, addr + (1UL << page_shift),
 564                                        __pa(page_address(pages[i])), prot,
 565                                        page_shift);
 566                if (err)
 567                        return err;
 568
 569                addr += 1UL << page_shift;
 570        }
 571
 572        return 0;
 573}
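     /*
      * A worked example of the loop above, assuming x86-64 with 4 KiB base
      * pages: for page_shift == PMD_SHIFT (21), each iteration advances by
      * 1 << (21 - 12) == 512 entries in @pages and maps them as a single
      * 2 MiB leaf mapping via vmap_range_noflush().
      */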
 574
 575/**
 576 * vmap_pages_range - map pages to a kernel virtual address
 577 * @addr: start of the VM area to map
 578 * @end: end of the VM area to map (non-inclusive)
 579 * @prot: page protection flags to use
 580 * @pages: pages to map (always PAGE_SIZE pages)
 581 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 582 * be aligned and contiguous up to at least this shift.
 583 *
 584 * RETURNS:
 585 * 0 on success, -errno on failure.
 586 */
 587static int vmap_pages_range(unsigned long addr, unsigned long end,
 588                pgprot_t prot, struct page **pages, unsigned int page_shift)
 589{
 590        int err;
 591
 592        err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 593        flush_cache_vmap(addr, end);
 594        return err;
 595}
 596
 597int is_vmalloc_or_module_addr(const void *x)
 598{
 599        /*
 600         * ARM, x86-64 and sparc64 put modules in a special place,
 601         * and fall back on vmalloc() if that fails. Others
  602         * just put them in the vmalloc space.
 603         */
 604#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
 605        unsigned long addr = (unsigned long)x;
 606        if (addr >= MODULES_VADDR && addr < MODULES_END)
 607                return 1;
 608#endif
 609        return is_vmalloc_addr(x);
 610}
 611
 612/*
 613 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 614 * return the tail page that corresponds to the base page address, which
 615 * matches small vmap mappings.
 616 */
 617struct page *vmalloc_to_page(const void *vmalloc_addr)
 618{
 619        unsigned long addr = (unsigned long) vmalloc_addr;
 620        struct page *page = NULL;
 621        pgd_t *pgd = pgd_offset_k(addr);
 622        p4d_t *p4d;
 623        pud_t *pud;
 624        pmd_t *pmd;
 625        pte_t *ptep, pte;
 626
 627        /*
 628         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
 629         * architectures that do not vmalloc module space
 630         */
 631        VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 632
 633        if (pgd_none(*pgd))
 634                return NULL;
 635        if (WARN_ON_ONCE(pgd_leaf(*pgd)))
 636                return NULL; /* XXX: no allowance for huge pgd */
 637        if (WARN_ON_ONCE(pgd_bad(*pgd)))
 638                return NULL;
 639
 640        p4d = p4d_offset(pgd, addr);
 641        if (p4d_none(*p4d))
 642                return NULL;
 643        if (p4d_leaf(*p4d))
 644                return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
 645        if (WARN_ON_ONCE(p4d_bad(*p4d)))
 646                return NULL;
 647
 648        pud = pud_offset(p4d, addr);
 649        if (pud_none(*pud))
 650                return NULL;
 651        if (pud_leaf(*pud))
 652                return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 653        if (WARN_ON_ONCE(pud_bad(*pud)))
 654                return NULL;
 655
 656        pmd = pmd_offset(pud, addr);
 657        if (pmd_none(*pmd))
 658                return NULL;
 659        if (pmd_leaf(*pmd))
 660                return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 661        if (WARN_ON_ONCE(pmd_bad(*pmd)))
 662                return NULL;
 663
 664        ptep = pte_offset_map(pmd, addr);
 665        pte = *ptep;
 666        if (pte_present(pte))
 667                page = pte_page(pte);
 668        pte_unmap(ptep);
 669
 670        return page;
 671}
 672EXPORT_SYMBOL(vmalloc_to_page);
 673
 674/*
 675 * Map a vmalloc()-space virtual address to the physical page frame number.
 676 */
 677unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 678{
 679        return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 680}
 681EXPORT_SYMBOL(vmalloc_to_pfn);
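     /*
      * A minimal usage sketch (hypothetical caller): resolving every page
      * backing a vmalloc()'ed buffer, e.g. to pass them to an interface
      * that works on struct page pointers.
      *
      *	void *buf = vmalloc(size);
      *	unsigned long off;
      *
      *	for (off = 0; buf && off < size; off += PAGE_SIZE)
      *		pages[off >> PAGE_SHIFT] = vmalloc_to_page(buf + off);
      */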
 682
 683
 684/*** Global kva allocator ***/
 685
 686#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 687#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 688
 689
 690static DEFINE_SPINLOCK(vmap_area_lock);
 691static DEFINE_SPINLOCK(free_vmap_area_lock);
 692/* Export for kexec only */
 693LIST_HEAD(vmap_area_list);
 694static struct rb_root vmap_area_root = RB_ROOT;
 695static bool vmap_initialized __read_mostly;
 696
 697static struct rb_root purge_vmap_area_root = RB_ROOT;
 698static LIST_HEAD(purge_vmap_area_list);
 699static DEFINE_SPINLOCK(purge_vmap_area_lock);
 700
 701/*
  702 * This kmem_cache is used for vmap_area objects. Instead of
  703 * allocating a new object from the slab on each request, a
  704 * preloaded object from this cache can be reused to make things
  705 * faster, especially in the "no edge" splitting of a free block.
 706 */
 707static struct kmem_cache *vmap_area_cachep;
 708
 709/*
  710 * This linked list is used in tandem with free_vmap_area_root.
 711 * It gives O(1) access to prev/next to perform fast coalescing.
 712 */
 713static LIST_HEAD(free_vmap_area_list);
 714
 715/*
  716 * This augmented red-black tree represents the free vmap space.
 717 * All vmap_area objects in this tree are sorted by va->va_start
 718 * address. It is used for allocation and merging when a vmap
 719 * object is released.
 720 *
  721 * Each vmap_area node stores the maximum available free block size
  722 * within its sub-tree (including the node itself). Therefore it is
  723 * possible to find the lowest-address free area matching a request.
 724 */
 725static struct rb_root free_vmap_area_root = RB_ROOT;
 726
 727/*
 728 * Preload a CPU with one object for "no edge" split case. The
  729 * aim is to get rid of allocations from atomic context, and thus
  730 * to be able to use more permissive allocation masks.
 731 */
 732static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
 733
 734static __always_inline unsigned long
 735va_size(struct vmap_area *va)
 736{
 737        return (va->va_end - va->va_start);
 738}
 739
 740static __always_inline unsigned long
 741get_subtree_max_size(struct rb_node *node)
 742{
 743        struct vmap_area *va;
 744
 745        va = rb_entry_safe(node, struct vmap_area, rb_node);
 746        return va ? va->subtree_max_size : 0;
 747}
 748
 749/*
  750 * Gets called when a node is removed from the tree and when it is rotated.
 751 */
 752static __always_inline unsigned long
 753compute_subtree_max_size(struct vmap_area *va)
 754{
 755        return max3(va_size(va),
 756                get_subtree_max_size(va->rb_node.rb_left),
 757                get_subtree_max_size(va->rb_node.rb_right));
 758}
 759
 760RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
 761        struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
 762
 763static void purge_vmap_area_lazy(void);
 764static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
 765static unsigned long lazy_max_pages(void);
 766
 767static atomic_long_t nr_vmalloc_pages;
 768
 769unsigned long vmalloc_nr_pages(void)
 770{
 771        return atomic_long_read(&nr_vmalloc_pages);
 772}
 773
 774static struct vmap_area *__find_vmap_area(unsigned long addr)
 775{
 776        struct rb_node *n = vmap_area_root.rb_node;
 777
 778        while (n) {
 779                struct vmap_area *va;
 780
 781                va = rb_entry(n, struct vmap_area, rb_node);
 782                if (addr < va->va_start)
 783                        n = n->rb_left;
 784                else if (addr >= va->va_end)
 785                        n = n->rb_right;
 786                else
 787                        return va;
 788        }
 789
 790        return NULL;
 791}
 792
 793/*
  794 * This function returns the address of the parent node and of its
  795 * left or right link where the new node should be attached, for
  796 * further processing.
  797 *
  798 * If the new range overlaps an existing one, NULL is returned. In
  799 * that case the insertion must be declined and is considered a bug.
 800 */
 801static __always_inline struct rb_node **
 802find_va_links(struct vmap_area *va,
 803        struct rb_root *root, struct rb_node *from,
 804        struct rb_node **parent)
 805{
 806        struct vmap_area *tmp_va;
 807        struct rb_node **link;
 808
 809        if (root) {
 810                link = &root->rb_node;
 811                if (unlikely(!*link)) {
 812                        *parent = NULL;
 813                        return link;
 814                }
 815        } else {
 816                link = &from;
 817        }
 818
 819        /*
  820         * Go to the bottom of the tree. When we hit the last point
  821         * we end up with the parent rb_node and the correct link (the
  822         * direction) to which the new va->rb_node will be attached.
 823         */
 824        do {
 825                tmp_va = rb_entry(*link, struct vmap_area, rb_node);
 826
 827                /*
  828                 * During the traversal we also do a sanity check:
  829                 * trigger a WARN() and bail out if the new range
  830                 * partially or fully overlaps an existing one.
 831                 */
 832                if (va->va_start < tmp_va->va_end &&
 833                                va->va_end <= tmp_va->va_start)
 834                        link = &(*link)->rb_left;
 835                else if (va->va_end > tmp_va->va_start &&
 836                                va->va_start >= tmp_va->va_end)
 837                        link = &(*link)->rb_right;
 838                else {
 839                        WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
 840                                va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
 841
 842                        return NULL;
 843                }
 844        } while (*link);
 845
 846        *parent = &tmp_va->rb_node;
 847        return link;
 848}
 849
 850static __always_inline struct list_head *
 851get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
 852{
 853        struct list_head *list;
 854
 855        if (unlikely(!parent))
 856                /*
 857                 * The red-black tree where we try to find VA neighbors
  858                 * before merging or inserting is empty, i.e. there is
  859                 * no free vmap space. Normally this does not happen, but
  860                 * we handle the case anyway.
 861                 */
 862                return NULL;
 863
 864        list = &rb_entry(parent, struct vmap_area, rb_node)->list;
 865        return (&parent->rb_right == link ? list->next : list);
 866}
 867
 868static __always_inline void
 869link_va(struct vmap_area *va, struct rb_root *root,
 870        struct rb_node *parent, struct rb_node **link, struct list_head *head)
 871{
 872        /*
 873         * VA is still not in the list, but we can
 874         * identify its future previous list_head node.
 875         */
 876        if (likely(parent)) {
 877                head = &rb_entry(parent, struct vmap_area, rb_node)->list;
 878                if (&parent->rb_right != link)
 879                        head = head->prev;
 880        }
 881
 882        /* Insert to the rb-tree */
 883        rb_link_node(&va->rb_node, parent, link);
 884        if (root == &free_vmap_area_root) {
 885                /*
  886                 * Just perform a simple insertion into the tree. We do
  887                 * not set va->subtree_max_size to its current size before
  888                 * calling rb_insert_augmented(), because the tree is
  889                 * populated from the bottom up to the parent levels only
  890                 * once the node _is_ in the tree.
  891                 *
  892                 * Therefore we set subtree_max_size to zero after insertion,
  893                 * to let __augment_tree_propagate_from() put everything into
  894                 * the correct order later on.
 895                 */
 896                rb_insert_augmented(&va->rb_node,
 897                        root, &free_vmap_area_rb_augment_cb);
 898                va->subtree_max_size = 0;
 899        } else {
 900                rb_insert_color(&va->rb_node, root);
 901        }
 902
  903        /* Keep this list sorted by address */
 904        list_add(&va->list, head);
 905}
 906
 907static __always_inline void
 908unlink_va(struct vmap_area *va, struct rb_root *root)
 909{
 910        if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
 911                return;
 912
 913        if (root == &free_vmap_area_root)
 914                rb_erase_augmented(&va->rb_node,
 915                        root, &free_vmap_area_rb_augment_cb);
 916        else
 917                rb_erase(&va->rb_node, root);
 918
 919        list_del(&va->list);
 920        RB_CLEAR_NODE(&va->rb_node);
 921}
 922
 923#if DEBUG_AUGMENT_PROPAGATE_CHECK
 924static void
 925augment_tree_propagate_check(void)
 926{
 927        struct vmap_area *va;
 928        unsigned long computed_size;
 929
 930        list_for_each_entry(va, &free_vmap_area_list, list) {
 931                computed_size = compute_subtree_max_size(va);
 932                if (computed_size != va->subtree_max_size)
 933                        pr_emerg("tree is corrupted: %lu, %lu\n",
 934                                va_size(va), va->subtree_max_size);
 935        }
 936}
 937#endif
 938
 939/*
  940 * This function populates subtree_max_size from the bottom towards
  941 * upper levels, starting from the VA point. The propagation must be
  942 * done when the VA size is modified by changing its va_start/va_end,
  943 * or when a VA is newly inserted into the tree.
  944 *
  945 * It means that __augment_tree_propagate_from() must be called:
  946 * - after a VA has been inserted into the tree (free path);
  947 * - after a VA has been shrunk (allocation path);
  948 * - after a VA has been increased (merging path).
  949 *
  950 * Please note that it does not mean that upper parent nodes
  951 * and their subtree_max_size are recalculated all the time up
  952 * to the root node.
 953 *
 954 *       4--8
 955 *        /\
 956 *       /  \
 957 *      /    \
 958 *    2--2  8--8
 959 *
  960 * For example, if we modify node 4, shrinking it to 2, then no
  961 * modification is required at all. If we shrink node 2 to 1, only
  962 * its subtree_max_size is updated and set to 1. If we shrink node 8
  963 * to 6, then its subtree_max_size is set to 6 and the parent node
  964 * becomes 4--6.
 965 */
 966static __always_inline void
 967augment_tree_propagate_from(struct vmap_area *va)
 968{
 969        /*
 970         * Populate the tree from bottom towards the root until
  971         * the newly calculated maximum available size of a checked
  972         * node is equal to its currently stored one.
 973         */
 974        free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
 975
 976#if DEBUG_AUGMENT_PROPAGATE_CHECK
 977        augment_tree_propagate_check();
 978#endif
 979}
 980
 981static void
 982insert_vmap_area(struct vmap_area *va,
 983        struct rb_root *root, struct list_head *head)
 984{
 985        struct rb_node **link;
 986        struct rb_node *parent;
 987
 988        link = find_va_links(va, root, NULL, &parent);
 989        if (link)
 990                link_va(va, root, parent, link, head);
 991}
 992
 993static void
 994insert_vmap_area_augment(struct vmap_area *va,
 995        struct rb_node *from, struct rb_root *root,
 996        struct list_head *head)
 997{
 998        struct rb_node **link;
 999        struct rb_node *parent;
1000
1001        if (from)
1002                link = find_va_links(va, NULL, from, &parent);
1003        else
1004                link = find_va_links(va, root, NULL, &parent);
1005
1006        if (link) {
1007                link_va(va, root, parent, link, head);
1008                augment_tree_propagate_from(va);
1009        }
1010}
1011
1012/*
 1013 * Merge a de-allocated chunk of VA memory with the previous
 1014 * and next free blocks. If no coalescing is done, a new
 1015 * free area is inserted. If the VA has been merged, its
 1016 * vmap_area object is freed.
 1017 *
 1018 * Please note, it can return NULL in case of overlapping
 1019 * ranges, accompanied by a WARN() report. Although that is
 1020 * buggy behaviour, the system can stay alive and keep
 1021 * going.
1022 */
1023static __always_inline struct vmap_area *
1024merge_or_add_vmap_area(struct vmap_area *va,
1025        struct rb_root *root, struct list_head *head)
1026{
1027        struct vmap_area *sibling;
1028        struct list_head *next;
1029        struct rb_node **link;
1030        struct rb_node *parent;
1031        bool merged = false;
1032
1033        /*
1034         * Find a place in the tree where VA potentially will be
1035         * inserted, unless it is merged with its sibling/siblings.
1036         */
1037        link = find_va_links(va, root, NULL, &parent);
1038        if (!link)
1039                return NULL;
1040
1041        /*
1042         * Get next node of VA to check if merging can be done.
1043         */
1044        next = get_va_next_sibling(parent, link);
1045        if (unlikely(next == NULL))
1046                goto insert;
1047
1048        /*
1049         * start            end
1050         * |                |
1051         * |<------VA------>|<-----Next----->|
1052         *                  |                |
1053         *                  start            end
1054         */
1055        if (next != head) {
1056                sibling = list_entry(next, struct vmap_area, list);
1057                if (sibling->va_start == va->va_end) {
1058                        sibling->va_start = va->va_start;
1059
1060                        /* Free vmap_area object. */
1061                        kmem_cache_free(vmap_area_cachep, va);
1062
1063                        /* Point to the new merged area. */
1064                        va = sibling;
1065                        merged = true;
1066                }
1067        }
1068
1069        /*
1070         * start            end
1071         * |                |
1072         * |<-----Prev----->|<------VA------>|
1073         *                  |                |
1074         *                  start            end
1075         */
1076        if (next->prev != head) {
1077                sibling = list_entry(next->prev, struct vmap_area, list);
1078                if (sibling->va_end == va->va_start) {
1079                        /*
1080                         * If both neighbors are coalesced, it is important
1081                         * to unlink the "next" node first, followed by merging
1082                         * with "previous" one. Otherwise the tree might not be
1083                         * fully populated if a sibling's augmented value is
1084                         * "normalized" because of rotation operations.
1085                         */
1086                        if (merged)
1087                                unlink_va(va, root);
1088
1089                        sibling->va_end = va->va_end;
1090
1091                        /* Free vmap_area object. */
1092                        kmem_cache_free(vmap_area_cachep, va);
1093
1094                        /* Point to the new merged area. */
1095                        va = sibling;
1096                        merged = true;
1097                }
1098        }
1099
1100insert:
1101        if (!merged)
1102                link_va(va, root, parent, link, head);
1103
1104        return va;
1105}
1106
1107static __always_inline struct vmap_area *
1108merge_or_add_vmap_area_augment(struct vmap_area *va,
1109        struct rb_root *root, struct list_head *head)
1110{
1111        va = merge_or_add_vmap_area(va, root, head);
1112        if (va)
1113                augment_tree_propagate_from(va);
1114
1115        return va;
1116}
1117
1118static __always_inline bool
1119is_within_this_va(struct vmap_area *va, unsigned long size,
1120        unsigned long align, unsigned long vstart)
1121{
1122        unsigned long nva_start_addr;
1123
1124        if (va->va_start > vstart)
1125                nva_start_addr = ALIGN(va->va_start, align);
1126        else
1127                nva_start_addr = ALIGN(vstart, align);
1128
 1129        /* Can overflow due to a big size or alignment. */
1130        if (nva_start_addr + size < nva_start_addr ||
1131                        nva_start_addr < vstart)
1132                return false;
1133
1134        return (nva_start_addr + size <= va->va_end);
1135}
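     /*
      * A small worked example of the overflow check above: with
      * nva_start_addr == ULONG_MAX - 0xfff and size == 0x2000, the sum
      * wraps around to 0x1000, which is smaller than nva_start_addr, so
      * the candidate area is correctly rejected.
      */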
1136
1137/*
 1138 * Find the first free block (the one with the lowest start address)
 1139 * in the tree that can accomplish the request described by the
 1140 * passed parameters.
1141 */
1142static __always_inline struct vmap_area *
1143find_vmap_lowest_match(unsigned long size,
1144        unsigned long align, unsigned long vstart)
1145{
1146        struct vmap_area *va;
1147        struct rb_node *node;
1148        unsigned long length;
1149
1150        /* Start from the root. */
1151        node = free_vmap_area_root.rb_node;
1152
1153        /* Adjust the search size for alignment overhead. */
1154        length = size + align - 1;
1155
1156        while (node) {
1157                va = rb_entry(node, struct vmap_area, rb_node);
1158
1159                if (get_subtree_max_size(node->rb_left) >= length &&
1160                                vstart < va->va_start) {
1161                        node = node->rb_left;
1162                } else {
1163                        if (is_within_this_va(va, size, align, vstart))
1164                                return va;
1165
1166                        /*
 1167                         * It does not make sense to go deeper into the right
 1168                         * sub-tree if it does not have a free block that is
 1169                         * equal to or bigger than the requested search length.
1170                         */
1171                        if (get_subtree_max_size(node->rb_right) >= length) {
1172                                node = node->rb_right;
1173                                continue;
1174                        }
1175
1176                        /*
 1177                         * OK. We roll back and find the first right sub-tree
 1178                         * that can satisfy the search criteria. This can happen
 1179                         * only once due to the "vstart" restriction.
1180                         */
1181                        while ((node = rb_parent(node))) {
1182                                va = rb_entry(node, struct vmap_area, rb_node);
1183                                if (is_within_this_va(va, size, align, vstart))
1184                                        return va;
1185
1186                                if (get_subtree_max_size(node->rb_right) >= length &&
1187                                                vstart <= va->va_start) {
1188                                        node = node->rb_right;
1189                                        break;
1190                                }
1191                        }
1192                }
1193        }
1194
1195        return NULL;
1196}
1197
1198#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1199#include <linux/random.h>
1200
1201static struct vmap_area *
1202find_vmap_lowest_linear_match(unsigned long size,
1203        unsigned long align, unsigned long vstart)
1204{
1205        struct vmap_area *va;
1206
1207        list_for_each_entry(va, &free_vmap_area_list, list) {
1208                if (!is_within_this_va(va, size, align, vstart))
1209                        continue;
1210
1211                return va;
1212        }
1213
1214        return NULL;
1215}
1216
1217static void
1218find_vmap_lowest_match_check(unsigned long size)
1219{
1220        struct vmap_area *va_1, *va_2;
1221        unsigned long vstart;
1222        unsigned int rnd;
1223
1224        get_random_bytes(&rnd, sizeof(rnd));
1225        vstart = VMALLOC_START + rnd;
1226
1227        va_1 = find_vmap_lowest_match(size, 1, vstart);
1228        va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
1229
1230        if (va_1 != va_2)
1231                pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1232                        va_1, va_2, vstart);
1233}
1234#endif
1235
1236enum fit_type {
1237        NOTHING_FIT = 0,
1238        FL_FIT_TYPE = 1,        /* full fit */
1239        LE_FIT_TYPE = 2,        /* left edge fit */
1240        RE_FIT_TYPE = 3,        /* right edge fit */
1241        NE_FIT_TYPE = 4         /* no edge fit */
1242};
1243
1244static __always_inline enum fit_type
1245classify_va_fit_type(struct vmap_area *va,
1246        unsigned long nva_start_addr, unsigned long size)
1247{
1248        enum fit_type type;
1249
1250        /* Check if it is within VA. */
1251        if (nva_start_addr < va->va_start ||
1252                        nva_start_addr + size > va->va_end)
1253                return NOTHING_FIT;
1254
1255        /* Now classify. */
1256        if (va->va_start == nva_start_addr) {
1257                if (va->va_end == nva_start_addr + size)
1258                        type = FL_FIT_TYPE;
1259                else
1260                        type = LE_FIT_TYPE;
1261        } else if (va->va_end == nva_start_addr + size) {
1262                type = RE_FIT_TYPE;
1263        } else {
1264                type = NE_FIT_TYPE;
1265        }
1266
1267        return type;
1268}
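     /*
      * A worked example, assuming a free VA covering [0x1000, 0x9000):
      * a request placed at 0x1000 with size 0x8000 is FL_FIT_TYPE,
      * at 0x1000 with size 0x2000 it is LE_FIT_TYPE, at 0x7000 with
      * size 0x2000 it is RE_FIT_TYPE, and at 0x3000 with size 0x2000
      * it is NE_FIT_TYPE.
      */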
1269
1270static __always_inline int
1271adjust_va_to_fit_type(struct vmap_area *va,
1272        unsigned long nva_start_addr, unsigned long size,
1273        enum fit_type type)
1274{
1275        struct vmap_area *lva = NULL;
1276
1277        if (type == FL_FIT_TYPE) {
1278                /*
1279                 * No need to split VA, it fully fits.
1280                 *
1281                 * |               |
1282                 * V      NVA      V
1283                 * |---------------|
1284                 */
1285                unlink_va(va, &free_vmap_area_root);
1286                kmem_cache_free(vmap_area_cachep, va);
1287        } else if (type == LE_FIT_TYPE) {
1288                /*
1289                 * Split left edge of fit VA.
1290                 *
1291                 * |       |
1292                 * V  NVA  V   R
1293                 * |-------|-------|
1294                 */
1295                va->va_start += size;
1296        } else if (type == RE_FIT_TYPE) {
1297                /*
1298                 * Split right edge of fit VA.
1299                 *
1300                 *         |       |
1301                 *     L   V  NVA  V
1302                 * |-------|-------|
1303                 */
1304                va->va_end = nva_start_addr;
1305        } else if (type == NE_FIT_TYPE) {
1306                /*
1307                 * Split no edge of fit VA.
1308                 *
1309                 *     |       |
1310                 *   L V  NVA  V R
1311                 * |---|-------|---|
1312                 */
1313                lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1314                if (unlikely(!lva)) {
1315                        /*
 1316                         * For the percpu allocator we do not do any pre-allocation
 1317                         * and leave it as it is. The reason is that it most likely
 1318                         * never ends up with NE_FIT_TYPE splitting: in case of
 1319                         * percpu allocations, offsets and sizes are aligned to a
 1320                         * fixed alignment request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
 1321                         * are its main fitting cases.
 1322                         *
 1323                         * There are a few exceptions though, for example the very
 1324                         * first allocation (early boot) when we have "one"
 1325                         * big free space that has to be split.
 1326                         *
 1327                         * We can also hit this path in case of regular "vmap"
 1328                         * allocations, if "this" current CPU was not preloaded.
 1329                         * See the comment in alloc_vmap_area() for why. If so, then
 1330                         * GFP_NOWAIT is used instead to get an extra object for
 1331                         * split purposes. That is rare and most of the time does
 1332                         * not occur.
 1333                         *
 1334                         * What happens if an allocation fails? Basically, the
 1335                         * "overflow" path is triggered to purge lazily freed
 1336                         * areas and free some memory, and then the "retry" path
 1337                         * repeats the attempt one more time. See more details
 1338                         * in the alloc_vmap_area() function.
1339                         */
1340                        lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1341                        if (!lva)
1342                                return -1;
1343                }
1344
1345                /*
1346                 * Build the remainder.
1347                 */
1348                lva->va_start = va->va_start;
1349                lva->va_end = nva_start_addr;
1350
1351                /*
1352                 * Shrink this VA to remaining size.
1353                 */
1354                va->va_start = nva_start_addr + size;
1355        } else {
1356                return -1;
1357        }
1358
1359        if (type != FL_FIT_TYPE) {
1360                augment_tree_propagate_from(va);
1361
1362                if (lva)        /* type == NE_FIT_TYPE */
1363                        insert_vmap_area_augment(lva, &va->rb_node,
1364                                &free_vmap_area_root, &free_vmap_area_list);
1365        }
1366
1367        return 0;
1368}
1369
1370/*
 1371 * Returns the start address of the newly allocated area on success.
 1372 * Otherwise "vend" is returned, which indicates failure.
1373 */
1374static __always_inline unsigned long
1375__alloc_vmap_area(unsigned long size, unsigned long align,
1376        unsigned long vstart, unsigned long vend)
1377{
1378        unsigned long nva_start_addr;
1379        struct vmap_area *va;
1380        enum fit_type type;
1381        int ret;
1382
1383        va = find_vmap_lowest_match(size, align, vstart);
1384        if (unlikely(!va))
1385                return vend;
1386
1387        if (va->va_start > vstart)
1388                nva_start_addr = ALIGN(va->va_start, align);
1389        else
1390                nva_start_addr = ALIGN(vstart, align);
1391
1392        /* Check the "vend" restriction. */
1393        if (nva_start_addr + size > vend)
1394                return vend;
1395
1396        /* Classify what we have found. */
1397        type = classify_va_fit_type(va, nva_start_addr, size);
1398        if (WARN_ON_ONCE(type == NOTHING_FIT))
1399                return vend;
1400
1401        /* Update the free vmap_area. */
1402        ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1403        if (ret)
1404                return vend;
1405
1406#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1407        find_vmap_lowest_match_check(size);
1408#endif
1409
1410        return nva_start_addr;
1411}
1412
1413/*
1414 * Free a region of KVA allocated by alloc_vmap_area
1415 */
1416static void free_vmap_area(struct vmap_area *va)
1417{
1418        /*
1419         * Remove from the busy tree/list.
1420         */
1421        spin_lock(&vmap_area_lock);
1422        unlink_va(va, &vmap_area_root);
1423        spin_unlock(&vmap_area_lock);
1424
1425        /*
1426         * Insert/Merge it back to the free tree/list.
1427         */
1428        spin_lock(&free_vmap_area_lock);
1429        merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1430        spin_unlock(&free_vmap_area_lock);
1431}
1432
1433static inline void
1434preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1435{
1436        struct vmap_area *va = NULL;
1437
1438        /*
 1439         * Preload this CPU with one extra vmap_area object. It is used
 1440         * when the fit type of a free area is NE_FIT_TYPE. It guarantees
 1441         * that the CPU doing an allocation is preloaded.
 1442         *
 1443         * We do it in non-atomic context, which allows us to use more
 1444         * permissive allocation masks and thus be more stable under
 1445         * low-memory conditions and high memory pressure.
1446         */
1447        if (!this_cpu_read(ne_fit_preload_node))
1448                va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1449
1450        spin_lock(lock);
1451
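             /*
              * The spare object was allocated with preemption enabled, so
              * by the time the lock is taken this CPU's preload slot may
              * already have been populated (e.g. after a migration). In
              * that case __this_cpu_cmpxchg() leaves the slot untouched and
              * returns the old non-NULL value, and the spare object is
              * simply freed again.
              */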
1452        if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1453                kmem_cache_free(vmap_area_cachep, va);
1454}
1455
1456/*
1457 * Allocate a region of KVA of the specified size and alignment, within the
1458 * vstart and vend.
1459 */
1460static struct vmap_area *alloc_vmap_area(unsigned long size,
1461                                unsigned long align,
1462                                unsigned long vstart, unsigned long vend,
1463                                int node, gfp_t gfp_mask)
1464{
1465        struct vmap_area *va;
1466        unsigned long addr;
1467        int purged = 0;
1468        int ret;
1469
1470        BUG_ON(!size);
1471        BUG_ON(offset_in_page(size));
1472        BUG_ON(!is_power_of_2(align));
1473
1474        if (unlikely(!vmap_initialized))
1475                return ERR_PTR(-EBUSY);
1476
1477        might_sleep();
1478        gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1479
1480        va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1481        if (unlikely(!va))
1482                return ERR_PTR(-ENOMEM);
1483
1484        /*
1485         * Only scan the relevant parts containing pointers to other objects
1486         * to avoid false negatives.
1487         */
1488        kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1489
1490retry:
1491        preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1492        addr = __alloc_vmap_area(size, align, vstart, vend);
1493        spin_unlock(&free_vmap_area_lock);
1494
1495        /*
1496         * If an allocation fails, the "vend" address is
1497         * returned. Therefore trigger the overflow path.
1498         */
1499        if (unlikely(addr == vend))
1500                goto overflow;
1501
1502        va->va_start = addr;
1503        va->va_end = addr + size;
1504        va->vm = NULL;
1505
1506        spin_lock(&vmap_area_lock);
1507        insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1508        spin_unlock(&vmap_area_lock);
1509
1510        BUG_ON(!IS_ALIGNED(va->va_start, align));
1511        BUG_ON(va->va_start < vstart);
1512        BUG_ON(va->va_end > vend);
1513
1514        ret = kasan_populate_vmalloc(addr, size);
1515        if (ret) {
1516                free_vmap_area(va);
1517                return ERR_PTR(ret);
1518        }
1519
1520        return va;
1521
1522overflow:
1523        if (!purged) {
1524                purge_vmap_area_lazy();
1525                purged = 1;
1526                goto retry;
1527        }
1528
1529        if (gfpflags_allow_blocking(gfp_mask)) {
1530                unsigned long freed = 0;
1531                blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1532                if (freed > 0) {
1533                        purged = 0;
1534                        goto retry;
1535                }
1536        }
1537
1538        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1539                pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1540                        size);
1541
1542        kmem_cache_free(vmap_area_cachep, va);
1543        return ERR_PTR(-EBUSY);
1544}
1545
1546int register_vmap_purge_notifier(struct notifier_block *nb)
1547{
1548        return blocking_notifier_chain_register(&vmap_notify_list, nb);
1549}
1550EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1551
1552int unregister_vmap_purge_notifier(struct notifier_block *nb)
1553{
1554        return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1555}
1556EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
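     /*
      * A minimal sketch of a purge notifier user (all "example_*" names are
      * hypothetical). The chain is invoked from the allocation overflow path
      * in alloc_vmap_area() with a pointer to an unsigned long; a callback
      * that managed to release vmap space should add the number of freed
      * pages to it.
      *
      *	static int example_vmap_purge(struct notifier_block *nb,
      *				      unsigned long action, void *data)
      *	{
      *		unsigned long *freed = data;
      *
      *		*freed += example_drop_cached_mappings();
      *		return NOTIFY_OK;
      *	}
      *
      *	static struct notifier_block example_vmap_nb = {
      *		.notifier_call	= example_vmap_purge,
      *	};
      *
      *	register_vmap_purge_notifier(&example_vmap_nb);
      */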
1557
1558/*
1559 * lazy_max_pages is the maximum amount of virtual address space we gather up
1560 * before attempting to purge with a TLB flush.
1561 *
1562 * There is a tradeoff here: a larger number will cover more kernel page tables
1563 * and take slightly longer to purge, but it will linearly reduce the number of
1564 * global TLB flushes that must be performed. It would seem natural to scale
1565 * this number up linearly with the number of CPUs (because vmapping activity
1566 * could also scale linearly with the number of CPUs), however it is likely
1567 * that in practice, workloads might be constrained in other ways that mean
1568 * vmap activity will not scale linearly with CPUs. Also, I want to be
1569 * conservative and not introduce a big latency on huge systems, so go with
1570 * a less aggressive log scale. It will still be an improvement over the old
1571 * code, and it will be simple to change the scale factor if we find that it
1572 * becomes a problem on bigger systems.
1573 */
1574static unsigned long lazy_max_pages(void)
1575{
1576        unsigned int log;
1577
1578        log = fls(num_online_cpus());
1579
1580        return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1581}
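/*
 * Worked example (illustrative, not part of the original file): with 16
 * online CPUs and 4 KiB pages, fls(16) == 5, so lazy_max_pages() returns
 * 5 * (32 MiB / 4 KiB) == 40960 pages, i.e. roughly 160 MiB of lazily
 * freed KVA may accumulate before a purge is triggered.
 */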
1582
1583static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1584
1585/*
1586 * Serialize vmap purging.  There is no actual critical section protected
1587 * by this lock, but we want to avoid concurrent calls for performance
1588 * reasons and to make the pcpu_get_vm_areas more deterministic.
1589 */
1590static DEFINE_MUTEX(vmap_purge_lock);
1591
1592/* for per-CPU blocks */
1593static void purge_fragmented_blocks_allcpus(void);
1594
1595/*
1596 * Called before a call to iounmap() if the caller wants vm_area_structs
1597 * to be freed immediately.
1598 */
1599void set_iounmap_nonlazy(void)
1600{
1601        atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1602}
1603
1604/*
1605 * Purges all lazily-freed vmap areas.
1606 */
1607static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1608{
1609        unsigned long resched_threshold;
1610        struct list_head local_pure_list;
1611        struct vmap_area *va, *n_va;
1612
1613        lockdep_assert_held(&vmap_purge_lock);
1614
1615        spin_lock(&purge_vmap_area_lock);
1616        purge_vmap_area_root = RB_ROOT;
1617        list_replace_init(&purge_vmap_area_list, &local_pure_list);
1618        spin_unlock(&purge_vmap_area_lock);
1619
1620        if (unlikely(list_empty(&local_pure_list)))
1621                return false;
1622
1623        start = min(start,
1624                list_first_entry(&local_pure_list,
1625                        struct vmap_area, list)->va_start);
1626
1627        end = max(end,
1628                list_last_entry(&local_pure_list,
1629                        struct vmap_area, list)->va_end);
1630
1631        flush_tlb_kernel_range(start, end);
1632        resched_threshold = lazy_max_pages() << 1;
1633
1634        spin_lock(&free_vmap_area_lock);
1635        list_for_each_entry_safe(va, n_va, &local_pure_list, list) {
1636                unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1637                unsigned long orig_start = va->va_start;
1638                unsigned long orig_end = va->va_end;
1639
1640                /*
1641                 * Finally insert or merge lazily-freed area. It is
1642                 * detached and there is no need to "unlink" it from
1643                 * anything.
1644                 */
1645                va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1646                                &free_vmap_area_list);
1647
1648                if (!va)
1649                        continue;
1650
1651                if (is_vmalloc_or_module_addr((void *)orig_start))
1652                        kasan_release_vmalloc(orig_start, orig_end,
1653                                              va->va_start, va->va_end);
1654
1655                atomic_long_sub(nr, &vmap_lazy_nr);
1656
1657                if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1658                        cond_resched_lock(&free_vmap_area_lock);
1659        }
1660        spin_unlock(&free_vmap_area_lock);
1661        return true;
1662}
1663
1664/*
1665 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1666 * is already purging.
1667 */
1668static void try_purge_vmap_area_lazy(void)
1669{
1670        if (mutex_trylock(&vmap_purge_lock)) {
1671                __purge_vmap_area_lazy(ULONG_MAX, 0);
1672                mutex_unlock(&vmap_purge_lock);
1673        }
1674}
1675
1676/*
1677 * Kick off a purge of the outstanding lazy areas.
1678 */
1679static void purge_vmap_area_lazy(void)
1680{
1681        mutex_lock(&vmap_purge_lock);
1682        purge_fragmented_blocks_allcpus();
1683        __purge_vmap_area_lazy(ULONG_MAX, 0);
1684        mutex_unlock(&vmap_purge_lock);
1685}
1686
1687/*
1688 * Free a vmap area, caller ensuring that the area has been unmapped
1689 * and flush_cache_vunmap had been called for the correct range
1690 * previously.
1691 */
1692static void free_vmap_area_noflush(struct vmap_area *va)
1693{
1694        unsigned long nr_lazy;
1695
1696        spin_lock(&vmap_area_lock);
1697        unlink_va(va, &vmap_area_root);
1698        spin_unlock(&vmap_area_lock);
1699
1700        nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1701                                PAGE_SHIFT, &vmap_lazy_nr);
1702
1703        /*
1704         * Merge or place it to the purge tree/list.
1705         */
1706        spin_lock(&purge_vmap_area_lock);
1707        merge_or_add_vmap_area(va,
1708                &purge_vmap_area_root, &purge_vmap_area_list);
1709        spin_unlock(&purge_vmap_area_lock);
1710
1711        /* After this point, we may free va at any time */
1712        if (unlikely(nr_lazy > lazy_max_pages()))
1713                try_purge_vmap_area_lazy();
1714}
1715
1716/*
1717 * Free and unmap a vmap area
1718 */
1719static void free_unmap_vmap_area(struct vmap_area *va)
1720{
1721        flush_cache_vunmap(va->va_start, va->va_end);
1722        vunmap_range_noflush(va->va_start, va->va_end);
1723        if (debug_pagealloc_enabled_static())
1724                flush_tlb_kernel_range(va->va_start, va->va_end);
1725
1726        free_vmap_area_noflush(va);
1727}
1728
1729static struct vmap_area *find_vmap_area(unsigned long addr)
1730{
1731        struct vmap_area *va;
1732
1733        spin_lock(&vmap_area_lock);
1734        va = __find_vmap_area(addr);
1735        spin_unlock(&vmap_area_lock);
1736
1737        return va;
1738}
1739
1740/*** Per cpu kva allocator ***/
1741
1742/*
1743 * vmap space is limited especially on 32 bit architectures. Ensure there is
1744 * room for at least 16 percpu vmap blocks per CPU.
1745 */
1746/*
1747 * If VMALLOC_START and VMALLOC_END were constants, we would simply
1748 * #define VMALLOC_SPACE             (VMALLOC_END-VMALLOC_START). Since they
1749 * are not, guess instead (we just need a rough idea).
1750 */
1751#if BITS_PER_LONG == 32
1752#define VMALLOC_SPACE           (128UL*1024*1024)
1753#else
1754#define VMALLOC_SPACE           (128UL*1024*1024*1024)
1755#endif
1756
1757#define VMALLOC_PAGES           (VMALLOC_SPACE / PAGE_SIZE)
1758#define VMAP_MAX_ALLOC          BITS_PER_LONG   /* 256K with 4K pages */
1759#define VMAP_BBMAP_BITS_MAX     1024    /* 4MB with 4K pages */
1760#define VMAP_BBMAP_BITS_MIN     (VMAP_MAX_ALLOC*2)
1761#define VMAP_MIN(x, y)          ((x) < (y) ? (x) : (y)) /* can't use min() */
1762#define VMAP_MAX(x, y)          ((x) > (y) ? (x) : (y)) /* can't use max() */
1763#define VMAP_BBMAP_BITS         \
1764                VMAP_MIN(VMAP_BBMAP_BITS_MAX,   \
1765                VMAP_MAX(VMAP_BBMAP_BITS_MIN,   \
1766                        VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1767
1768#define VMAP_BLOCK_SIZE         (VMAP_BBMAP_BITS * PAGE_SIZE)
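/*
 * Worked example (illustrative, not part of the original file): on a 64-bit
 * kernel with 4 KiB pages and NR_CPUS == 64, VMALLOC_PAGES is
 * 128 GiB / 4 KiB == 32M pages, so VMALLOC_PAGES / 64 / 16 == 32768 bits;
 * this is clamped to VMAP_BBMAP_BITS_MAX (1024), giving a VMAP_BLOCK_SIZE
 * of 1024 * 4 KiB == 4 MiB per per-CPU vmap block.
 */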
1769
1770struct vmap_block_queue {
1771        spinlock_t lock;
1772        struct list_head free;
1773};
1774
1775struct vmap_block {
1776        spinlock_t lock;
1777        struct vmap_area *va;
1778        unsigned long free, dirty;
1779        unsigned long dirty_min, dirty_max; /*< dirty range */
1780        struct list_head free_list;
1781        struct rcu_head rcu_head;
1782        struct list_head purge;
1783};
1784
1785/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1786static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1787
1788/*
1789 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1790 * in the free path. Could get rid of this if we change the API to return a
1791 * "cookie" from alloc, to be passed to free. But no big deal yet.
1792 */
1793static DEFINE_XARRAY(vmap_blocks);
1794
1795/*
1796 * We should probably have a fallback mechanism to allocate virtual memory
1797 * out of partially filled vmap blocks. However vmap block sizing should be
1798 * fairly reasonable according to the vmalloc size, so it shouldn't be a
1799 * big problem.
1800 */
1801
1802static unsigned long addr_to_vb_idx(unsigned long addr)
1803{
1804        addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1805        addr /= VMAP_BLOCK_SIZE;
1806        return addr;
1807}
1808
1809static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1810{
1811        unsigned long addr;
1812
1813        addr = va_start + (pages_off << PAGE_SHIFT);
1814        BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1815        return (void *)addr;
1816}
1817
1818/**
1819 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in
1820 *                  it; the number of pages cannot exceed VMAP_BBMAP_BITS
1821 * @order:    how many 2^order pages should be occupied in newly allocated block
1822 * @gfp_mask: flags for the page level allocator
1823 *
1824 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1825 */
1826static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1827{
1828        struct vmap_block_queue *vbq;
1829        struct vmap_block *vb;
1830        struct vmap_area *va;
1831        unsigned long vb_idx;
1832        int node, err;
1833        void *vaddr;
1834
1835        node = numa_node_id();
1836
1837        vb = kmalloc_node(sizeof(struct vmap_block),
1838                        gfp_mask & GFP_RECLAIM_MASK, node);
1839        if (unlikely(!vb))
1840                return ERR_PTR(-ENOMEM);
1841
1842        va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1843                                        VMALLOC_START, VMALLOC_END,
1844                                        node, gfp_mask);
1845        if (IS_ERR(va)) {
1846                kfree(vb);
1847                return ERR_CAST(va);
1848        }
1849
1850        vaddr = vmap_block_vaddr(va->va_start, 0);
1851        spin_lock_init(&vb->lock);
1852        vb->va = va;
1853        /* At least something should be left free */
1854        BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1855        vb->free = VMAP_BBMAP_BITS - (1UL << order);
1856        vb->dirty = 0;
1857        vb->dirty_min = VMAP_BBMAP_BITS;
1858        vb->dirty_max = 0;
1859        INIT_LIST_HEAD(&vb->free_list);
1860
1861        vb_idx = addr_to_vb_idx(va->va_start);
1862        err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1863        if (err) {
1864                kfree(vb);
1865                free_vmap_area(va);
1866                return ERR_PTR(err);
1867        }
1868
1869        vbq = &get_cpu_var(vmap_block_queue);
1870        spin_lock(&vbq->lock);
1871        list_add_tail_rcu(&vb->free_list, &vbq->free);
1872        spin_unlock(&vbq->lock);
1873        put_cpu_var(vmap_block_queue);
1874
1875        return vaddr;
1876}
1877
1878static void free_vmap_block(struct vmap_block *vb)
1879{
1880        struct vmap_block *tmp;
1881
1882        tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1883        BUG_ON(tmp != vb);
1884
1885        free_vmap_area_noflush(vb->va);
1886        kfree_rcu(vb, rcu_head);
1887}
1888
1889static void purge_fragmented_blocks(int cpu)
1890{
1891        LIST_HEAD(purge);
1892        struct vmap_block *vb;
1893        struct vmap_block *n_vb;
1894        struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1895
1896        rcu_read_lock();
1897        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1898
1899                if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1900                        continue;
1901
1902                spin_lock(&vb->lock);
1903                if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1904                        vb->free = 0; /* prevent further allocs after releasing lock */
1905                        vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1906                        vb->dirty_min = 0;
1907                        vb->dirty_max = VMAP_BBMAP_BITS;
1908                        spin_lock(&vbq->lock);
1909                        list_del_rcu(&vb->free_list);
1910                        spin_unlock(&vbq->lock);
1911                        spin_unlock(&vb->lock);
1912                        list_add_tail(&vb->purge, &purge);
1913                } else
1914                        spin_unlock(&vb->lock);
1915        }
1916        rcu_read_unlock();
1917
1918        list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1919                list_del(&vb->purge);
1920                free_vmap_block(vb);
1921        }
1922}
1923
1924static void purge_fragmented_blocks_allcpus(void)
1925{
1926        int cpu;
1927
1928        for_each_possible_cpu(cpu)
1929                purge_fragmented_blocks(cpu);
1930}
1931
1932static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1933{
1934        struct vmap_block_queue *vbq;
1935        struct vmap_block *vb;
1936        void *vaddr = NULL;
1937        unsigned int order;
1938
1939        BUG_ON(offset_in_page(size));
1940        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1941        if (WARN_ON(size == 0)) {
1942                /*
1943                 * Allocating 0 bytes isn't what the caller wants since
1944                 * get_order(0) returns a funny result. Just warn and
1945                 * terminate early.
1946                 */
1947                return NULL;
1948        }
1949        order = get_order(size);
1950
1951        rcu_read_lock();
1952        vbq = &get_cpu_var(vmap_block_queue);
1953        list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1954                unsigned long pages_off;
1955
1956                spin_lock(&vb->lock);
1957                if (vb->free < (1UL << order)) {
1958                        spin_unlock(&vb->lock);
1959                        continue;
1960                }
1961
1962                pages_off = VMAP_BBMAP_BITS - vb->free;
1963                vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1964                vb->free -= 1UL << order;
1965                if (vb->free == 0) {
1966                        spin_lock(&vbq->lock);
1967                        list_del_rcu(&vb->free_list);
1968                        spin_unlock(&vbq->lock);
1969                }
1970
1971                spin_unlock(&vb->lock);
1972                break;
1973        }
1974
1975        put_cpu_var(vmap_block_queue);
1976        rcu_read_unlock();
1977
1978        /* Allocate new block if nothing was found */
1979        if (!vaddr)
1980                vaddr = new_vmap_block(order, gfp_mask);
1981
1982        return vaddr;
1983}
1984
1985static void vb_free(unsigned long addr, unsigned long size)
1986{
1987        unsigned long offset;
1988        unsigned int order;
1989        struct vmap_block *vb;
1990
1991        BUG_ON(offset_in_page(size));
1992        BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1993
1994        flush_cache_vunmap(addr, addr + size);
1995
1996        order = get_order(size);
1997        offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
1998        vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
1999
2000        vunmap_range_noflush(addr, addr + size);
2001
2002        if (debug_pagealloc_enabled_static())
2003                flush_tlb_kernel_range(addr, addr + size);
2004
2005        spin_lock(&vb->lock);
2006
2007        /* Expand dirty range */
2008        vb->dirty_min = min(vb->dirty_min, offset);
2009        vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2010
2011        vb->dirty += 1UL << order;
2012        if (vb->dirty == VMAP_BBMAP_BITS) {
2013                BUG_ON(vb->free);
2014                spin_unlock(&vb->lock);
2015                free_vmap_block(vb);
2016        } else
2017                spin_unlock(&vb->lock);
2018}
2019
2020static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2021{
2022        int cpu;
2023
2024        if (unlikely(!vmap_initialized))
2025                return;
2026
2027        might_sleep();
2028
2029        for_each_possible_cpu(cpu) {
2030                struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2031                struct vmap_block *vb;
2032
2033                rcu_read_lock();
2034                list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2035                        spin_lock(&vb->lock);
2036                        if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
2037                                unsigned long va_start = vb->va->va_start;
2038                                unsigned long s, e;
2039
2040                                s = va_start + (vb->dirty_min << PAGE_SHIFT);
2041                                e = va_start + (vb->dirty_max << PAGE_SHIFT);
2042
2043                                start = min(s, start);
2044                                end   = max(e, end);
2045
2046                                flush = 1;
2047                        }
2048                        spin_unlock(&vb->lock);
2049                }
2050                rcu_read_unlock();
2051        }
2052
2053        mutex_lock(&vmap_purge_lock);
2054        purge_fragmented_blocks_allcpus();
2055        if (!__purge_vmap_area_lazy(start, end) && flush)
2056                flush_tlb_kernel_range(start, end);
2057        mutex_unlock(&vmap_purge_lock);
2058}
2059
2060/**
2061 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2062 *
2063 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2064 * to amortize TLB flushing overheads. What this means is that any page you
2065 * have now, may, in a former life, have been mapped into kernel virtual
2066 * address by the vmap layer and so there might be some CPUs with TLB entries
2067 * still referencing that page (additional to the regular 1:1 kernel mapping).
2068 *
2069 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2070 * be sure that none of the pages we have control over will have any aliases
2071 * from the vmap layer.
2072 */
2073void vm_unmap_aliases(void)
2074{
2075        unsigned long start = ULONG_MAX, end = 0;
2076        int flush = 0;
2077
2078        _vm_unmap_aliases(start, end, flush);
2079}
2080EXPORT_SYMBOL_GPL(vm_unmap_aliases);
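/*
 * Illustrative sketch (not part of the original file): a typical caller
 * flushes lazy vmap aliases before changing permissions on the direct map,
 * so no stale TLB entry can still reach the pages through an old vmalloc
 * alias. "addr" and "nr_pages" are hypothetical; set_memory_ro() is the
 * generic helper from <linux/set_memory.h>.
 */
#if 0	/* example only, not compiled */
static int my_protect_buffer(unsigned long addr, int nr_pages)
{
	/* Make sure no lazily-unmapped alias of these pages survives. */
	vm_unmap_aliases();

	/* Now it is safe to tighten the direct-map permissions. */
	return set_memory_ro(addr, nr_pages);
}
#endif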
2081
2082/**
2083 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2084 * @mem: the pointer returned by vm_map_ram
2085 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2086 */
2087void vm_unmap_ram(const void *mem, unsigned int count)
2088{
2089        unsigned long size = (unsigned long)count << PAGE_SHIFT;
2090        unsigned long addr = (unsigned long)mem;
2091        struct vmap_area *va;
2092
2093        might_sleep();
2094        BUG_ON(!addr);
2095        BUG_ON(addr < VMALLOC_START);
2096        BUG_ON(addr > VMALLOC_END);
2097        BUG_ON(!PAGE_ALIGNED(addr));
2098
2099        kasan_poison_vmalloc(mem, size);
2100
2101        if (likely(count <= VMAP_MAX_ALLOC)) {
2102                debug_check_no_locks_freed(mem, size);
2103                vb_free(addr, size);
2104                return;
2105        }
2106
2107        va = find_vmap_area(addr);
2108        BUG_ON(!va);
2109        debug_check_no_locks_freed((void *)va->va_start,
2110                                    (va->va_end - va->va_start));
2111        free_unmap_vmap_area(va);
2112}
2113EXPORT_SYMBOL(vm_unmap_ram);
2114
2115/**
2116 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2117 * @pages: an array of pointers to the pages to be mapped
2118 * @count: number of pages
2119 * @node: prefer to allocate data structures on this node
2120 *
2121 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2122 * faster than vmap(), so it is preferred.  However, mixing long-lived and
2123 * short-lived objects with vm_map_ram() can consume lots of address space
2124 * through fragmentation (especially on a 32bit machine) and eventually lead
2125 * to allocation failures.  Use this function only for short-lived objects.
2126 *
2127 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2128 */
2129void *vm_map_ram(struct page **pages, unsigned int count, int node)
2130{
2131        unsigned long size = (unsigned long)count << PAGE_SHIFT;
2132        unsigned long addr;
2133        void *mem;
2134
2135        if (likely(count <= VMAP_MAX_ALLOC)) {
2136                mem = vb_alloc(size, GFP_KERNEL);
2137                if (IS_ERR(mem))
2138                        return NULL;
2139                addr = (unsigned long)mem;
2140        } else {
2141                struct vmap_area *va;
2142                va = alloc_vmap_area(size, PAGE_SIZE,
2143                                VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
2144                if (IS_ERR(va))
2145                        return NULL;
2146
2147                addr = va->va_start;
2148                mem = (void *)addr;
2149        }
2150
2151        kasan_unpoison_vmalloc(mem, size);
2152
2153        if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2154                                pages, PAGE_SHIFT) < 0) {
2155                vm_unmap_ram(mem, count);
2156                return NULL;
2157        }
2158
2159        return mem;
2160}
2161EXPORT_SYMBOL(vm_map_ram);
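/*
 * Illustrative sketch (not part of the original file): a short-lived
 * mapping of a small page array, as recommended in the comment above.
 * my_use_buffer() and the "pages" array are hypothetical; note that the
 * same @count must be passed to vm_unmap_ram() as was given to
 * vm_map_ram(), since partial unmaps are not supported.
 */
#if 0	/* example only, not compiled */
static int my_map_and_use(struct page **pages, unsigned int count)
{
	void *va = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!va)
		return -ENOMEM;

	my_use_buffer(va, (size_t)count << PAGE_SHIFT);

	vm_unmap_ram(va, count);
	return 0;
}
#endif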
2162
2163static struct vm_struct *vmlist __initdata;
2164
2165static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2166{
2167#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2168        return vm->page_order;
2169#else
2170        return 0;
2171#endif
2172}
2173
2174static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2175{
2176#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2177        vm->page_order = order;
2178#else
2179        BUG_ON(order != 0);
2180#endif
2181}
2182
2183/**
2184 * vm_area_add_early - add vmap area early during boot
2185 * @vm: vm_struct to add
2186 *
2187 * This function is used to add fixed kernel vm area to vmlist before
2188 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2189 * should contain proper values and the other fields should be zero.
2190 *
2191 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2192 */
2193void __init vm_area_add_early(struct vm_struct *vm)
2194{
2195        struct vm_struct *tmp, **p;
2196
2197        BUG_ON(vmap_initialized);
2198        for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2199                if (tmp->addr >= vm->addr) {
2200                        BUG_ON(tmp->addr < vm->addr + vm->size);
2201                        break;
2202                } else
2203                        BUG_ON(tmp->addr + tmp->size > vm->addr);
2204        }
2205        vm->next = *p;
2206        *p = vm;
2207}
2208
2209/**
2210 * vm_area_register_early - register vmap area early during boot
2211 * @vm: vm_struct to register
2212 * @align: requested alignment
2213 *
2214 * This function is used to register kernel vm area before
2215 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2216 * proper values on entry and other fields should be zero.  On return,
2217 * vm->addr contains the allocated address.
2218 *
2219 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2220 */
2221void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2222{
2223        static size_t vm_init_off __initdata;
2224        unsigned long addr;
2225
2226        addr = ALIGN(VMALLOC_START + vm_init_off, align);
2227        vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
2228
2229        vm->addr = (void *)addr;
2230
2231        vm_area_add_early(vm);
2232}
2233
2234static void vmap_init_free_space(void)
2235{
2236        unsigned long vmap_start = 1;
2237        const unsigned long vmap_end = ULONG_MAX;
2238        struct vmap_area *busy, *free;
2239
2240        /*
2241         *     B     F     B     B     B     F
2242         * -|-----|.....|-----|-----|-----|.....|-
2243         *  |           The KVA space           |
2244         *  |<--------------------------------->|
2245         */
2246        list_for_each_entry(busy, &vmap_area_list, list) {
2247                if (busy->va_start - vmap_start > 0) {
2248                        free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2249                        if (!WARN_ON_ONCE(!free)) {
2250                                free->va_start = vmap_start;
2251                                free->va_end = busy->va_start;
2252
2253                                insert_vmap_area_augment(free, NULL,
2254                                        &free_vmap_area_root,
2255                                                &free_vmap_area_list);
2256                        }
2257                }
2258
2259                vmap_start = busy->va_end;
2260        }
2261
2262        if (vmap_end - vmap_start > 0) {
2263                free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2264                if (!WARN_ON_ONCE(!free)) {
2265                        free->va_start = vmap_start;
2266                        free->va_end = vmap_end;
2267
2268                        insert_vmap_area_augment(free, NULL,
2269                                &free_vmap_area_root,
2270                                        &free_vmap_area_list);
2271                }
2272        }
2273}
2274
2275void __init vmalloc_init(void)
2276{
2277        struct vmap_area *va;
2278        struct vm_struct *tmp;
2279        int i;
2280
2281        /*
2282         * Create the cache for vmap_area objects.
2283         */
2284        vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
2285
2286        for_each_possible_cpu(i) {
2287                struct vmap_block_queue *vbq;
2288                struct vfree_deferred *p;
2289
2290                vbq = &per_cpu(vmap_block_queue, i);
2291                spin_lock_init(&vbq->lock);
2292                INIT_LIST_HEAD(&vbq->free);
2293                p = &per_cpu(vfree_deferred, i);
2294                init_llist_head(&p->list);
2295                INIT_WORK(&p->wq, free_work);
2296        }
2297
2298        /* Import existing vmlist entries. */
2299        for (tmp = vmlist; tmp; tmp = tmp->next) {
2300                va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2301                if (WARN_ON_ONCE(!va))
2302                        continue;
2303
2304                va->va_start = (unsigned long)tmp->addr;
2305                va->va_end = va->va_start + tmp->size;
2306                va->vm = tmp;
2307                insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2308        }
2309
2310        /*
2311         * Now we can initialize a free vmap space.
2312         */
2313        vmap_init_free_space();
2314        vmap_initialized = true;
2315}
2316
2317static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2318        struct vmap_area *va, unsigned long flags, const void *caller)
2319{
2320        vm->flags = flags;
2321        vm->addr = (void *)va->va_start;
2322        vm->size = va->va_end - va->va_start;
2323        vm->caller = caller;
2324        va->vm = vm;
2325}
2326
2327static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2328                              unsigned long flags, const void *caller)
2329{
2330        spin_lock(&vmap_area_lock);
2331        setup_vmalloc_vm_locked(vm, va, flags, caller);
2332        spin_unlock(&vmap_area_lock);
2333}
2334
2335static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2336{
2337        /*
2338         * Before removing VM_UNINITIALIZED,
2339         * we should make sure that vm has proper values.
2340         * Pair with smp_rmb() in show_numa_info().
2341         */
2342        smp_wmb();
2343        vm->flags &= ~VM_UNINITIALIZED;
2344}
2345
2346static struct vm_struct *__get_vm_area_node(unsigned long size,
2347                unsigned long align, unsigned long shift, unsigned long flags,
2348                unsigned long start, unsigned long end, int node,
2349                gfp_t gfp_mask, const void *caller)
2350{
2351        struct vmap_area *va;
2352        struct vm_struct *area;
2353        unsigned long requested_size = size;
2354
2355        BUG_ON(in_interrupt());
2356        size = ALIGN(size, 1ul << shift);
2357        if (unlikely(!size))
2358                return NULL;
2359
2360        if (flags & VM_IOREMAP)
2361                align = 1ul << clamp_t(int, get_count_order_long(size),
2362                                       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2363
2364        area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2365        if (unlikely(!area))
2366                return NULL;
2367
2368        if (!(flags & VM_NO_GUARD))
2369                size += PAGE_SIZE;
2370
2371        va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2372        if (IS_ERR(va)) {
2373                kfree(area);
2374                return NULL;
2375        }
2376
2377        kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2378
2379        setup_vmalloc_vm(area, va, flags, caller);
2380
2381        return area;
2382}
2383
2384struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2385                                       unsigned long start, unsigned long end,
2386                                       const void *caller)
2387{
2388        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2389                                  NUMA_NO_NODE, GFP_KERNEL, caller);
2390}
2391
2392/**
2393 * get_vm_area - reserve a contiguous kernel virtual area
2394 * @size:        size of the area
2395 * @flags:       %VM_IOREMAP for I/O mappings or VM_ALLOC
2396 *
2397 * Search for an area of @size in the kernel virtual mapping area
2398 * and reserve it for our purposes.  Returns the area descriptor
2399 * on success or %NULL on failure.
2400 *
2401 * Return: the area descriptor on success or %NULL on failure.
2402 */
2403struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2404{
2405        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2406                                  VMALLOC_START, VMALLOC_END,
2407                                  NUMA_NO_NODE, GFP_KERNEL,
2408                                  __builtin_return_address(0));
2409}
2410
2411struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2412                                const void *caller)
2413{
2414        return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2415                                  VMALLOC_START, VMALLOC_END,
2416                                  NUMA_NO_NODE, GFP_KERNEL, caller);
2417}
2418
2419/**
2420 * find_vm_area - find a continuous kernel virtual area
2421 * @addr:         base address
2422 *
2423 * Search for the kernel VM area starting at @addr, and return it.
2424 * It is up to the caller to do all required locking to keep the returned
2425 * pointer valid.
2426 *
2427 * Return: the area descriptor on success or %NULL on failure.
2428 */
2429struct vm_struct *find_vm_area(const void *addr)
2430{
2431        struct vmap_area *va;
2432
2433        va = find_vmap_area((unsigned long)addr);
2434        if (!va)
2435                return NULL;
2436
2437        return va->vm;
2438}
2439
2440/**
2441 * remove_vm_area - find and remove a continuous kernel virtual area
2442 * @addr:           base address
2443 *
2444 * Search for the kernel VM area starting at @addr, and remove it.
2445 * This function returns the found VM area, but using it is NOT safe
2446 * on SMP machines, except for its size or flags.
2447 *
2448 * Return: the area descriptor on success or %NULL on failure.
2449 */
2450struct vm_struct *remove_vm_area(const void *addr)
2451{
2452        struct vmap_area *va;
2453
2454        might_sleep();
2455
2456        spin_lock(&vmap_area_lock);
2457        va = __find_vmap_area((unsigned long)addr);
2458        if (va && va->vm) {
2459                struct vm_struct *vm = va->vm;
2460
2461                va->vm = NULL;
2462                spin_unlock(&vmap_area_lock);
2463
2464                kasan_free_shadow(vm);
2465                free_unmap_vmap_area(va);
2466
2467                return vm;
2468        }
2469
2470        spin_unlock(&vmap_area_lock);
2471        return NULL;
2472}
2473
2474static inline void set_area_direct_map(const struct vm_struct *area,
2475                                       int (*set_direct_map)(struct page *page))
2476{
2477        int i;
2478
2479        /* HUGE_VMALLOC passes small pages to set_direct_map */
2480        for (i = 0; i < area->nr_pages; i++)
2481                if (page_address(area->pages[i]))
2482                        set_direct_map(area->pages[i]);
2483}
2484
2485/* Handle removing and resetting vm mappings related to the vm_struct. */
2486static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2487{
2488        unsigned long start = ULONG_MAX, end = 0;
2489        unsigned int page_order = vm_area_page_order(area);
2490        int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2491        int flush_dmap = 0;
2492        int i;
2493
2494        remove_vm_area(area->addr);
2495
2496        /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2497        if (!flush_reset)
2498                return;
2499
2500        /*
2501         * If not deallocating pages, just do the flush of the VM area and
2502         * return.
2503         */
2504        if (!deallocate_pages) {
2505                vm_unmap_aliases();
2506                return;
2507        }
2508
2509        /*
2510         * If execution gets here, flush the vm mapping and reset the direct
2511         * map. Find the start and end range of the direct mappings to make sure
2512         * the vm_unmap_aliases() flush includes the direct map.
2513         */
2514        for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2515                unsigned long addr = (unsigned long)page_address(area->pages[i]);
2516                if (addr) {
2517                        unsigned long page_size;
2518
2519                        page_size = PAGE_SIZE << page_order;
2520                        start = min(addr, start);
2521                        end = max(addr + page_size, end);
2522                        flush_dmap = 1;
2523                }
2524        }
2525
2526        /*
2527         * Set direct map to something invalid so that it won't be cached if
2528         * there are any accesses after the TLB flush, then flush the TLB and
2529         * reset the direct map permissions to the default.
2530         */
2531        set_area_direct_map(area, set_direct_map_invalid_noflush);
2532        _vm_unmap_aliases(start, end, flush_dmap);
2533        set_area_direct_map(area, set_direct_map_default_noflush);
2534}
2535
2536static void __vunmap(const void *addr, int deallocate_pages)
2537{
2538        struct vm_struct *area;
2539
2540        if (!addr)
2541                return;
2542
2543        if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2544                        addr))
2545                return;
2546
2547        area = find_vm_area(addr);
2548        if (unlikely(!area)) {
2549                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2550                                addr);
2551                return;
2552        }
2553
2554        debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2555        debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2556
2557        kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2558
2559        vm_remove_mappings(area, deallocate_pages);
2560
2561        if (deallocate_pages) {
2562                unsigned int page_order = vm_area_page_order(area);
2563                int i;
2564
2565                for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2566                        struct page *page = area->pages[i];
2567
2568                        BUG_ON(!page);
2569                        __free_pages(page, page_order);
2570                }
2571                atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2572
2573                kvfree(area->pages);
2574        }
2575
2576        kfree(area);
2577}
2578
2579static inline void __vfree_deferred(const void *addr)
2580{
2581        /*
2582         * Use raw_cpu_ptr() because this can be called from preemptible
2583         * context. Preemption is absolutely fine here, because the llist_add()
2584         * implementation is lockless, so it works even if we are adding to
2585         * another cpu's list. schedule_work() should be fine with this too.
2586         */
2587        struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2588
2589        if (llist_add((struct llist_node *)addr, &p->list))
2590                schedule_work(&p->wq);
2591}
2592
2593/**
2594 * vfree_atomic - release memory allocated by vmalloc()
2595 * @addr:         memory base address
2596 *
2597 * This one is just like vfree() but can be called in any atomic context
2598 * except NMIs.
2599 */
2600void vfree_atomic(const void *addr)
2601{
2602        BUG_ON(in_nmi());
2603
2604        kmemleak_free(addr);
2605
2606        if (!addr)
2607                return;
2608        __vfree_deferred(addr);
2609}
2610
2611static void __vfree(const void *addr)
2612{
2613        if (unlikely(in_interrupt()))
2614                __vfree_deferred(addr);
2615        else
2616                __vunmap(addr, 1);
2617}
2618
2619/**
2620 * vfree - Release memory allocated by vmalloc()
2621 * @addr:  Memory base address
2622 *
2623 * Free the virtually continuous memory area starting at @addr, as obtained
2624 * from one of the vmalloc() family of APIs.  This will usually also free the
2625 * physical memory underlying the virtual allocation, but that memory is
2626 * reference counted, so it will not be freed until the last user goes away.
2627 *
2628 * If @addr is NULL, no operation is performed.
2629 *
2630 * Context:
2631 * May sleep if called *not* from interrupt context.
2632 * Must not be called in NMI context (strictly speaking, it could be
2633 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2634 * conventions for vfree() arch-dependent would be a really bad idea).
2635 */
2636void vfree(const void *addr)
2637{
2638        BUG_ON(in_nmi());
2639
2640        kmemleak_free(addr);
2641
2642        might_sleep_if(!in_interrupt());
2643
2644        if (!addr)
2645                return;
2646
2647        __vfree(addr);
2648}
2649EXPORT_SYMBOL(vfree);
2650
2651/**
2652 * vunmap - release virtual mapping obtained by vmap()
2653 * @addr:   memory base address
2654 *
2655 * Free the virtually contiguous memory area starting at @addr,
2656 * which was created from the page array passed to vmap().
2657 *
2658 * Must not be called in interrupt context.
2659 */
2660void vunmap(const void *addr)
2661{
2662        BUG_ON(in_interrupt());
2663        might_sleep();
2664        if (addr)
2665                __vunmap(addr, 0);
2666}
2667EXPORT_SYMBOL(vunmap);
2668
2669/**
2670 * vmap - map an array of pages into virtually contiguous space
2671 * @pages: array of page pointers
2672 * @count: number of pages to map
2673 * @flags: vm_area->flags
2674 * @prot: page protection for the mapping
2675 *
2676 * Maps @count pages from @pages into contiguous kernel virtual space.
2677 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2678 * (which must be kmalloc or vmalloc memory) and one reference per page in it
2679 * are transferred from the caller to vmap(), and will be freed / dropped when
2680 * vfree() is called on the return value.
2681 *
2682 * Return: the address of the area or %NULL on failure
2683 */
2684void *vmap(struct page **pages, unsigned int count,
2685           unsigned long flags, pgprot_t prot)
2686{
2687        struct vm_struct *area;
2688        unsigned long addr;
2689        unsigned long size;             /* In bytes */
2690
2691        might_sleep();
2692
2693        if (count > totalram_pages())
2694                return NULL;
2695
2696        size = (unsigned long)count << PAGE_SHIFT;
2697        area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2698        if (!area)
2699                return NULL;
2700
2701        addr = (unsigned long)area->addr;
2702        if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2703                                pages, PAGE_SHIFT) < 0) {
2704                vunmap(area->addr);
2705                return NULL;
2706        }
2707
2708        if (flags & VM_MAP_PUT_PAGES) {
2709                area->pages = pages;
2710                area->nr_pages = count;
2711        }
2712        return area->addr;
2713}
2714EXPORT_SYMBOL(vmap);
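/*
 * Illustrative sketch (not part of the original file): allocate a handful
 * of individual pages and glue them together into one virtually contiguous
 * kernel mapping with vmap(). VM_MAP_PUT_PAGES, described in the kernel-doc
 * above, hands ownership of both the page references and the (kmalloc'ed)
 * array to the mapping, so a single vfree() tears everything down.
 * my_vmap_pages() is a hypothetical name.
 */
#if 0	/* example only, not compiled */
static void *my_vmap_pages(unsigned int nr)
{
	struct page **pages;
	unsigned int i;
	void *va;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < nr; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto err_pages;
	}

	/* On success, the caller later frees the whole thing with vfree(va). */
	va = vmap(pages, nr, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (!va)
		goto err_pages;
	return va;

err_pages:
	while (i--)
		__free_page(pages[i]);
	kfree(pages);
	return NULL;
}
#endif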
2715
2716#ifdef CONFIG_VMAP_PFN
2717struct vmap_pfn_data {
2718        unsigned long   *pfns;
2719        pgprot_t        prot;
2720        unsigned int    idx;
2721};
2722
2723static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2724{
2725        struct vmap_pfn_data *data = private;
2726
2727        if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2728                return -EINVAL;
2729        *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2730        return 0;
2731}
2732
2733/**
2734 * vmap_pfn - map an array of PFNs into virtually contiguous space
2735 * @pfns: array of PFNs
2736 * @count: number of pages to map
2737 * @prot: page protection for the mapping
2738 *
2739 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2740 * the start address of the mapping.
2741 */
2742void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2743{
2744        struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2745        struct vm_struct *area;
2746
2747        area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2748                        __builtin_return_address(0));
2749        if (!area)
2750                return NULL;
2751        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2752                        count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2753                free_vm_area(area);
2754                return NULL;
2755        }
2756        return area->addr;
2757}
2758EXPORT_SYMBOL_GPL(vmap_pfn);
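/*
 * Illustrative sketch (not part of the original file): vmap_pfn() is meant
 * for PFNs that have no struct page (pfn_valid() is false), e.g. device
 * memory. "my_dev_phys" is a hypothetical base address; the PFN array is
 * built with PHYS_PFN() and the mapping is made uncached, a common choice
 * for such memory. The resulting mapping is torn down later with vunmap().
 */
#if 0	/* example only, not compiled */
static void *my_map_device_mem(phys_addr_t my_dev_phys, unsigned int nr_pages)
{
	unsigned long *pfns;
	unsigned int i;
	void *va;

	pfns = kcalloc(nr_pages, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return NULL;

	for (i = 0; i < nr_pages; i++)
		pfns[i] = PHYS_PFN(my_dev_phys) + i;

	va = vmap_pfn(pfns, nr_pages, pgprot_noncached(PAGE_KERNEL));

	/* The PTEs are installed during the call; the array is no longer needed. */
	kfree(pfns);
	return va;
}
#endif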
2759#endif /* CONFIG_VMAP_PFN */
2760
2761static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2762                                 pgprot_t prot, unsigned int page_shift,
2763                                 int node)
2764{
2765        const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2766        unsigned long addr = (unsigned long)area->addr;
2767        unsigned long size = get_vm_area_size(area);
2768        unsigned long array_size;
2769        unsigned int nr_small_pages = size >> PAGE_SHIFT;
2770        unsigned int page_order;
2771        struct page **pages;
2772        unsigned int i;
2773
2774        array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
2775        gfp_mask |= __GFP_NOWARN;
2776        if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2777                gfp_mask |= __GFP_HIGHMEM;
2778
2779        /* Please note that the recursion is strictly bounded. */
2780        if (array_size > PAGE_SIZE) {
2781                pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2782                                        area->caller);
2783        } else {
2784                pages = kmalloc_node(array_size, nested_gfp, node);
2785        }
2786
2787        if (!pages) {
2788                free_vm_area(area);
2789                warn_alloc(gfp_mask, NULL,
2790                           "vmalloc size %lu allocation failure: "
2791                           "page array size %lu allocation failed",
2792                           nr_small_pages * PAGE_SIZE, array_size);
2793                return NULL;
2794        }
2795
2796        area->pages = pages;
2797        area->nr_pages = nr_small_pages;
2798        set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
2799
2800        page_order = vm_area_page_order(area);
2801
2802        /*
2803         * Careful, we allocate and map page_order pages, but tracking is done
2804         * per PAGE_SIZE page so as to keep the vm_struct APIs independent of
2805         * the physical/mapped size.
2806         */
2807        for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2808                struct page *page;
2809                int p;
2810
2811                /* Compound pages required for remap_vmalloc_page */
2812                page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
2813                if (unlikely(!page)) {
2814                        /* Successfully allocated i pages, free them in __vfree() */
2815                        area->nr_pages = i;
2816                        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2817                        warn_alloc(gfp_mask, NULL,
2818                                   "vmalloc size %lu allocation failure: "
2819                                   "page order %u allocation failed",
2820                                   area->nr_pages * PAGE_SIZE, page_order);
2821                        goto fail;
2822                }
2823
2824                for (p = 0; p < (1U << page_order); p++)
2825                        area->pages[i + p] = page + p;
2826
2827                if (gfpflags_allow_blocking(gfp_mask))
2828                        cond_resched();
2829        }
2830        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2831
2832        if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) {
2833                warn_alloc(gfp_mask, NULL,
2834                           "vmalloc size %lu allocation failure: "
2835                           "failed to map pages",
2836                           area->nr_pages * PAGE_SIZE);
2837                goto fail;
2838        }
2839
2840        return area->addr;
2841
2842fail:
2843        __vfree(area->addr);
2844        return NULL;
2845}
2846
2847/**
2848 * __vmalloc_node_range - allocate virtually contiguous memory
2849 * @size:                 allocation size
2850 * @align:                desired alignment
2851 * @start:                vm area range start
2852 * @end:                  vm area range end
2853 * @gfp_mask:             flags for the page level allocator
2854 * @prot:                 protection mask for the allocated pages
2855 * @vm_flags:             additional vm area flags (e.g. %VM_NO_GUARD)
2856 * @node:                 node to use for allocation or NUMA_NO_NODE
2857 * @caller:               caller's return address
2858 *
2859 * Allocate enough pages to cover @size from the page level
2860 * allocator with @gfp_mask flags.  Map them into contiguous
2861 * kernel virtual space, using a pagetable protection of @prot.
2862 *
2863 * Return: the address of the area or %NULL on failure
2864 */
2865void *__vmalloc_node_range(unsigned long size, unsigned long align,
2866                        unsigned long start, unsigned long end, gfp_t gfp_mask,
2867                        pgprot_t prot, unsigned long vm_flags, int node,
2868                        const void *caller)
2869{
2870        struct vm_struct *area;
2871        void *addr;
2872        unsigned long real_size = size;
2873        unsigned long real_align = align;
2874        unsigned int shift = PAGE_SHIFT;
2875
2876        if (WARN_ON_ONCE(!size))
2877                return NULL;
2878
2879        if ((size >> PAGE_SHIFT) > totalram_pages()) {
2880                warn_alloc(gfp_mask, NULL,
2881                           "vmalloc size %lu allocation failure: "
2882                           "exceeds total pages", real_size);
2883                return NULL;
2884        }
2885
2886        if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP) &&
2887                        arch_vmap_pmd_supported(prot)) {
2888                unsigned long size_per_node;
2889
2890                /*
2891                 * Try huge pages. Only try for PAGE_KERNEL allocations,
2892                 * others like modules don't yet expect huge pages in
2893                 * their allocations due to apply_to_page_range not
2894                 * supporting them.
2895                 */
2896
2897                size_per_node = size;
2898                if (node == NUMA_NO_NODE)
2899                        size_per_node /= num_online_nodes();
2900                if (size_per_node >= PMD_SIZE) {
2901                        shift = PMD_SHIFT;
2902                        align = max(real_align, 1UL << shift);
2903                        size = ALIGN(real_size, 1UL << shift);
2904                }
2905        }
2906
2907again:
2908        area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
2909                                  VM_UNINITIALIZED | vm_flags, start, end, node,
2910                                  gfp_mask, caller);
2911        if (!area) {
2912                warn_alloc(gfp_mask, NULL,
2913                           "vmalloc size %lu allocation failure: "
2914                           "vm_struct allocation failed", real_size);
2915                goto fail;
2916        }
2917
2918        addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
2919        if (!addr)
2920                goto fail;
2921
2922        /*
2923         * In this function, newly allocated vm_struct has VM_UNINITIALIZED
2924         * flag. It means that vm_struct is not fully initialized.
2925         * Now, it is fully initialized, so remove this flag here.
2926         */
2927        clear_vm_uninitialized_flag(area);
2928
2929        size = PAGE_ALIGN(size);
2930        kmemleak_vmalloc(area, size, gfp_mask);
2931
2932        return addr;
2933
2934fail:
2935        if (shift > PAGE_SHIFT) {
2936                shift = PAGE_SHIFT;
2937                align = real_align;
2938                size = real_size;
2939                goto again;
2940        }
2941
2942        return NULL;
2943}
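/*
 * Illustrative sketch (not part of the original file): arch module loaders
 * are the classic users of __vmalloc_node_range(), placing executable
 * allocations inside the arch-specific [MODULES_VADDR, MODULES_END) window.
 * MODULES_VADDR, MODULES_END and PAGE_KERNEL_EXEC are arch-provided symbols,
 * so treat this as a sketch of the calling convention rather than portable
 * code; my_module_alloc() is a hypothetical name.
 */
#if 0	/* example only, not compiled */
static void *my_module_alloc(unsigned long size)
{
	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				    NUMA_NO_NODE,
				    __builtin_return_address(0));
}
#endif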
2944
2945/**
2946 * __vmalloc_node - allocate virtually contiguous memory
2947 * @size:           allocation size
2948 * @align:          desired alignment
2949 * @gfp_mask:       flags for the page level allocator
2950 * @node:           node to use for allocation or NUMA_NO_NODE
2951 * @caller:         caller's return address
2952 *
2953 * Allocate enough pages to cover @size from the page level allocator with
2954 * @gfp_mask flags.  Map them into contiguous kernel virtual space.
2955 *
2956 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2957 * and __GFP_NOFAIL are not supported
2958 *
2959 * Any use of gfp flags outside of GFP_KERNEL should be discussed
2960 * with the mm people first.
2961 *
2962 * Return: pointer to the allocated memory or %NULL on error
2963 */
2964void *__vmalloc_node(unsigned long size, unsigned long align,
2965                            gfp_t gfp_mask, int node, const void *caller)
2966{
2967        return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2968                                gfp_mask, PAGE_KERNEL, 0, node, caller);
2969}
2970/*
2971 * This is only for performance analysis of vmalloc and for stress testing.
2972 * It is required by the vmalloc test module; do not use it for anything
2973 * other than that.
2974 */
2975#ifdef CONFIG_TEST_VMALLOC_MODULE
2976EXPORT_SYMBOL_GPL(__vmalloc_node);
2977#endif
2978
2979void *__vmalloc(unsigned long size, gfp_t gfp_mask)
2980{
2981        return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
2982                                __builtin_return_address(0));
2983}
2984EXPORT_SYMBOL(__vmalloc);
2985
2986/**
2987 * vmalloc - allocate virtually contiguous memory
2988 * @size:    allocation size
2989 *
2990 * Allocate enough pages to cover @size from the page level
2991 * allocator and map them into contiguous kernel virtual space.
2992 *
2993 * For tight control over page level allocator and protection flags
2994 * use __vmalloc() instead.
2995 *
2996 * Return: pointer to the allocated memory or %NULL on error
2997 */
2998void *vmalloc(unsigned long size)
2999{
3000        return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3001                                __builtin_return_address(0));
3002}
3003EXPORT_SYMBOL(vmalloc);
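/*
 * Illustrative sketch (not part of the original file): the common pattern
 * for a large, virtually contiguous buffer that does not need to be
 * physically contiguous. my_table and TABLE_SIZE are hypothetical.
 */
#if 0	/* example only, not compiled */
static int my_init_table(void)
{
	my_table = vmalloc(TABLE_SIZE);
	if (!my_table)
		return -ENOMEM;
	return 0;
}

static void my_exit_table(void)
{
	vfree(my_table);	/* vfree(NULL) is a no-op, so this is always safe */
	my_table = NULL;
}
#endif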
3004
3005/**
3006 * vmalloc_no_huge - allocate virtually contiguous memory using small pages
3007 * @size:    allocation size
3008 *
3009 * Allocate enough non-huge pages to cover @size from the page level
3010 * allocator and map them into contiguous kernel virtual space.
3011 *
3012 * Return: pointer to the allocated memory or %NULL on error
3013 */
3014void *vmalloc_no_huge(unsigned long size)
3015{
3016        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3017                                    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
3018                                    NUMA_NO_NODE, __builtin_return_address(0));
3019}
3020EXPORT_SYMBOL(vmalloc_no_huge);
3021
3022/**
3023 * vzalloc - allocate virtually contiguous memory with zero fill
3024 * @size:    allocation size
3025 *
3026 * Allocate enough pages to cover @size from the page level
3027 * allocator and map them into contiguous kernel virtual space.
3028 * The memory allocated is set to zero.
3029 *
3030 * For tight control over page level allocator and protection flags
3031 * use __vmalloc() instead.
3032 *
3033 * Return: pointer to the allocated memory or %NULL on error
3034 */
3035void *vzalloc(unsigned long size)
3036{
3037        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3038                                __builtin_return_address(0));
3039}
3040EXPORT_SYMBOL(vzalloc);
3041
3042/**
3043 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3044 * @size: allocation size
3045 *
3046 * The resulting memory area is zeroed so it can be mapped to userspace
3047 * without leaking data.
3048 *
3049 * Return: pointer to the allocated memory or %NULL on error
3050 */
3051void *vmalloc_user(unsigned long size)
3052{
3053        return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3054                                    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3055                                    VM_USERMAP, NUMA_NO_NODE,
3056                                    __builtin_return_address(0));
3057}
3058EXPORT_SYMBOL(vmalloc_user);
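/*
 * Illustrative sketch (not part of the original file): memory from
 * vmalloc_user() carries VM_USERMAP and is pre-zeroed, so it can be handed
 * to userspace from an mmap handler via remap_vmalloc_range() (defined
 * later in this file). my_buf, MY_BUF_SIZE and my_mmap() are hypothetical.
 */
#if 0	/* example only, not compiled */
static void *my_buf;	/* allocated elsewhere with vmalloc_user(MY_BUF_SIZE) */

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}
#endif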
3059
3060/**
3061 * vmalloc_node - allocate memory on a specific node
3062 * @size:         allocation size
3063 * @node:         numa node
3064 *
3065 * Allocate enough pages to cover @size from the page level
3066 * allocator and map them into contiguous kernel virtual space.
3067 *
3068 * For tight control over page level allocator and protection flags
3069 * use __vmalloc() instead.
3070 *
3071 * Return: pointer to the allocated memory or %NULL on error
3072 */
3073void *vmalloc_node(unsigned long size, int node)
3074{
3075        return __vmalloc_node(size, 1, GFP_KERNEL, node,
3076                        __builtin_return_address(0));
3077}
3078EXPORT_SYMBOL(vmalloc_node);
3079
3080/**
3081 * vzalloc_node - allocate memory on a specific node with zero fill
3082 * @size:       allocation size
3083 * @node:       numa node
3084 *
3085 * Allocate enough pages to cover @size from the page level
3086 * allocator and map them into contiguous kernel virtual space.
3087 * The memory allocated is set to zero.
3088 *
3089 * Return: pointer to the allocated memory or %NULL on error
3090 */
3091void *vzalloc_node(unsigned long size, int node)
3092{
3093        return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3094                                __builtin_return_address(0));
3095}
3096EXPORT_SYMBOL(vzalloc_node);
3097
3098#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3099#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3100#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3101#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3102#else
3103/*
3104 * 64b systems should always have either DMA or DMA32 zones. For others
3105 * GFP_DMA32 should do the right thing and use the normal zone.
3106 */
3107#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3108#endif
3109
3110/**
3111 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3112 * @size:       allocation size
3113 *
3114 * Allocate enough 32bit PA addressable pages to cover @size from the
3115 * page level allocator and map them into contiguous kernel virtual space.
3116 *
3117 * Return: pointer to the allocated memory or %NULL on error
3118 */
3119void *vmalloc_32(unsigned long size)
3120{
3121        return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3122                        __builtin_return_address(0));
3123}
3124EXPORT_SYMBOL(vmalloc_32);
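/*
 * Illustrative sketch (editor's addition): a buffer for a device that can
 * only address 32-bit physical memory but performs its DMA page by page, so
 * physical contiguity is not required. The example_* names are hypothetical.
 */
static void *example_frame_buf;

static int example_frame_buf_alloc(size_t size)
{
        example_frame_buf = vmalloc_32(size);
        if (!example_frame_buf)
                return -ENOMEM;
        /* Individual backing pages can be looked up with vmalloc_to_page(). */
        return 0;
}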
3125
3126/**
3127 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3128 * @size:            allocation size
3129 *
3130 * The resulting memory area is 32bit addressable and zeroed so it can be
3131 * mapped to userspace without leaking data.
3132 *
3133 * Return: pointer to the allocated memory or %NULL on error
3134 */
3135void *vmalloc_32_user(unsigned long size)
3136{
3137        return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3138                                    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3139                                    VM_USERMAP, NUMA_NO_NODE,
3140                                    __builtin_return_address(0));
3141}
3142EXPORT_SYMBOL(vmalloc_32_user);
3143
3144/*
3145 * Small helper routine to copy contents from addr into buf.
3146 * If a page is not present, the corresponding bytes are filled with zeroes.
3147 */
3148
3149static int aligned_vread(char *buf, char *addr, unsigned long count)
3150{
3151        struct page *p;
3152        int copied = 0;
3153
3154        while (count) {
3155                unsigned long offset, length;
3156
3157                offset = offset_in_page(addr);
3158                length = PAGE_SIZE - offset;
3159                if (length > count)
3160                        length = count;
3161                p = vmalloc_to_page(addr);
3162                /*
3163                 * To do safe access to this _mapped_ area, we need a
3164                 * lock. But taking a lock here would add vmalloc()/vfree()
3165                 * overhead to this rarely used _debug_ interface. Instead,
3166                 * we use kmap_atomic() and accept a small overhead in
3167                 * this access function.
3168                 */
3169                if (p) {
3170                        /* We can expect USER0 is not used -- see vread() */
3171                        void *map = kmap_atomic(p);
3172                        memcpy(buf, map + offset, length);
3173                        kunmap_atomic(map);
3174                } else
3175                        memset(buf, 0, length);
3176
3177                addr += length;
3178                buf += length;
3179                copied += length;
3180                count -= length;
3181        }
3182        return copied;
3183}
3184
3185/**
3186 * vread() - read vmalloc area in a safe way.
3187 * @buf:     buffer for reading data
3188 * @addr:    vm address.
3189 * @count:   number of bytes to be read.
3190 *
3191 * This function checks that addr is a valid vmalloc'ed area, and
3192 * copies data from that area to a given buffer. If the given memory range
3193 * of [addr...addr+count) includes some valid address, data is copied to
3194 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
3195 * IOREMAP areas are treated as memory holes and no copy is done.
3196 *
3197 * If [addr...addr+count) doesn't include any intersection with a live
3198 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
3199 *
3200 * Note: In usual operations, vread() is never necessary because the caller
3201 * should know that the vmalloc() area is valid and can use memcpy().
3202 * This is for routines which have to access the vmalloc area without
3203 * any prior information, such as /proc/kcore.
3204 *
3205 * Return: number of bytes for which addr and buf should be increased
3206 * (same number as @count) or %0 if [addr...addr+count) doesn't
3207 * include any intersection with valid vmalloc area
3208 */
3209long vread(char *buf, char *addr, unsigned long count)
3210{
3211        struct vmap_area *va;
3212        struct vm_struct *vm;
3213        char *vaddr, *buf_start = buf;
3214        unsigned long buflen = count;
3215        unsigned long n;
3216
3217        /* Don't allow overflow */
3218        if ((unsigned long) addr + count < count)
3219                count = -(unsigned long) addr;
3220
3221        spin_lock(&vmap_area_lock);
3222        va = __find_vmap_area((unsigned long)addr);
3223        if (!va)
3224                goto finished;
3225        list_for_each_entry_from(va, &vmap_area_list, list) {
3226                if (!count)
3227                        break;
3228
3229                if (!va->vm)
3230                        continue;
3231
3232                vm = va->vm;
3233                vaddr = (char *) vm->addr;
3234                if (addr >= vaddr + get_vm_area_size(vm))
3235                        continue;
3236                while (addr < vaddr) {
3237                        if (count == 0)
3238                                goto finished;
3239                        *buf = '\0';
3240                        buf++;
3241                        addr++;
3242                        count--;
3243                }
3244                n = vaddr + get_vm_area_size(vm) - addr;
3245                if (n > count)
3246                        n = count;
3247                if (!(vm->flags & VM_IOREMAP))
3248                        aligned_vread(buf, addr, n);
3249                else /* IOREMAP area is treated as memory hole */
3250                        memset(buf, 0, n);
3251                buf += n;
3252                addr += n;
3253                count -= n;
3254        }
3255finished:
3256        spin_unlock(&vmap_area_lock);
3257
3258        if (buf == buf_start)
3259                return 0;
3260        /* zero-fill memory holes */
3261        if (buf != buf_start + buflen)
3262                memset(buf, 0, buflen - (buf - buf_start));
3263
3264        return buflen;
3265}
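/*
 * Illustrative sketch (editor's addition): reading an arbitrary range of the
 * vmalloc space into a caller-supplied kernel buffer, in the spirit of
 * /proc/kcore. Holes and ioremap ranges come back zero-filled, so the caller
 * does not need to know the exact layout. The example_* name is hypothetical.
 */
static long example_dump_vmalloc(char *kbuf, char *vm_addr, unsigned long len)
{
        long ret;

        ret = vread(kbuf, vm_addr, len);
        if (!ret)
                pr_info("no vmalloc area intersects %px\n", vm_addr);
        return ret;     /* len if anything was copied, 0 otherwise */
}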
3266
3267/**
3268 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3269 * @vma:                vma to cover
3270 * @uaddr:              target user address to start at
3271 * @kaddr:              virtual address of vmalloc kernel memory
3272 * @pgoff:              offset from @kaddr to start at
3273 * @size:               size of map area
3274 *
3275 * Returns:     0 for success, -Exxx on failure
3276 *
3277 * This function checks that @kaddr is a valid vmalloc'ed area,
3278 * and that it is big enough to cover the range starting at
3279 * @uaddr in @vma. It returns failure if those criteria aren't
3280 * met.
3281 *
3282 * Similar to remap_pfn_range() (see mm/memory.c)
3283 */
3284int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3285                                void *kaddr, unsigned long pgoff,
3286                                unsigned long size)
3287{
3288        struct vm_struct *area;
3289        unsigned long off;
3290        unsigned long end_index;
3291
3292        if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3293                return -EINVAL;
3294
3295        size = PAGE_ALIGN(size);
3296
3297        if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3298                return -EINVAL;
3299
3300        area = find_vm_area(kaddr);
3301        if (!area)
3302                return -EINVAL;
3303
3304        if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3305                return -EINVAL;
3306
3307        if (check_add_overflow(size, off, &end_index) ||
3308            end_index > get_vm_area_size(area))
3309                return -EINVAL;
3310        kaddr += off;
3311
3312        do {
3313                struct page *page = vmalloc_to_page(kaddr);
3314                int ret;
3315
3316                ret = vm_insert_page(vma, uaddr, page);
3317                if (ret)
3318                        return ret;
3319
3320                uaddr += PAGE_SIZE;
3321                kaddr += PAGE_SIZE;
3322                size -= PAGE_SIZE;
3323        } while (size > 0);
3324
3325        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3326
3327        return 0;
3328}
3329
3330/**
3331 * remap_vmalloc_range - map vmalloc pages to userspace
3332 * @vma:                vma to cover (map full range of vma)
3333 * @addr:               vmalloc memory
3334 * @pgoff:              number of pages into addr before first page to map
3335 *
3336 * Returns:     0 for success, -Exxx on failure
3337 *
3338 * This function checks that addr is a valid vmalloc'ed area, and
3339 * that it is big enough to cover the vma. It returns failure if
3340 * those criteria aren't met.
3341 *
3342 * Similar to remap_pfn_range() (see mm/memory.c)
3343 */
3344int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3345                                                unsigned long pgoff)
3346{
3347        return remap_vmalloc_range_partial(vma, vma->vm_start,
3348                                           addr, pgoff,
3349                                           vma->vm_end - vma->vm_start);
3350}
3351EXPORT_SYMBOL(remap_vmalloc_range);
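/*
 * Illustrative sketch (editor's addition): the common pattern of exporting a
 * vmalloc_user() buffer to userspace from a driver's ->mmap() handler. It
 * reuses the hypothetical example_shared_buf shown after vmalloc_user() above
 * and assumes a driver translation unit that includes <linux/fs.h>.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Maps the whole VMA; fails if the buffer cannot cover it. */
        return remap_vmalloc_range(vma, example_shared_buf, vma->vm_pgoff);
}

static const struct file_operations example_fops = {
        .owner = THIS_MODULE,
        .mmap  = example_mmap,
};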
3352
3353void free_vm_area(struct vm_struct *area)
3354{
3355        struct vm_struct *ret;
3356        ret = remove_vm_area(area->addr);
3357        BUG_ON(ret != area);
3358        kfree(area);
3359}
3360EXPORT_SYMBOL_GPL(free_vm_area);
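/*
 * Illustrative sketch (editor's addition): free_vm_area() pairs with
 * get_vm_area(), used by callers that reserve a chunk of vmalloc address
 * space and populate the page tables themselves (Xen-style usage). The
 * example_* name and the 2 MiB size are hypothetical.
 */
static int example_reserve_va(void)
{
        struct vm_struct *area;

        area = get_vm_area(SZ_2M, VM_IOREMAP);
        if (!area)
                return -ENOMEM;

        /* ... establish mappings at area->addr here ... */

        free_vm_area(area);     /* returns the range to the vmalloc allocator */
        return 0;
}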
3361
3362#ifdef CONFIG_SMP
3363static struct vmap_area *node_to_va(struct rb_node *n)
3364{
3365        return rb_entry_safe(n, struct vmap_area, rb_node);
3366}
3367
3368/**
3369 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3370 * @addr: target address
3371 *
3372 * Returns: the vmap_area @addr falls into, if one is found. Otherwise
3373 *   the highest vmap_area below @addr is returned (the first one in
3374 *   reverse order), i.e. va->va_start < addr && va->va_end < addr,
3375 *   or NULL if there are no areas at all before @addr.
3376 */
3377static struct vmap_area *
3378pvm_find_va_enclose_addr(unsigned long addr)
3379{
3380        struct vmap_area *va, *tmp;
3381        struct rb_node *n;
3382
3383        n = free_vmap_area_root.rb_node;
3384        va = NULL;
3385
3386        while (n) {
3387                tmp = rb_entry(n, struct vmap_area, rb_node);
3388                if (tmp->va_start <= addr) {
3389                        va = tmp;
3390                        if (tmp->va_end >= addr)
3391                                break;
3392
3393                        n = n->rb_right;
3394                } else {
3395                        n = n->rb_left;
3396                }
3397        }
3398
3399        return va;
3400}
3401
3402/**
3403 * pvm_determine_end_from_reverse - find the highest aligned address
3404 * of a free block below VMALLOC_END
3405 * @va:
3406 *   in - the VA we start the search from (reverse order);
3407 *   out - the VA with the highest aligned end address.
3408 * @align: alignment for the required highest address
3409 *
3410 * Returns: determined end address within vmap_area
3411 */
3412static unsigned long
3413pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3414{
3415        unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3416        unsigned long addr;
3417
3418        if (likely(*va)) {
3419                list_for_each_entry_from_reverse((*va),
3420                                &free_vmap_area_list, list) {
3421                        addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3422                        if ((*va)->va_start < addr)
3423                                return addr;
3424                }
3425        }
3426
3427        return 0;
3428}
3429
3430/**
3431 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3432 * @offsets: array containing offset of each area
3433 * @sizes: array containing size of each area
3434 * @nr_vms: the number of areas to allocate
3435 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3436 *
3437 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3438 *          vm_structs on success, %NULL on failure
3439 *
3440 * Percpu allocator wants to use congruent vm areas so that it can
3441 * maintain the offsets among percpu areas.  This function allocates
3442 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3443 * be scattered pretty far apart, with the distance between two areas easily
3444 * reaching gigabytes.  To avoid interacting with regular vmallocs, these
3445 * areas are allocated from the top of the vmalloc space.
3446 *
3447 * Despite its complicated look, this allocator is rather simple. It
3448 * does everything top-down and scans free blocks from the end looking
3449 * for a matching base. While scanning, if any of the areas does not fit,
3450 * the base address is pulled down to fit that area. Scanning is repeated
3451 * until all the areas fit, and then all necessary data structures are
3452 * inserted and the result is returned.
3453 */
3454struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3455                                     const size_t *sizes, int nr_vms,
3456                                     size_t align)
3457{
3458        const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3459        const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3460        struct vmap_area **vas, *va;
3461        struct vm_struct **vms;
3462        int area, area2, last_area, term_area;
3463        unsigned long base, start, size, end, last_end, orig_start, orig_end;
3464        bool purged = false;
3465        enum fit_type type;
3466
3467        /* verify parameters and allocate data structures */
3468        BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3469        for (last_area = 0, area = 0; area < nr_vms; area++) {
3470                start = offsets[area];
3471                end = start + sizes[area];
3472
3473                /* is everything aligned properly? */
3474                BUG_ON(!IS_ALIGNED(offsets[area], align));
3475                BUG_ON(!IS_ALIGNED(sizes[area], align));
3476
3477                /* detect the area with the highest address */
3478                if (start > offsets[last_area])
3479                        last_area = area;
3480
3481                for (area2 = area + 1; area2 < nr_vms; area2++) {
3482                        unsigned long start2 = offsets[area2];
3483                        unsigned long end2 = start2 + sizes[area2];
3484
3485                        BUG_ON(start2 < end && start < end2);
3486                }
3487        }
3488        last_end = offsets[last_area] + sizes[last_area];
3489
3490        if (vmalloc_end - vmalloc_start < last_end) {
3491                WARN_ON(true);
3492                return NULL;
3493        }
3494
3495        vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3496        vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3497        if (!vas || !vms)
3498                goto err_free2;
3499
3500        for (area = 0; area < nr_vms; area++) {
3501                vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3502                vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3503                if (!vas[area] || !vms[area])
3504                        goto err_free;
3505        }
3506retry:
3507        spin_lock(&free_vmap_area_lock);
3508
3509        /* start scanning - we scan from the top, begin with the last area */
3510        area = term_area = last_area;
3511        start = offsets[area];
3512        end = start + sizes[area];
3513
3514        va = pvm_find_va_enclose_addr(vmalloc_end);
3515        base = pvm_determine_end_from_reverse(&va, align) - end;
3516
3517        while (true) {
3518                /*
3519                 * base might have underflowed, add last_end before
3520                 * comparing.
3521                 */
3522                if (base + last_end < vmalloc_start + last_end)
3523                        goto overflow;
3524
3525                /*
3526                 * A fitting base has not been found.
3527                 */
3528                if (va == NULL)
3529                        goto overflow;
3530
3531                /*
3532                 * If required width exceeds current VA block, move
3533                 * base downwards and then recheck.
3534                 */
3535                if (base + end > va->va_end) {
3536                        base = pvm_determine_end_from_reverse(&va, align) - end;
3537                        term_area = area;
3538                        continue;
3539                }
3540
3541                /*
3542                 * If this VA does not fit, move base downwards and recheck.
3543                 */
3544                if (base + start < va->va_start) {
3545                        va = node_to_va(rb_prev(&va->rb_node));
3546                        base = pvm_determine_end_from_reverse(&va, align) - end;
3547                        term_area = area;
3548                        continue;
3549                }
3550
3551                /*
3552                 * This area fits, move on to the previous one.  If
3553                 * the previous one is the terminal one, we're done.
3554                 */
3555                area = (area + nr_vms - 1) % nr_vms;
3556                if (area == term_area)
3557                        break;
3558
3559                start = offsets[area];
3560                end = start + sizes[area];
3561                va = pvm_find_va_enclose_addr(base + end);
3562        }
3563
3564        /* we've found a fitting base, insert all va's */
3565        for (area = 0; area < nr_vms; area++) {
3566                int ret;
3567
3568                start = base + offsets[area];
3569                size = sizes[area];
3570
3571                va = pvm_find_va_enclose_addr(start);
3572                if (WARN_ON_ONCE(va == NULL))
3573                        /* It is a BUG(), but trigger recovery instead. */
3574                        goto recovery;
3575
3576                type = classify_va_fit_type(va, start, size);
3577                if (WARN_ON_ONCE(type == NOTHING_FIT))
3578                        /* It is a BUG(), but trigger recovery instead. */
3579                        goto recovery;
3580
3581                ret = adjust_va_to_fit_type(va, start, size, type);
3582                if (unlikely(ret))
3583                        goto recovery;
3584
3585                /* Allocated area. */
3586                va = vas[area];
3587                va->va_start = start;
3588                va->va_end = start + size;
3589        }
3590
3591        spin_unlock(&free_vmap_area_lock);
3592
3593        /* populate the kasan shadow space */
3594        for (area = 0; area < nr_vms; area++) {
3595                if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3596                        goto err_free_shadow;
3597
3598                kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3599                                       sizes[area]);
3600        }
3601
3602        /* insert all vm's */
3603        spin_lock(&vmap_area_lock);
3604        for (area = 0; area < nr_vms; area++) {
3605                insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3606
3607                setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3608                                 pcpu_get_vm_areas);
3609        }
3610        spin_unlock(&vmap_area_lock);
3611
3612        kfree(vas);
3613        return vms;
3614
3615recovery:
3616        /*
3617         * Remove previously allocated areas. There is no
3618         * need to remove these areas from the busy tree,
3619         * because they are inserted only in the final step
3620         * and only when pcpu_get_vm_areas() succeeds.
3621         */
3622        while (area--) {
3623                orig_start = vas[area]->va_start;
3624                orig_end = vas[area]->va_end;
3625                va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3626                                &free_vmap_area_list);
3627                if (va)
3628                        kasan_release_vmalloc(orig_start, orig_end,
3629                                va->va_start, va->va_end);
3630                vas[area] = NULL;
3631        }
3632
3633overflow:
3634        spin_unlock(&free_vmap_area_lock);
3635        if (!purged) {
3636                purge_vmap_area_lazy();
3637                purged = true;
3638
3639                /* Before "retry", re-allocate any vmap_area objects freed during recovery. */
3640                for (area = 0; area < nr_vms; area++) {
3641                        if (vas[area])
3642                                continue;
3643
3644                        vas[area] = kmem_cache_zalloc(
3645                                vmap_area_cachep, GFP_KERNEL);
3646                        if (!vas[area])
3647                                goto err_free;
3648                }
3649
3650                goto retry;
3651        }
3652
3653err_free:
3654        for (area = 0; area < nr_vms; area++) {
3655                if (vas[area])
3656                        kmem_cache_free(vmap_area_cachep, vas[area]);
3657
3658                kfree(vms[area]);
3659        }
3660err_free2:
3661        kfree(vas);
3662        kfree(vms);
3663        return NULL;
3664
3665err_free_shadow:
3666        spin_lock(&free_vmap_area_lock);
3667        /*
3668         * We release all the vmalloc shadows, even the ones for regions that
3669         * hadn't been successfully added. This relies on kasan_release_vmalloc
3670         * being able to tolerate this case.
3671         */
3672        for (area = 0; area < nr_vms; area++) {
3673                orig_start = vas[area]->va_start;
3674                orig_end = vas[area]->va_end;
3675                va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3676                                &free_vmap_area_list);
3677                if (va)
3678                        kasan_release_vmalloc(orig_start, orig_end,
3679                                va->va_start, va->va_end);
3680                vas[area] = NULL;
3681                kfree(vms[area]);
3682        }
3683        spin_unlock(&free_vmap_area_lock);
3684        kfree(vas);
3685        kfree(vms);
3686        return NULL;
3687}
3688
3689/**
3690 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3691 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3692 * @nr_vms: the number of allocated areas
3693 *
3694 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3695 */
3696void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3697{
3698        int i;
3699
3700        for (i = 0; i < nr_vms; i++)
3701                free_vm_area(vms[i]);
3702        kfree(vms);
3703}
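/*
 * Illustrative sketch (editor's addition): the percpu first-chunk code is the
 * only real user of this pair, but a minimal call sequence looks roughly like
 * the following. The offsets and sizes are hypothetical; all of them must be
 * aligned to the @align argument and the areas must not overlap.
 */
static int example_pcpu_areas(void)
{
        static const unsigned long offsets[] = { 0, 4 * PAGE_SIZE };
        static const size_t sizes[] = { 2 * PAGE_SIZE, 2 * PAGE_SIZE };
        struct vm_struct **vms;

        vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
        if (!vms)
                return -ENOMEM;

        /* ... map percpu pages at vms[i]->addr ... */

        pcpu_free_vm_areas(vms, 2);     /* frees the areas and the vms array */
        return 0;
}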
3704#endif  /* CONFIG_SMP */
3705
3706#ifdef CONFIG_PRINTK
3707bool vmalloc_dump_obj(void *object)
3708{
3709        struct vm_struct *vm;
3710        void *objp = (void *)PAGE_ALIGN((unsigned long)object);
3711
3712        vm = find_vm_area(objp);
3713        if (!vm)
3714                return false;
3715        pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
3716                vm->nr_pages, (unsigned long)vm->addr, vm->caller);
3717        return true;
3718}
3719#endif
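/*
 * Illustrative sketch (editor's addition): vmalloc_dump_obj() is normally
 * reached through mem_dump_obj() (declared in <linux/mm.h>) when the pointer
 * being examined falls inside a vmalloc region, for example from debugging
 * code such as the following. The example_* name is hypothetical.
 */
static void example_report_buffer(void *buf)
{
        pr_info("example buffer state");        /* vmalloc_dump_obj() continues this line */
        mem_dump_obj(buf);      /* appends region size, start address and caller via pr_cont() */
}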
3720
3721#ifdef CONFIG_PROC_FS
3722static void *s_start(struct seq_file *m, loff_t *pos)
3723        __acquires(&vmap_purge_lock)
3724        __acquires(&vmap_area_lock)
3725{
3726        mutex_lock(&vmap_purge_lock);
3727        spin_lock(&vmap_area_lock);
3728
3729        return seq_list_start(&vmap_area_list, *pos);
3730}
3731
3732static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3733{
3734        return seq_list_next(p, &vmap_area_list, pos);
3735}
3736
3737static void s_stop(struct seq_file *m, void *p)
3738        __releases(&vmap_area_lock)
3739        __releases(&vmap_purge_lock)
3740{
3741        spin_unlock(&vmap_area_lock);
3742        mutex_unlock(&vmap_purge_lock);
3743}
3744
3745static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3746{
3747        if (IS_ENABLED(CONFIG_NUMA)) {
3748                unsigned int nr, *counters = m->private;
3749
3750                if (!counters)
3751                        return;
3752
3753                if (v->flags & VM_UNINITIALIZED)
3754                        return;
3755                /* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3756                smp_rmb();
3757
3758                memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3759
3760                for (nr = 0; nr < v->nr_pages; nr++)
3761                        counters[page_to_nid(v->pages[nr])]++;
3762
3763                for_each_node_state(nr, N_HIGH_MEMORY)
3764                        if (counters[nr])
3765                                seq_printf(m, " N%u=%u", nr, counters[nr]);
3766        }
3767}
3768
3769static void show_purge_info(struct seq_file *m)
3770{
3771        struct vmap_area *va;
3772
3773        spin_lock(&purge_vmap_area_lock);
3774        list_for_each_entry(va, &purge_vmap_area_list, list) {
3775                seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3776                        (void *)va->va_start, (void *)va->va_end,
3777                        va->va_end - va->va_start);
3778        }
3779        spin_unlock(&purge_vmap_area_lock);
3780}
3781
3782static int s_show(struct seq_file *m, void *p)
3783{
3784        struct vmap_area *va;
3785        struct vm_struct *v;
3786
3787        va = list_entry(p, struct vmap_area, list);
3788
3789        /*
3790         * s_show can race with remove_vm_area(): a vmap area without a vm
3791         * is either being torn down or belongs to a vm_map_ram allocation.
3792         */
3793        if (!va->vm) {
3794                seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3795                        (void *)va->va_start, (void *)va->va_end,
3796                        va->va_end - va->va_start);
3797
3798                return 0;
3799        }
3800
3801        v = va->vm;
3802
3803        seq_printf(m, "0x%pK-0x%pK %7ld",
3804                v->addr, v->addr + v->size, v->size);
3805
3806        if (v->caller)
3807                seq_printf(m, " %pS", v->caller);
3808
3809        if (v->nr_pages)
3810                seq_printf(m, " pages=%d", v->nr_pages);
3811
3812        if (v->phys_addr)
3813                seq_printf(m, " phys=%pa", &v->phys_addr);
3814
3815        if (v->flags & VM_IOREMAP)
3816                seq_puts(m, " ioremap");
3817
3818        if (v->flags & VM_ALLOC)
3819                seq_puts(m, " vmalloc");
3820
3821        if (v->flags & VM_MAP)
3822                seq_puts(m, " vmap");
3823
3824        if (v->flags & VM_USERMAP)
3825                seq_puts(m, " user");
3826
3827        if (v->flags & VM_DMA_COHERENT)
3828                seq_puts(m, " dma-coherent");
3829
3830        if (is_vmalloc_addr(v->pages))
3831                seq_puts(m, " vpages");
3832
3833        show_numa_info(m, v);
3834        seq_putc(m, '\n');
3835
3836        /*
3837         * As a final step, dump "unpurged" areas.
3838         */
3839        if (list_is_last(&va->list, &vmap_area_list))
3840                show_purge_info(m);
3841
3842        return 0;
3843}
3844
3845static const struct seq_operations vmalloc_op = {
3846        .start = s_start,
3847        .next = s_next,
3848        .stop = s_stop,
3849        .show = s_show,
3850};
3851
3852static int __init proc_vmalloc_init(void)
3853{
3854        if (IS_ENABLED(CONFIG_NUMA))
3855                proc_create_seq_private("vmallocinfo", 0400, NULL,
3856                                &vmalloc_op,
3857                                nr_node_ids * sizeof(unsigned int), NULL);
3858        else
3859                proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3860        return 0;
3861}
3862module_init(proc_vmalloc_init);
3863
3864#endif
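/*
 * Illustrative note (editor's addition): with CONFIG_PROC_FS the code above
 * provides /proc/vmallocinfo, whose lines follow the seq_printf() calls in
 * s_show(). A made-up example line (addresses and caller are fabricated for
 * illustration) might look like:
 *
 *   0xffffb1a342005000-0xffffb1a34200a000   20480 example_driver_init+0x48/0x120 pages=4 vmalloc N0=4
 */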
3865