linux/mm/hugetlb.c
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/io.h>

#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)

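/*
 * Illustrative use of for_each_hstate() (not part of the original file):
 * walk every registered huge page size and sum the pool sizes.
 *
 *      struct hstate *h;
 *      unsigned long total = 0;
 *
 *      for_each_hstate(h)
 *              total += h->nr_huge_pages;
 */
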
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must either hold the mmap_sem for write, or the mmap_sem for read
 * and the hugetlb_instantiation_mutex:
 *
 *      down_write(&mm->mmap_sem);
 * or
 *      down_read(&mm->mmap_sem);
 *      mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
};

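/*
 * Worked example of the region calls below (illustrative): on an empty
 * list, region_chg(head, 0, 4) returns 4 and records a zero-sized
 * placeholder; region_add(head, 0, 4) then commits [0, 4).  A later
 * region_chg(head, 2, 6) returns 2, because [2, 4) is already reserved,
 * and region_add(head, 2, 6) merges everything into a single [0, 6)
 * region.
 */
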
static long region_add(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg, *trg;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher, extend our area to
                 * include it completely.  If this is not the first area
                 * we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        list_del(&rg->link);
                        kfree(rg);
                }
        }
        nrg->from = f;
        nrg->to = t;
        return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
        struct file_region *rg, *nrg;
        long chg = 0;

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle: allocate a new region at the position, but make it zero
         * size so that we are guaranteed to be able to record the
         * reservation. */
        if (&rg->link == head || t < rg->from) {
                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                if (!nrg)
                        return -ENOMEM;
                nrg->from = f;
                nrg->to   = f;
                INIT_LIST_HEAD(&nrg->link);
                list_add(&nrg->link, rg->link.prev);

                return t - f;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        return chg;

                /* We overlap with this area; if it extends further than
                 * us, we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }
        return chg;
}

static long region_truncate(struct list_head *head, long end)
{
        struct file_region *rg, *trg;
        long chg = 0;

        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (end <= rg->to)
                        break;
        if (&rg->link == head)
                return 0;

        /* If we are in the middle of a region then adjust it. */
        if (end > rg->from) {
                chg = rg->to - end;
                rg->to = end;
                rg = list_entry(rg->link.next, typeof(*rg), link);
        }

        /* Drop any remaining regions. */
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                chg += rg->to - rg->from;
                list_del(&rg->link);
                kfree(rg);
        }
        return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
        struct file_region *rg;
        long chg = 0;

        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                /* Use long locals: rg->from/to are long, and int would
                 * truncate large offsets. */
                long seg_from;
                long seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }

        return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}
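
/*
 * For example (illustrative), with 2 MB huge pages (huge_page_shift == 21,
 * huge_page_order == 9), a fault at vma->vm_start + 4 MB in a mapping with
 * vm_pgoff == 0 yields index 2, i.e. the third huge page of the file.
 */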

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address)
{
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        struct hstate *hstate;

        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        hstate = hstate_vma(vma);

        return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
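
/*
 * Sketch of the resulting vm_private_data layout for a private mapping
 * (illustrative): kmalloc()ed resv_map pointers are at least word aligned,
 * so the two low bits are free for the flags above:
 *
 *      [ struct resv_map pointer (upper bits) | UNMAPPED | OWNER ]
 */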

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
                                                        unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

struct resv_map {
        struct kref refs;
        struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        if (!resv_map)
                return NULL;

        kref_init(&resv_map->refs);
        INIT_LIST_HEAD(&resv_map->regions);

        return resv_map;
}

static void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

        /* Clear out any active regions before we release the map. */
        region_truncate(&resv_map->regions, 0);
        kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        if (!(vma->vm_flags & VM_MAYSHARE))
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));

        return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
                        struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_NORESERVE)
                return;

        if (vma->vm_flags & VM_MAYSHARE) {
                /* Shared mappings always use reserves */
                h->resv_huge_pages--;
        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                /*
                 * Only the process that called mmap() has reserves for
                 * private mappings.
                 */
                h->resv_huge_pages--;
        }
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON(!is_vm_hugetlb_page(vma));
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_MAYSHARE)
                return 1;
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                return 1;
        return 0;
}

static void copy_gigantic_page(struct page *dst, struct page *src)
{
        int i;
        struct hstate *h = page_hstate(src);
        struct page *dst_base = dst;
        struct page *src_base = src;

        for (i = 0; i < pages_per_huge_page(h); ) {
                cond_resched();
                copy_highpage(dst, src);

                i++;
                dst = mem_map_next(dst, dst_base, i);
                src = mem_map_next(src, src_base, i);
        }
}

void copy_huge_page(struct page *dst, struct page *src)
{
        int i;
        struct hstate *h = page_hstate(src);

        if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
                copy_gigantic_page(dst, src);
                return;
        }

        might_sleep();
        for (i = 0; i < pages_per_huge_page(h); i++) {
                cond_resched();
                copy_highpage(dst + i, src + i);
        }
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);
        list_add(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        if (list_empty(&h->hugepage_freelists[nid]))
                return NULL;
        page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
        list_del(&page->lru);
        set_page_refcounted(page);
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
        return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve)
{
        struct page *page = NULL;
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;

        get_mems_allowed();
        zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask, &mpol, &nodemask);
        /*
         * A child process with MAP_PRIVATE mappings created by its parent
         * has no page reserves. This check ensures that reservations are
         * not "stolen"; the child may still get SIGKILLed.
         */
        if (!vma_has_reserves(vma) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                MAX_NR_ZONES - 1, nodemask) {
                if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
                        page = dequeue_huge_page_node(h, zone_to_nid(zone));
                        if (page) {
                                if (!avoid_reserve)
                                        decrement_hugepage_resv_vma(h, vma);
                                break;
                        }
                }
        }
err:
        mpol_cond_put(mpol);
        put_mems_allowed();
        return page;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
        int i;

        VM_BUG_ON(h->order >= MAX_ORDER);

        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_reserved |
                                1 << PG_private | 1 << PG_writeback);
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        arch_release_hugepage(page);
        __free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
        struct hstate *h;

        for_each_hstate(h) {
                if (huge_page_size(h) == size)
                        return h;
        }
        return NULL;
}

static void free_huge_page(struct page *page)
{
        /*
         * Can't pass hstate in here because it is called from the
         * compound page destructor.
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
        struct address_space *mapping;

        mapping = (struct address_space *) page_private(page);
        set_page_private(page, 0);
        page->mapping = NULL;
        BUG_ON(page_count(page));
        BUG_ON(page_mapcount(page));
        INIT_LIST_HEAD(&page->lru);

        spin_lock(&hugetlb_lock);
        if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
                update_and_free_page(h, page);
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
        } else {
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
        if (mapping)
                hugetlb_put_quota(mapping, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
        set_compound_page_dtor(page, free_huge_page);
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
        put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
                set_page_count(p, 0);
                p->first_page = page;
        }
}

int PageHuge(struct page *page)
{
        compound_page_dtor *dtor;

        if (!PageCompound(page))
                return 0;

        page = compound_head(page);
        dtor = get_compound_page_dtor(page);

        return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);
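
/*
 * Typical use (illustrative sketch, not from this file): callers such as
 * the migration and memory-failure paths test a page before treating it
 * as hugetlb-backed:
 *
 *      if (PageHuge(page))
 *              h = page_hstate(compound_head(page));
 */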

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        if (h->order >= MAX_ORDER)
                return NULL;

        page = alloc_pages_exact_node(nid,
                htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
                                                __GFP_REPEAT|__GFP_NOWARN,
                huge_page_order(h));
        if (page) {
                if (arch_prepare_hugepage(page)) {
                        __free_pages(page, huge_page_order(h));
                        return NULL;
                }
                prep_new_huge_page(h, page, nid);
        }

        return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        nid = next_node(nid, *nodes_allowed);
        if (nid == MAX_NUMNODES)
                nid = first_node(*nodes_allowed);
        VM_BUG_ON(nid >= MAX_NUMNODES);

        return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        if (!node_isset(nid, *nodes_allowed))
                nid = next_node_allowed(nid, nodes_allowed);
        return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
                                        nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

        return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
        struct page *page;
        int start_nid;
        int next_nid;
        int ret = 0;

        start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
        next_nid = start_nid;

        do {
                page = alloc_fresh_huge_page_node(h, next_nid);
                if (page) {
                        ret = 1;
                        break;
                }
                next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
        } while (next_nid != start_nid);

        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
        else
                count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

        return ret;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
        h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

        return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
                                                         bool acct_surplus)
{
        int start_nid;
        int next_nid;
        int ret = 0;

        start_nid = hstate_next_node_to_free(h, nodes_allowed);
        next_nid = start_nid;

        do {
                /*
                 * If we're returning unused surplus pages, only examine
                 * nodes with surplus pages.
                 */
                if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
                    !list_empty(&h->hugepage_freelists[next_nid])) {
                        struct page *page =
                                list_entry(h->hugepage_freelists[next_nid].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[next_nid]--;
                        if (acct_surplus) {
                                h->surplus_huge_pages--;
                                h->surplus_huge_pages_node[next_nid]--;
                        }
                        update_and_free_page(h, page);
                        ret = 1;
                        break;
                }
                next_nid = hstate_next_node_to_free(h, nodes_allowed);
        } while (next_nid != start_nid);

        return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
        struct page *page;
        unsigned int r_nid;

        if (h->order >= MAX_ORDER)
                return NULL;

        /*
         * Assume we will successfully allocate the surplus page to
         * prevent racing processes from causing the surplus to exceed
         * overcommit.
         *
         * This however introduces a different race, where a process B
         * tries to grow the static hugepage pool while alloc_pages() is
         * called by process A. B will only examine the per-node
         * counters in determining if surplus huge pages can be
         * converted to normal huge pages in adjust_pool_surplus(). A
         * won't be able to increment the per-node counter until the
         * lock is dropped by B, but B doesn't drop hugetlb_lock until
         * no more huge pages can be converted from surplus to normal
         * state (and doesn't try to convert again). Thus, we have a
         * case where a surplus huge page exists, the pool is grown, and
         * the surplus huge page still exists after, even though it
         * should just have been converted to a normal huge page. This
         * does not leak memory, though, as the hugepage will be freed
         * once it is out of use. It also does not allow the counters to
         * go out of whack in adjust_pool_surplus() as we don't modify
         * the node values until we've gotten the hugepage and only the
         * per-node value is checked there.
         */
        spin_lock(&hugetlb_lock);
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        } else {
                h->nr_huge_pages++;
                h->surplus_huge_pages++;
        }
        spin_unlock(&hugetlb_lock);

        if (nid == NUMA_NO_NODE)
                page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
                                   __GFP_REPEAT|__GFP_NOWARN,
                                   huge_page_order(h));
        else
                page = alloc_pages_exact_node(nid,
                        htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
                        __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

        if (page && arch_prepare_hugepage(page)) {
                __free_pages(page, huge_page_order(h));
                /*
                 * Fall through with page == NULL so the counters
                 * incremented above are unwound below instead of
                 * being leaked by an early return.
                 */
                page = NULL;
        }

        spin_lock(&hugetlb_lock);
        if (page) {
                r_nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                /*
                 * We incremented the global counters already
                 */
                h->nr_huge_pages_node[r_nid]++;
                h->surplus_huge_pages_node[r_nid]++;
                __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
                h->nr_huge_pages--;
                h->surplus_huge_pages--;
                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
        spin_unlock(&hugetlb_lock);

        return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_node(h, nid);
        spin_unlock(&hugetlb_lock);

        if (!page)
                page = alloc_buddy_huge_page(h, nid);

        return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
        struct list_head surplus_list;
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;

        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
        if (needed <= 0) {
                h->resv_huge_pages += delta;
                return 0;
        }

        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);

        ret = -ENOMEM;
retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page)
                        /*
                         * We were not able to allocate enough pages to
                         * satisfy the entire reservation so we free what
                         * we've allocated so far.
                         */
                        goto free;

                list_add(&page->lru, &surplus_list);
        }
        allocated += needed;

        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
         * because either resv_huge_pages or free_huge_pages may have changed.
         */
        spin_lock(&hugetlb_lock);
        needed = (h->resv_huge_pages + delta) -
                        (h->free_huge_pages + allocated);
        if (needed > 0)
                goto retry;

        /*
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation.  Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator.  Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
         * before they are reserved.
         */
        needed += allocated;
        h->resv_huge_pages += delta;
        ret = 0;

        spin_unlock(&hugetlb_lock);
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
                        break;
                list_del(&page->lru);
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
                VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }

        /* Free unnecessary surplus pages to the buddy allocator */
free:
        if (!list_empty(&surplus_list)) {
                list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                        list_del(&page->lru);
                        put_page(page);
                }
        }
        spin_lock(&hugetlb_lock);

        return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
{
        unsigned long nr_pages;

        /* Uncommit the reservation */
        h->resv_huge_pages -= unused_resv_pages;

        /* Cannot return gigantic pages currently */
        if (h->order >= MAX_ORDER)
                return;

        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

        /*
         * We want to release as many surplus pages as possible, spread
         * evenly across all nodes with memory. Iterate across these nodes
         * until we can no longer free unreserved surplus pages. This occurs
         * when the nodes with surplus pages have no free pages.
         * free_pool_huge_page() will balance the freed pages across the
         * on-line nodes with memory and will handle the hstate accounting.
         */
        while (nr_pages--) {
                if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
                        break;
        }
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not, we will need to logically increase
 * the reservation and actually increase the quota before an allocation
 * can occur.  Where any new reservation would be required, the
 * reservation change is prepared, but not committed.  Once the page has
 * been quota'd, allocated and instantiated, the change should be
 * committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;

        if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                return region_chg(&inode->i_mapping->private_list,
                                                        idx, idx + 1);

        } else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                return 1;

        } else {
                long err;
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);

                err = region_chg(&reservations->regions, idx, idx + 1);
                if (err < 0)
                        return err;
                return 0;
        }
}

static void vma_commit_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;

        if (vma->vm_flags & VM_MAYSHARE) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                region_add(&inode->i_mapping->private_list, idx, idx + 1);

        } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                pgoff_t idx = vma_hugecache_offset(h, vma, addr);
                struct resv_map *reservations = vma_resv_map(vma);

                /* Mark this page used in the map. */
                region_add(&reservations->regions, idx, idx + 1);
        }
}
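
/*
 * vma_needs_reservation() and vma_commit_reservation() form a two-phase
 * protocol.  A sketch of the caller's pattern (error handling omitted):
 *
 *      chg = vma_needs_reservation(h, vma, addr);      (prepare only)
 *      ... allocate and instantiate the page ...
 *      vma_commit_reservation(h, vma, addr);           (commit)
 *
 * Nothing needs to be undone on failure: the prepare step never commits
 * a range, so an uncommitted change is simply never observed.
 */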

static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
{
        struct hstate *h = hstate_vma(vma);
        struct page *page;
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct inode *inode = mapping->host;
        long chg;

        /*
         * Processes that did not create the mapping will have no reserves
         * and will not have accounted against quota. Check that a quota
         * charge can be made before satisfying the allocation.
         * MAP_NORESERVE mappings may also need pages and quota allocated
         * if no reserve mapping overlaps.
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return ERR_PTR(-VM_FAULT_OOM);
        if (chg)
                if (hugetlb_get_quota(inode->i_mapping, chg))
                        return ERR_PTR(-VM_FAULT_SIGBUS);

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
        spin_unlock(&hugetlb_lock);

        if (!page) {
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
                        hugetlb_put_quota(inode->i_mapping, chg);
                        return ERR_PTR(-VM_FAULT_SIGBUS);
                }
        }

        set_page_private(page, (unsigned long) mapping);

        vma_commit_reservation(h, vma, addr);

        return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
        struct huge_bootmem_page *m;
        int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);

        while (nr_nodes) {
                void *addr;

                addr = __alloc_bootmem_node_nopanic(
                                NODE_DATA(hstate_next_node_to_alloc(h,
                                                &node_states[N_HIGH_MEMORY])),
                                huge_page_size(h), huge_page_size(h), 0);

                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the
                         * huge_bootmem_page struct (until gather_bootmem
                         * puts them into the mem_map).
                         */
                        m = addr;
                        goto found;
                }
                nr_nodes--;
        }
        return 0;

found:
        BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
        /* Put them into a private list first because mem_map is not up yet */
        list_add(&m->list, &huge_boot_pages);
        m->hstate = h;
        return 1;
}

static void prep_compound_huge_page(struct page *page, int order)
{
        if (unlikely(order > (MAX_ORDER - 1)))
                prep_compound_gigantic_page(page, order);
        else
                prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
        struct huge_bootmem_page *m;

        list_for_each_entry(m, &huge_boot_pages, list) {
                struct hstate *h = m->hstate;
                struct page *page;

#ifdef CONFIG_HIGHMEM
                page = pfn_to_page(m->phys >> PAGE_SHIFT);
                free_bootmem_late((unsigned long)m,
                                  sizeof(struct huge_bootmem_page));
#else
                page = virt_to_page(m);
#endif
                __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                prep_new_huge_page(h, page, page_to_nid(page));
                /*
                 * If we had gigantic hugepages allocated at boot time, we need
                 * to restore the 'stolen' pages to totalram_pages in order to
                 * fix confusing memory reports from free(1) and other
                 * side effects, like CommitLimit going negative.
                 */
                if (h->order > (MAX_ORDER - 1))
                        totalram_pages += 1 << h->order;
        }
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
        unsigned long i;

        for (i = 0; i < h->max_huge_pages; ++i) {
                if (h->order >= MAX_ORDER) {
                        if (!alloc_bootmem_huge_page(h))
                                break;
                } else if (!alloc_fresh_huge_page(h,
                                         &node_states[N_HIGH_MEMORY]))
                        break;
        }
        h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                /* oversize hugepages were init'ed in early boot */
                if (h->order < MAX_ORDER)
                        hugetlb_hstate_alloc_pages(h);
        }
}

static char * __init memfmt(char *buf, unsigned long n)
{
        if (n >= (1UL << 30))
                sprintf(buf, "%lu GB", n >> 30);
        else if (n >= (1UL << 20))
                sprintf(buf, "%lu MB", n >> 20);
        else
                sprintf(buf, "%lu KB", n >> 10);
        return buf;
}
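
/*
 * For example, memfmt(buf, 2UL << 20) yields "2 MB" and
 * memfmt(buf, 1UL << 30) yields "1 GB"; the caller must supply a buffer
 * large enough for the formatted string (report_hugepages() below uses
 * char buf[32]).
 */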

static void __init report_hugepages(void)
{
        struct hstate *h;

        for_each_hstate(h) {
                char buf[32];
                printk(KERN_INFO "HugeTLB registered %s page size, "
                                 "pre-allocated %ld pages\n",
                        memfmt(buf, huge_page_size(h)),
                        h->free_huge_pages);
        }
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
                                                nodemask_t *nodes_allowed)
{
        int i;

        if (h->order >= MAX_ORDER)
                return;

        for_each_node_mask(i, *nodes_allowed) {
                struct page *page, *next;
                struct list_head *freel = &h->hugepage_freelists[i];
                list_for_each_entry_safe(page, next, freel, lru) {
                        if (count >= h->nr_huge_pages)
                                return;
                        if (PageHighMem(page))
                                continue;
                        list_del(&page->lru);
                        update_and_free_page(h, page);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[page_to_nid(page)]--;
                }
        }
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
                                                nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
                                int delta)
{
        int start_nid, next_nid;
        int ret = 0;

        VM_BUG_ON(delta != -1 && delta != 1);

        if (delta < 0)
                start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
        else
                start_nid = hstate_next_node_to_free(h, nodes_allowed);
        next_nid = start_nid;

        do {
                int nid = next_nid;
                if (delta < 0)  {
                        /*
                         * To shrink on this node, there must be a surplus page
                         */
                        if (!h->surplus_huge_pages_node[nid]) {
                                next_nid = hstate_next_node_to_alloc(h,
                                                                nodes_allowed);
                                continue;
                        }
                }
                if (delta > 0) {
                        /*
                         * Surplus cannot exceed the total number of pages
                         */
                        if (h->surplus_huge_pages_node[nid] >=
                                                h->nr_huge_pages_node[nid]) {
                                next_nid = hstate_next_node_to_free(h,
                                                                nodes_allowed);
                                continue;
                        }
                }

                h->surplus_huge_pages += delta;
                h->surplus_huge_pages_node[nid] += delta;
                ret = 1;
                break;
        } while (next_nid != start_nid);

        return ret;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
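
/*
 * For example, with nr_huge_pages == 10 and surplus_huge_pages == 3,
 * persistent_huge_pages(h) is 7: only the persistent pages count toward
 * the pool size target manipulated below.
 */
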
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
                                                nodemask_t *nodes_allowed)
{
        unsigned long min_count, ret;

        if (h->order >= MAX_ORDER)
                return h->max_huge_pages;

        /*
         * Increase the pool size
         * First take pages out of surplus state.  Then make up the
         * remaining difference by allocating fresh huge pages.
         *
         * We might race with alloc_buddy_huge_page() here and be unable
         * to convert a surplus huge page to a normal huge page. That is
         * not critical, though, it just means the overall size of the
         * pool might be one hugepage larger than it needs to be, but
         * within all the constraints specified by the sysctls.
         */
        spin_lock(&hugetlb_lock);
        while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, -1))
                        break;
        }

        while (count > persistent_huge_pages(h)) {
                /*
                 * If this allocation races such that we no longer need the
                 * page, free_huge_page will handle it by freeing the page
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
                ret = alloc_fresh_huge_page(h, nodes_allowed);
                spin_lock(&hugetlb_lock);
                if (!ret)
                        goto out;

                /* Bail for signals. Probably ctrl-c from user */
                if (signal_pending(current))
                        goto out;
        }

        /*
         * Decrease the pool size
         * First return free pages to the buddy allocator (being careful
         * to keep enough around to satisfy reservations).  Then place
         * pages into surplus state as needed so the pool will shrink
         * to the desired size as pages become free.
         *
         * By placing pages into the surplus state independent of the
         * overcommit value, we are allowing the surplus pool size to
         * exceed overcommit. There are few sane options here. Since
         * alloc_buddy_huge_page() is checking the global counter,
         * though, we'll note that we're not allowed to exceed surplus
         * and won't grow the pool anywhere else. Not until one of the
         * sysctls is changed, or the surplus pages go out of use.
         */
        min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
        min_count = max(count, min_count);
        try_to_free_low(h, min_count, nodes_allowed);
        while (min_count < persistent_huge_pages(h)) {
                if (!free_pool_huge_page(h, nodes_allowed, 0))
                        break;
        }
        while (count < persistent_huge_pages(h)) {
                if (!adjust_pool_surplus(h, nodes_allowed, 1))
                        break;
        }
out:
        ret = persistent_huge_pages(h);
        spin_unlock(&hugetlb_lock);
        return ret;
}

#define HSTATE_ATTR_RO(_name) \
        static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
        static struct kobj_attribute _name##_attr = \
                __ATTR(_name, 0644, _name##_show, _name##_store)
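
/*
 * Illustrative expansion: HSTATE_ATTR(nr_hugepages) defines
 *
 *      static struct kobj_attribute nr_hugepages_attr =
 *              __ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *                     nr_hugepages_store);
 *
 * wiring the sysfs file to the _show/_store handlers declared below.
 */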

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
        int i;

        for (i = 0; i < HUGE_MAX_HSTATE; i++)
                if (hstate_kobjs[i] == kobj) {
                        if (nidp)
                                *nidp = NUMA_NO_NODE;
                        return &hstates[i];
                }

        return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        struct hstate *h;
        unsigned long nr_huge_pages;
        int nid;

        h = kobj_to_hstate(kobj, &nid);
        if (nid == NUMA_NO_NODE)
                nr_huge_pages = h->nr_huge_pages;
        else
                nr_huge_pages = h->nr_huge_pages_node[nid];

        return sprintf(buf, "%lu\n", nr_huge_pages);
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
                        struct kobject *kobj, struct kobj_attribute *attr,
                        const char *buf, size_t len)
{
        int err;
        int nid;
        unsigned long count;
        struct hstate *h;
        NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

        err = strict_strtoul(buf, 10, &count);
        if (err)
                goto out;

        h = kobj_to_hstate(kobj, &nid);
        if (h->order >= MAX_ORDER) {
                err = -EINVAL;
                goto out;
        }

        if (nid == NUMA_NO_NODE) {
                /*
                 * global hstate attribute
                 */
                if (!(obey_mempolicy &&
                                init_nodemask_of_mempolicy(nodes_allowed))) {
                        NODEMASK_FREE(nodes_allowed);
                        nodes_allowed = &node_states[N_HIGH_MEMORY];
                }
        } else if (nodes_allowed) {
                /*
                 * per node hstate attribute: adjust count to global,
                 * but restrict alloc/free to the specified node.
                 */
                count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
                init_nodemask_of_node(nodes_allowed, nid);
        } else
                nodes_allowed = &node_states[N_HIGH_MEMORY];

        h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

        if (nodes_allowed != &node_states[N_HIGH_MEMORY])
                NODEMASK_FREE(nodes_allowed);

        return len;
out:
        NODEMASK_FREE(nodes_allowed);
        return err;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
               struct kobj_attribute *attr, const char *buf, size_t len)
{
        return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
               struct kobj_attribute *attr, const char *buf, size_t len)
{
        return nr_hugepages_store_common(true, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif


static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        struct hstate *h = kobj_to_hstate(kobj, NULL);
        return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int err;
        unsigned long input;
        struct hstate *h = kobj_to_hstate(kobj, NULL);

        if (h->order >= MAX_ORDER)
                return -EINVAL;

        err = strict_strtoul(buf, 10, &input);
        if (err)
                return err;

        spin_lock(&hugetlb_lock);
        h->nr_overcommit_huge_pages = input;
        spin_unlock(&hugetlb_lock);

        return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);
1496
1497static ssize_t free_hugepages_show(struct kobject *kobj,
1498                                        struct kobj_attribute *attr, char *buf)
1499{
1500        struct hstate *h;
1501        unsigned long free_huge_pages;
1502        int nid;
1503
1504        h = kobj_to_hstate(kobj, &nid);
1505        if (nid == NUMA_NO_NODE)
1506                free_huge_pages = h->free_huge_pages;
1507        else
1508                free_huge_pages = h->free_huge_pages_node[nid];
1509
1510        return sprintf(buf, "%lu\n", free_huge_pages);
1511}
1512HSTATE_ATTR_RO(free_hugepages);
1513
1514static ssize_t resv_hugepages_show(struct kobject *kobj,
1515                                        struct kobj_attribute *attr, char *buf)
1516{
1517        struct hstate *h = kobj_to_hstate(kobj, NULL);
1518        return sprintf(buf, "%lu\n", h->resv_huge_pages);
1519}
1520HSTATE_ATTR_RO(resv_hugepages);
1521
1522static ssize_t surplus_hugepages_show(struct kobject *kobj,
1523                                        struct kobj_attribute *attr, char *buf)
1524{
1525        struct hstate *h;
1526        unsigned long surplus_huge_pages;
1527        int nid;
1528
1529        h = kobj_to_hstate(kobj, &nid);
1530        if (nid == NUMA_NO_NODE)
1531                surplus_huge_pages = h->surplus_huge_pages;
1532        else
1533                surplus_huge_pages = h->surplus_huge_pages_node[nid];
1534
1535        return sprintf(buf, "%lu\n", surplus_huge_pages);
1536}
1537HSTATE_ATTR_RO(surplus_hugepages);
1538
1539static struct attribute *hstate_attrs[] = {
1540        &nr_hugepages_attr.attr,
1541        &nr_overcommit_hugepages_attr.attr,
1542        &free_hugepages_attr.attr,
1543        &resv_hugepages_attr.attr,
1544        &surplus_hugepages_attr.attr,
1545#ifdef CONFIG_NUMA
1546        &nr_hugepages_mempolicy_attr.attr,
1547#endif
1548        NULL,
1549};
1550
1551static struct attribute_group hstate_attr_group = {
1552        .attrs = hstate_attrs,
1553};
1554
1555static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1556                                    struct kobject **hstate_kobjs,
1557                                    struct attribute_group *hstate_attr_group)
1558{
1559        int retval;
1560        int hi = h - hstates;
1561
1562        hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
1563        if (!hstate_kobjs[hi])
1564                return -ENOMEM;
1565
1566        retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
1567        if (retval)
1568                kobject_put(hstate_kobjs[hi]);
1569
1570        return retval;
1571}
1572
1573static void __init hugetlb_sysfs_init(void)
1574{
1575        struct hstate *h;
1576        int err;
1577
1578        hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
1579        if (!hugepages_kobj)
1580                return;
1581
1582        for_each_hstate(h) {
1583                err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
1584                                         hstate_kobjs, &hstate_attr_group);
1585                if (err)
1586                        printk(KERN_ERR "Hugetlb: Unable to add hstate %s\n",
1587                                                                h->name);
1588        }
1589}
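/*
 * Sketch of the sysfs layout built above for a hypothetical 2MB hstate
 * (nr_hugepages_mempolicy appears only with CONFIG_NUMA):
 *
 *      /sys/kernel/mm/hugepages/hugepages-2048kB/
 *              nr_hugepages  nr_overcommit_hugepages  free_hugepages
 *              resv_hugepages  surplus_hugepages  [nr_hugepages_mempolicy]
 */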
1590
1591#ifdef CONFIG_NUMA
1592
1593/*
1594 * node_hstate/s - associate per-node hstate attributes, via their kobjects,
1595 * with node sysdevs in node_devices[] using a parallel array.  The array
1596 * index of a node sysdev or node_hstate is the node id.
1597 * This lives here to avoid any static dependency of the node sysdev driver,
1598 * in the base kernel, on the hugetlb module.
1599 */
1600struct node_hstate {
1601        struct kobject          *hugepages_kobj;
1602        struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
1603};
1604struct node_hstate node_hstates[MAX_NUMNODES];
1605
1606/*
1607 * A subset of global hstate attributes for node sysdevs
1608 */
1609static struct attribute *per_node_hstate_attrs[] = {
1610        &nr_hugepages_attr.attr,
1611        &free_hugepages_attr.attr,
1612        &surplus_hugepages_attr.attr,
1613        NULL,
1614};
1615
1616static struct attribute_group per_node_hstate_attr_group = {
1617        .attrs = per_node_hstate_attrs,
1618};
1619
1620/*
1621 * kobj_to_node_hstate - lookup global hstate for node sysdev hstate attr kobj.
1622 * Returns node id via non-NULL nidp.
1623 */
1624static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1625{
1626        int nid;
1627
1628        for (nid = 0; nid < nr_node_ids; nid++) {
1629                struct node_hstate *nhs = &node_hstates[nid];
1630                int i;
1631                for (i = 0; i < HUGE_MAX_HSTATE; i++)
1632                        if (nhs->hstate_kobjs[i] == kobj) {
1633                                if (nidp)
1634                                        *nidp = nid;
1635                                return &hstates[i];
1636                        }
1637        }
1638
1639        BUG();
1640        return NULL;
1641}
1642
1643/*
1644 * Unregister hstate attributes from a single node sysdev.
1645 * No-op if no hstate attributes attached.
1646 */
1647void hugetlb_unregister_node(struct node *node)
1648{
1649        struct hstate *h;
1650        struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1651
1652        if (!nhs->hugepages_kobj)
1653                return;         /* no hstate attributes */
1654
1655        for_each_hstate(h)
1656                if (nhs->hstate_kobjs[h - hstates]) {
1657                        kobject_put(nhs->hstate_kobjs[h - hstates]);
1658                        nhs->hstate_kobjs[h - hstates] = NULL;
1659                }
1660
1661        kobject_put(nhs->hugepages_kobj);
1662        nhs->hugepages_kobj = NULL;
1663}
1664
1665/*
1666 * hugetlb module exit:  unregister hstate attributes from node sysdevs
1667 * that have them.
1668 */
1669static void hugetlb_unregister_all_nodes(void)
1670{
1671        int nid;
1672
1673        /*
1674         * disable node sysdev registrations.
1675         */
1676        register_hugetlbfs_with_node(NULL, NULL);
1677
1678        /*
1679         * remove hstate attributes from any nodes that have them.
1680         */
1681        for (nid = 0; nid < nr_node_ids; nid++)
1682                hugetlb_unregister_node(&node_devices[nid]);
1683}
1684
1685/*
1686 * Register hstate attributes for a single node sysdev.
1687 * No-op if attributes already registered.
1688 */
1689void hugetlb_register_node(struct node *node)
1690{
1691        struct hstate *h;
1692        struct node_hstate *nhs = &node_hstates[node->sysdev.id];
1693        int err;
1694
1695        if (nhs->hugepages_kobj)
1696                return;         /* already allocated */
1697
1698        nhs->hugepages_kobj = kobject_create_and_add("hugepages",
1699                                                        &node->sysdev.kobj);
1700        if (!nhs->hugepages_kobj)
1701                return;
1702
1703        for_each_hstate(h) {
1704                err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
1705                                                nhs->hstate_kobjs,
1706                                                &per_node_hstate_attr_group);
1707                if (err) {
1708                        printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
1709                                        " for node %d\n",
1710                                                h->name, node->sysdev.id);
1711                        hugetlb_unregister_node(node);
1712                        break;
1713                }
1714        }
1715}
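/*
 * Sketch of the per-node layout this registers, using the reduced
 * per_node_hstate_attrs group (node id and size hypothetical):
 *
 *      /sys/devices/system/node/node0/hugepages/hugepages-2048kB/
 *              nr_hugepages  free_hugepages  surplus_hugepages
 */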
1716
1717/*
1718 * hugetlb init time:  register hstate attributes for all registered node
1719 * sysdevs of nodes that have memory.  All on-line nodes should have
1720 * registered their associated sysdev by this time.
1721 */
1722static void hugetlb_register_all_nodes(void)
1723{
1724        int nid;
1725
1726        for_each_node_state(nid, N_HIGH_MEMORY) {
1727                struct node *node = &node_devices[nid];
1728                if (node->sysdev.id == nid)
1729                        hugetlb_register_node(node);
1730        }
1731
1732        /*
1733         * Let the node sysdev driver know we're here so it can
1734         * [un]register hstate attributes on node hotplug.
1735         */
1736        register_hugetlbfs_with_node(hugetlb_register_node,
1737                                     hugetlb_unregister_node);
1738}
1739#else   /* !CONFIG_NUMA */
1740
1741static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
1742{
1743        BUG();
1744        if (nidp)
1745                *nidp = -1;
1746        return NULL;
1747}
1748
1749static void hugetlb_unregister_all_nodes(void) { }
1750
1751static void hugetlb_register_all_nodes(void) { }
1752
1753#endif
1754
1755static void __exit hugetlb_exit(void)
1756{
1757        struct hstate *h;
1758
1759        hugetlb_unregister_all_nodes();
1760
1761        for_each_hstate(h) {
1762                kobject_put(hstate_kobjs[h - hstates]);
1763        }
1764
1765        kobject_put(hugepages_kobj);
1766}
1767module_exit(hugetlb_exit);
1768
1769static int __init hugetlb_init(void)
1770{
1771        /* Some platforms decide whether they support huge pages at boot
1772         * time. On those that do (such as powerpc), HPAGE_SHIFT is set to 0
1773         * when there is no such support.
1774         */
1775        if (HPAGE_SHIFT == 0)
1776                return 0;
1777
1778        if (!size_to_hstate(default_hstate_size)) {
1779                default_hstate_size = HPAGE_SIZE;
1780                if (!size_to_hstate(default_hstate_size))
1781                        hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
1782        }
1783        default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
1784        if (default_hstate_max_huge_pages)
1785                default_hstate.max_huge_pages = default_hstate_max_huge_pages;
1786
1787        hugetlb_init_hstates();
1788
1789        gather_bootmem_prealloc();
1790
1791        report_hugepages();
1792
1793        hugetlb_sysfs_init();
1794
1795        hugetlb_register_all_nodes();
1796
1797        return 0;
1798}
1799module_init(hugetlb_init);
1800
1801/* Should be called on processing a hugepagesz=... option */
1802void __init hugetlb_add_hstate(unsigned order)
1803{
1804        struct hstate *h;
1805        unsigned long i;
1806
1807        if (size_to_hstate(PAGE_SIZE << order)) {
1808                printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
1809                return;
1810        }
1811        BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
1812        BUG_ON(order == 0);
1813        h = &hstates[max_hstate++];
1814        h->order = order;
1815        h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
1816        h->nr_huge_pages = 0;
1817        h->free_huge_pages = 0;
1818        for (i = 0; i < MAX_NUMNODES; ++i)
1819                INIT_LIST_HEAD(&h->hugepage_freelists[i]);
1820        h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
1821        h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
1822        snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
1823                                        huge_page_size(h)/1024);
1824
1825        parsed_hstate = h;
1826}
1827
1828static int __init hugetlb_nrpages_setup(char *s)
1829{
1830        unsigned long *mhp;
1831        static unsigned long *last_mhp;
1832
1833        /*
1834         * !max_hstate means we haven't parsed a hugepagesz= parameter yet,
1835         * so this hugepages= parameter goes to the "default hstate".
1836         */
1837        if (!max_hstate)
1838                mhp = &default_hstate_max_huge_pages;
1839        else
1840                mhp = &parsed_hstate->max_huge_pages;
1841
1842        if (mhp == last_mhp) {
1843                printk(KERN_WARNING "hugepages= specified twice without "
1844                        "interleaving hugepagesz=, ignoring\n");
1845                return 1;
1846        }
1847
1848        if (sscanf(s, "%lu", mhp) <= 0)
1849                *mhp = 0;
1850
1851        /*
1852         * Global state is always initialized later in hugetlb_init.
1853         * But hstates with order >= MAX_ORDER must allocate their pages
1854         * here, early enough to still use the bootmem allocator.
1855         */
1856        if (max_hstate && parsed_hstate->order >= MAX_ORDER)
1857                hugetlb_hstate_alloc_pages(parsed_hstate);
1858
1859        last_mhp = mhp;
1860
1861        return 1;
1862}
1863__setup("hugepages=", hugetlb_nrpages_setup);
1864
1865static int __init hugetlb_default_setup(char *s)
1866{
1867        default_hstate_size = memparse(s, &s);
1868        return 1;
1869}
1870__setup("default_hugepagesz=", hugetlb_default_setup);
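/*
 * Illustrative sketch only: the hugepages= and default_hugepagesz=
 * handlers above, together with the hugepagesz= option that feeds
 * hugetlb_add_hstate(), are driven from the kernel command line, e.g.
 * (hypothetical sizes and counts):
 *
 *      hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512 default_hugepagesz=1G
 *
 * Each hugepages= applies to the most recently parsed hugepagesz=; with
 * no preceding hugepagesz= it sizes the default hstate.
 */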
1871
1872static unsigned int cpuset_mems_nr(unsigned int *array)
1873{
1874        int node;
1875        unsigned int nr = 0;
1876
1877        for_each_node_mask(node, cpuset_current_mems_allowed)
1878                nr += array[node];
1879
1880        return nr;
1881}
1882
1883#ifdef CONFIG_SYSCTL
1884static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
1885                         struct ctl_table *table, int write,
1886                         void __user *buffer, size_t *length, loff_t *ppos)
1887{
1888        struct hstate *h = &default_hstate;
1889        unsigned long tmp;
1890        int ret;
1891
1892        tmp = h->max_huge_pages;
1893
1894        if (write && h->order >= MAX_ORDER)
1895                return -EINVAL;
1896
1897        table->data = &tmp;
1898        table->maxlen = sizeof(unsigned long);
1899        ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1900        if (ret)
1901                goto out;
1902
1903        if (write) {
1904                NODEMASK_ALLOC(nodemask_t, nodes_allowed,
1905                                                GFP_KERNEL | __GFP_NORETRY);
1906                if (!(obey_mempolicy &&
1907                               init_nodemask_of_mempolicy(nodes_allowed))) {
1908                        NODEMASK_FREE(nodes_allowed);
1909                        nodes_allowed = &node_states[N_HIGH_MEMORY];
1910                }
1911                h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
1912
1913                if (nodes_allowed != &node_states[N_HIGH_MEMORY])
1914                        NODEMASK_FREE(nodes_allowed);
1915        }
1916out:
1917        return ret;
1918}
1919
1920int hugetlb_sysctl_handler(struct ctl_table *table, int write,
1921                          void __user *buffer, size_t *length, loff_t *ppos)
1922{
1923
1924        return hugetlb_sysctl_handler_common(false, table, write,
1925                                                        buffer, length, ppos);
1926}
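/*
 * Illustrative sketch only: this handler backs the vm.nr_hugepages sysctl
 * (/proc/sys/vm/nr_hugepages), so a hypothetical
 *
 *      # sysctl -w vm.nr_hugepages=128
 *
 * resizes the default hstate's persistent pool, just like the sysfs knob.
 */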
1927
1928#ifdef CONFIG_NUMA
1929int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
1930                          void __user *buffer, size_t *length, loff_t *ppos)
1931{
1932        return hugetlb_sysctl_handler_common(true, table, write,
1933                                                        buffer, length, ppos);
1934}
1935#endif /* CONFIG_NUMA */
1936
1937int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
1938                        void __user *buffer,
1939                        size_t *length, loff_t *ppos)
1940{
1941        proc_dointvec(table, write, buffer, length, ppos);
1942        if (hugepages_treat_as_movable)
1943                htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
1944        else
1945                htlb_alloc_mask = GFP_HIGHUSER;
1946        return 0;
1947}
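/*
 * Illustrative sketch only: writing 1 to vm.hugepages_treat_as_movable
 * switches htlb_alloc_mask to GFP_HIGHUSER_MOVABLE, allowing huge pages
 * to be allocated from ZONE_MOVABLE, e.g.:
 *
 *      # sysctl -w vm.hugepages_treat_as_movable=1
 */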
1948
1949int hugetlb_overcommit_handler(struct ctl_table *table, int write,
1950                        void __user *buffer,
1951                        size_t *length, loff_t *ppos)
1952{
1953        struct hstate *h = &default_hstate;
1954        unsigned long tmp;
1955        int ret;
1956
1957        tmp = h->nr_overcommit_huge_pages;
1958
1959        if (write && h->order >= MAX_ORDER)
1960                return -EINVAL;
1961
1962        table->data = &tmp;
1963        table->maxlen = sizeof(unsigned long);
1964        ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
1965        if (ret)
1966                goto out;
1967
1968        if (write) {
1969                spin_lock(&hugetlb_lock);
1970                h->nr_overcommit_huge_pages = tmp;
1971                spin_unlock(&hugetlb_lock);
1972        }
1973out:
1974        return ret;
1975}
1976
1977#endif /* CONFIG_SYSCTL */
1978
1979void hugetlb_report_meminfo(struct seq_file *m)
1980{
1981        struct hstate *h = &default_hstate;
1982        seq_printf(m,
1983                        "HugePages_Total:   %5lu\n"
1984                        "HugePages_Free:    %5lu\n"
1985                        "HugePages_Rsvd:    %5lu\n"
1986                        "HugePages_Surp:    %5lu\n"
1987                        "Hugepagesize:   %8lu kB\n",
1988                        h->nr_huge_pages,
1989                        h->free_huge_pages,
1990                        h->resv_huge_pages,
1991                        h->surplus_huge_pages,
1992                        1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1993}
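/*
 * Sample of the /proc/meminfo block emitted above (values hypothetical):
 *
 *      HugePages_Total:      64
 *      HugePages_Free:       60
 *      HugePages_Rsvd:        4
 *      HugePages_Surp:        0
 *      Hugepagesize:       2048 kB
 */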
1994
1995int hugetlb_report_node_meminfo(int nid, char *buf)
1996{
1997        struct hstate *h = &default_hstate;
1998        return sprintf(buf,
1999                "Node %d HugePages_Total: %5u\n"
2000                "Node %d HugePages_Free:  %5u\n"
2001                "Node %d HugePages_Surp:  %5u\n",
2002                nid, h->nr_huge_pages_node[nid],
2003                nid, h->free_huge_pages_node[nid],
2004                nid, h->surplus_huge_pages_node[nid]);
2005}
2006
2007/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2008unsigned long hugetlb_total_pages(void)
2009{
2010        struct hstate *h = &default_hstate;
2011        return h->nr_huge_pages * pages_per_huge_page(h);
2012}
2013
2014static int hugetlb_acct_memory(struct hstate *h, long delta)
2015{
2016        int ret = -ENOMEM;
2017
2018        spin_lock(&hugetlb_lock);
2019        /*
2020         * When cpusets are configured, they break strict hugetlb page
2021         * reservation, as the accounting is done on a global variable. Such
2022         * a reservation is completely meaningless in the presence of cpusets
2023         * because it is not checked against page availability for the
2024         * current cpuset. An application can still be OOM-killed by the
2025         * kernel for lack of free hugetlb pages in the cpuset the task is in.
2026         * Enforcing strict accounting with cpusets is almost impossible
2027         * (or too ugly) because cpusets are so fluid that tasks and memory
2028         * nodes can be moved between them dynamically.
2029         *
2030         * Changing the semantics of shared hugetlb mappings under cpusets
2031         * is undesirable. However, in order to preserve some of the
2032         * semantics, we fall back to checking against the current free page
2033         * availability as a best attempt, hopefully minimizing the impact
2034         * of the semantic change that cpusets introduce.
2035         */
2036        if (delta > 0) {
2037                if (gather_surplus_pages(h, delta) < 0)
2038                        goto out;
2039
2040                if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2041                        return_unused_surplus_pages(h, delta);
2042                        goto out;
2043                }
2044        }
2045
2046        ret = 0;
2047        if (delta < 0)
2048                return_unused_surplus_pages(h, (unsigned long) -delta);
2049
2050out:
2051        spin_unlock(&hugetlb_lock);
2052        return ret;
2053}
2054
2055static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2056{
2057        struct resv_map *reservations = vma_resv_map(vma);
2058
2059        /*
2060         * This new VMA should share its sibling's reservation map if present.
2061         * The VMA will only ever have a valid reservation map pointer where
2062         * it is being copied for another still existing VMA.  As that VMA
2063         * has a reference to the reservation map it cannot disappear until
2064         * after this open call completes.  It is therefore safe to take a
2065         * new reference here without additional locking.
2066         */
2067        if (reservations)
2068                kref_get(&reservations->refs);
2069}
2070
2071static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2072{
2073        struct hstate *h = hstate_vma(vma);
2074        struct resv_map *reservations = vma_resv_map(vma);
2075        unsigned long reserve;
2076        unsigned long start;
2077        unsigned long end;
2078
2079        if (reservations) {
2080                start = vma_hugecache_offset(h, vma, vma->vm_start);
2081                end = vma_hugecache_offset(h, vma, vma->vm_end);
2082
2083                reserve = (end - start) -
2084                        region_count(&reservations->regions, start, end);
2085
2086                kref_put(&reservations->refs, resv_map_release);
2087
2088                if (reserve) {
2089                        hugetlb_acct_memory(h, -reserve);
2090                        hugetlb_put_quota(vma->vm_file->f_mapping, reserve);
2091                }
2092        }
2093}
2094
2095/*
2096 * We cannot handle pagefaults against hugetlb pages at all.  They cause
2097 * handle_mm_fault() to try to instantiate regular-sized pages in the
2098 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2099 * this far.
2100 */
2101static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2102{
2103        BUG();
2104        return 0;
2105}
2106
2107const struct vm_operations_struct hugetlb_vm_ops = {
2108        .fault = hugetlb_vm_op_fault,
2109        .open = hugetlb_vm_op_open,
2110        .close = hugetlb_vm_op_close,
2111};
2112
2113static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2114                                int writable)
2115{
2116        pte_t entry;
2117
2118        if (writable) {
2119                entry =
2120                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
2121        } else {
2122                entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
2123        }
2124        entry = pte_mkyoung(entry);
2125        entry = pte_mkhuge(entry);
2126
2127        return entry;
2128}
2129
2130static void set_huge_ptep_writable(struct vm_area_struct *vma,
2131                                   unsigned long address, pte_t *ptep)
2132{
2133        pte_t entry;
2134
2135        entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
2136        if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2137                update_mmu_cache(vma, address, ptep);
2138}
2139
2140
2141int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2142                            struct vm_area_struct *vma)
2143{
2144        pte_t *src_pte, *dst_pte, entry;
2145        struct page *ptepage;
2146        unsigned long addr;
2147        int cow;
2148        struct hstate *h = hstate_vma(vma);
2149        unsigned long sz = huge_page_size(h);
2150
2151        cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2152
2153        for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2154                src_pte = huge_pte_offset(src, addr);
2155                if (!src_pte)
2156                        continue;
2157                dst_pte = huge_pte_alloc(dst, addr, sz);
2158                if (!dst_pte)
2159                        goto nomem;
2160
2161                /* If the pagetables are shared don't copy or take references */
2162                if (dst_pte == src_pte)
2163                        continue;
2164
2165                spin_lock(&dst->page_table_lock);
2166                spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
2167                if (!huge_pte_none(huge_ptep_get(src_pte))) {
2168                        if (cow)
2169                                huge_ptep_set_wrprotect(src, addr, src_pte);
2170                        entry = huge_ptep_get(src_pte);
2171                        ptepage = pte_page(entry);
2172                        get_page(ptepage);
2173                        page_dup_rmap(ptepage);
2174                        set_huge_pte_at(dst, addr, dst_pte, entry);
2175                }
2176                spin_unlock(&src->page_table_lock);
2177                spin_unlock(&dst->page_table_lock);
2178        }
2179        return 0;
2180
2181nomem:
2182        return -ENOMEM;
2183}
2184
2185static int is_hugetlb_entry_migration(pte_t pte)
2186{
2187        swp_entry_t swp;
2188
2189        if (huge_pte_none(pte) || pte_present(pte))
2190                return 0;
2191        swp = pte_to_swp_entry(pte);
2192        if (non_swap_entry(swp) && is_migration_entry(swp))
2193                return 1;
2194        else
2195                return 0;
2196}
2197
2198static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2199{
2200        swp_entry_t swp;
2201
2202        if (huge_pte_none(pte) || pte_present(pte))
2203                return 0;
2204        swp = pte_to_swp_entry(pte);
2205        if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2206                return 1;
2207        else
2208                return 0;
2209}
2210
2211void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2212                            unsigned long end, struct page *ref_page)
2213{
2214        struct mm_struct *mm = vma->vm_mm;
2215        unsigned long address;
2216        pte_t *ptep;
2217        pte_t pte;
2218        struct page *page;
2219        struct page *tmp;
2220        struct hstate *h = hstate_vma(vma);
2221        unsigned long sz = huge_page_size(h);
2222
2223        /*
2224         * A page gathering list, protected by the per-file i_mmap_mutex.
2225         * The lock is used to avoid list corruption from multiple
2226         * unmappings of the same page, since we are using page->lru.
2227         */
2228        LIST_HEAD(page_list);
2229
2230        WARN_ON(!is_vm_hugetlb_page(vma));
2231        BUG_ON(start & ~huge_page_mask(h));
2232        BUG_ON(end & ~huge_page_mask(h));
2233
2234        mmu_notifier_invalidate_range_start(mm, start, end);
2235        spin_lock(&mm->page_table_lock);
2236        for (address = start; address < end; address += sz) {
2237                ptep = huge_pte_offset(mm, address);
2238                if (!ptep)
2239                        continue;
2240
2241                if (huge_pmd_unshare(mm, &address, ptep))
2242                        continue;
2243
2244                /*
2245                 * If a reference page is supplied, it is because a specific
2246                 * page is being unmapped, not a range. Ensure the page we
2247                 * are about to unmap is the actual page of interest.
2248                 */
2249                if (ref_page) {
2250                        pte = huge_ptep_get(ptep);
2251                        if (huge_pte_none(pte))
2252                                continue;
2253                        page = pte_page(pte);
2254                        if (page != ref_page)
2255                                continue;
2256
2257                        /*
2258                         * Mark the VMA as having unmapped its page so that
2259                         * future faults in this VMA will fail rather than
2260                         * looking like data was lost
2261                         */
2262                        set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2263                }
2264
2265                pte = huge_ptep_get_and_clear(mm, address, ptep);
2266                if (huge_pte_none(pte))
2267                        continue;
2268
2269                /*
2270                 * An HWPoisoned hugepage is already unmapped and its reference dropped.
2271                 */
2272                if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
2273                        continue;
2274
2275                page = pte_page(pte);
2276                if (pte_dirty(pte))
2277                        set_page_dirty(page);
2278                list_add(&page->lru, &page_list);
2279        }
2280        spin_unlock(&mm->page_table_lock);
2281        flush_tlb_range(vma, start, end);
2282        mmu_notifier_invalidate_range_end(mm, start, end);
2283        list_for_each_entry_safe(page, tmp, &page_list, lru) {
2284                page_remove_rmap(page);
2285                list_del(&page->lru);
2286                put_page(page);
2287        }
2288}
2289
2290void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2291                          unsigned long end, struct page *ref_page)
2292{
2293        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2294        __unmap_hugepage_range(vma, start, end, ref_page);
2295        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2296}
2297
2298/*
2299 * This is called when the original mapper fails to COW a MAP_PRIVATE
2300 * mapping it owns the reserved page for. The intention is to unmap the page
2301 * from other VMAs and let the children be SIGKILLed if they are faulting in
2302 * the same region.
2303 */
2304static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2305                                struct page *page, unsigned long address)
2306{
2307        struct hstate *h = hstate_vma(vma);
2308        struct vm_area_struct *iter_vma;
2309        struct address_space *mapping;
2310        struct prio_tree_iter iter;
2311        pgoff_t pgoff;
2312
2313        /*
2314         * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2315         * from page cache lookup which is in HPAGE_SIZE units.
2316         */
2317        address = address & huge_page_mask(h);
2318        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
2319                + (vma->vm_pgoff >> PAGE_SHIFT);
2320        mapping = (struct address_space *)page_private(page);
2321
2322        /*
2323         * Take the mapping lock for the duration of the table walk. As
2324         * this mapping should be shared between all the VMAs,
2325         * __unmap_hugepage_range() is called directly with the lock held.
2326         */
2327        mutex_lock(&mapping->i_mmap_mutex);
2328        vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
2329                /* Do not unmap the current VMA */
2330                if (iter_vma == vma)
2331                        continue;
2332
2333                /*
2334                 * Unmap the page from other VMAs without their own reserves.
2335                 * They get marked to be SIGKILLed if they fault in these
2336                 * areas. This is because a future no-page fault on this VMA
2337                 * could insert a zeroed page instead of the data existing
2338                 * from the time of fork. This would look like data corruption.
2339                 */
2340                if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2341                        __unmap_hugepage_range(iter_vma,
2342                                address, address + huge_page_size(h),
2343                                page);
2344        }
2345        mutex_unlock(&mapping->i_mmap_mutex);
2346
2347        return 1;
2348}
2349
2350/*
2351 * hugetlb_cow() should be called with the page lock of the original hugepage held.
2352 */
2353static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2354                        unsigned long address, pte_t *ptep, pte_t pte,
2355                        struct page *pagecache_page)
2356{
2357        struct hstate *h = hstate_vma(vma);
2358        struct page *old_page, *new_page;
2359        int avoidcopy;
2360        int outside_reserve = 0;
2361
2362        old_page = pte_page(pte);
2363
2364retry_avoidcopy:
2365        /* If no-one else is actually using this page, avoid the copy
2366         * and just make the page writable */
2367        avoidcopy = (page_mapcount(old_page) == 1);
2368        if (avoidcopy) {
2369                if (PageAnon(old_page))
2370                        page_move_anon_rmap(old_page, vma, address);
2371                set_huge_ptep_writable(vma, address, ptep);
2372                return 0;
2373        }
2374
2375        /*
2376         * If the process that created a MAP_PRIVATE mapping is about to
2377         * perform a COW due to a shared page count, attempt to satisfy
2378         * the allocation without using the existing reserves. The pagecache
2379         * page is used to determine if the reserve at this address was
2380         * consumed or not. If reserves were used, a partial faulted mapping
2381         * at the time of fork() could consume its reserves on COW instead
2382         * of the full address range.
2383         */
2384        if (!(vma->vm_flags & VM_MAYSHARE) &&
2385                        is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2386                        old_page != pagecache_page)
2387                outside_reserve = 1;
2388
2389        page_cache_get(old_page);
2390
2391        /* Drop page_table_lock as buddy allocator may be called */
2392        spin_unlock(&mm->page_table_lock);
2393        new_page = alloc_huge_page(vma, address, outside_reserve);
2394
2395        if (IS_ERR(new_page)) {
2396                page_cache_release(old_page);
2397
2398                /*
2399                 * If a process owning a MAP_PRIVATE mapping fails to COW,
2400                 * it is due to references held by a child and an insufficient
2401                 * huge page pool. To guarantee the original mapper's
2402                 * reliability, unmap the page from child processes. The child
2403                 * may get SIGKILLed if it later faults.
2404                 */
2405                if (outside_reserve) {
2406                        BUG_ON(huge_pte_none(pte));
2407                        if (unmap_ref_private(mm, vma, old_page, address)) {
2408                                BUG_ON(page_count(old_page) != 1);
2409                                BUG_ON(huge_pte_none(pte));
2410                                spin_lock(&mm->page_table_lock);
2411                                goto retry_avoidcopy;
2412                        }
2413                        WARN_ON_ONCE(1);
2414                }
2415
2416                /* Caller expects lock to be held */
2417                spin_lock(&mm->page_table_lock);
2418                return -PTR_ERR(new_page);
2419        }
2420
2421        /*
2422         * When the original hugepage is a shared one, it does not have
2423         * an anon_vma prepared.
2424         */
2425        if (unlikely(anon_vma_prepare(vma))) {
2426                page_cache_release(new_page);
2427                page_cache_release(old_page);
2428                /* Caller expects lock to be held */
2429                spin_lock(&mm->page_table_lock);
2430                return VM_FAULT_OOM;
2431        }
2432
2433        copy_user_huge_page(new_page, old_page, address, vma,
2434                            pages_per_huge_page(h));
2435        __SetPageUptodate(new_page);
2436
2437        /*
2438         * Retake the page_table_lock to check for racing updates
2439         * before the page tables are altered
2440         */
2441        spin_lock(&mm->page_table_lock);
2442        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2443        if (likely(pte_same(huge_ptep_get(ptep), pte))) {
2444                /* Break COW */
2445                mmu_notifier_invalidate_range_start(mm,
2446                        address & huge_page_mask(h),
2447                        (address & huge_page_mask(h)) + huge_page_size(h));
2448                huge_ptep_clear_flush(vma, address, ptep);
2449                set_huge_pte_at(mm, address, ptep,
2450                                make_huge_pte(vma, new_page, 1));
2451                page_remove_rmap(old_page);
2452                hugepage_add_new_anon_rmap(new_page, vma, address);
2453                /* Make the old page be freed below */
2454                new_page = old_page;
2455                mmu_notifier_invalidate_range_end(mm,
2456                        address & huge_page_mask(h),
2457                        (address & huge_page_mask(h)) + huge_page_size(h));
2458        }
2459        page_cache_release(new_page);
2460        page_cache_release(old_page);
2461        return 0;
2462}
2463
2464/* Return the pagecache page at a given address within a VMA */
2465static struct page *hugetlbfs_pagecache_page(struct hstate *h,
2466                        struct vm_area_struct *vma, unsigned long address)
2467{
2468        struct address_space *mapping;
2469        pgoff_t idx;
2470
2471        mapping = vma->vm_file->f_mapping;
2472        idx = vma_hugecache_offset(h, vma, address);
2473
2474        return find_lock_page(mapping, idx);
2475}
2476
2477/*
2478 * Return whether there is a pagecache page to back the given address within the VMA.
2479 * The caller, follow_hugetlb_page(), holds page_table_lock, so we cannot lock_page().
2480 */
2481static bool hugetlbfs_pagecache_present(struct hstate *h,
2482                        struct vm_area_struct *vma, unsigned long address)
2483{
2484        struct address_space *mapping;
2485        pgoff_t idx;
2486        struct page *page;
2487
2488        mapping = vma->vm_file->f_mapping;
2489        idx = vma_hugecache_offset(h, vma, address);
2490
2491        page = find_get_page(mapping, idx);
2492        if (page)
2493                put_page(page);
2494        return page != NULL;
2495}
2496
2497static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2498                        unsigned long address, pte_t *ptep, unsigned int flags)
2499{
2500        struct hstate *h = hstate_vma(vma);
2501        int ret = VM_FAULT_SIGBUS;
2502        pgoff_t idx;
2503        unsigned long size;
2504        struct page *page;
2505        struct address_space *mapping;
2506        pte_t new_pte;
2507
2508        /*
2509         * Currently, we are forced to kill the process in the event the
2510         * original mapper has unmapped pages from the child due to a failed
2511         * COW. Warn that such a situation has occurred, as it may not be obvious.
2512         */
2513        if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2514                printk(KERN_WARNING
2515                        "PID %d killed due to inadequate hugepage pool\n",
2516                        current->pid);
2517                return ret;
2518        }
2519
2520        mapping = vma->vm_file->f_mapping;
2521        idx = vma_hugecache_offset(h, vma, address);
2522
2523        /*
2524         * Use page lock to guard against racing truncation
2525         * before we get page_table_lock.
2526         */
2527retry:
2528        page = find_lock_page(mapping, idx);
2529        if (!page) {
2530                size = i_size_read(mapping->host) >> huge_page_shift(h);
2531                if (idx >= size)
2532                        goto out;
2533                page = alloc_huge_page(vma, address, 0);
2534                if (IS_ERR(page)) {
2535                        ret = -PTR_ERR(page);
2536                        goto out;
2537                }
2538                clear_huge_page(page, address, pages_per_huge_page(h));
2539                __SetPageUptodate(page);
2540
2541                if (vma->vm_flags & VM_MAYSHARE) {
2542                        int err;
2543                        struct inode *inode = mapping->host;
2544
2545                        err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
2546                        if (err) {
2547                                put_page(page);
2548                                if (err == -EEXIST)
2549                                        goto retry;
2550                                goto out;
2551                        }
2552
2553                        spin_lock(&inode->i_lock);
2554                        inode->i_blocks += blocks_per_huge_page(h);
2555                        spin_unlock(&inode->i_lock);
2556                        page_dup_rmap(page);
2557                } else {
2558                        lock_page(page);
2559                        if (unlikely(anon_vma_prepare(vma))) {
2560                                ret = VM_FAULT_OOM;
2561                                goto backout_unlocked;
2562                        }
2563                        hugepage_add_new_anon_rmap(page, vma, address);
2564                }
2565        } else {
2566                /*
2567                 * If a memory error occurs between mmap() and fault, some processes
2568                 * don't have a hwpoisoned swap entry for the errored virtual address,
2569                 * so we need to block hugepage faults with a PG_hwpoison bit check.
2570                 */
2571                if (unlikely(PageHWPoison(page))) {
2572                        ret = VM_FAULT_HWPOISON |
2573                              VM_FAULT_SET_HINDEX(h - hstates);
2574                        goto backout_unlocked;
2575                }
2576                page_dup_rmap(page);
2577        }
2578
2579        /*
2580         * If we are going to COW a private mapping later, we examine the
2581         * pending reservations for this page now. This will ensure that
2582         * any allocations necessary to record that reservation occur outside
2583         * the spinlock.
2584         */
2585        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2586                if (vma_needs_reservation(h, vma, address) < 0) {
2587                        ret = VM_FAULT_OOM;
2588                        goto backout_unlocked;
2589                }
2590
2591        spin_lock(&mm->page_table_lock);
2592        size = i_size_read(mapping->host) >> huge_page_shift(h);
2593        if (idx >= size)
2594                goto backout;
2595
2596        ret = 0;
2597        if (!huge_pte_none(huge_ptep_get(ptep)))
2598                goto backout;
2599
2600        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
2601                                && (vma->vm_flags & VM_SHARED)));
2602        set_huge_pte_at(mm, address, ptep, new_pte);
2603
2604        if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
2605                /* Optimization, do the COW without a second fault */
2606                ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
2607        }
2608
2609        spin_unlock(&mm->page_table_lock);
2610        unlock_page(page);
2611out:
2612        return ret;
2613
2614backout:
2615        spin_unlock(&mm->page_table_lock);
2616backout_unlocked:
2617        unlock_page(page);
2618        put_page(page);
2619        goto out;
2620}
2621
2622int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2623                        unsigned long address, unsigned int flags)
2624{
2625        pte_t *ptep;
2626        pte_t entry;
2627        int ret;
2628        struct page *page = NULL;
2629        struct page *pagecache_page = NULL;
2630        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
2631        struct hstate *h = hstate_vma(vma);
2632
2633        ptep = huge_pte_offset(mm, address);
2634        if (ptep) {
2635                entry = huge_ptep_get(ptep);
2636                if (unlikely(is_hugetlb_entry_migration(entry))) {
2637                        migration_entry_wait(mm, (pmd_t *)ptep, address);
2638                        return 0;
2639                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
2640                        return VM_FAULT_HWPOISON_LARGE |
2641                               VM_FAULT_SET_HINDEX(h - hstates);
2642        }
2643
2644        ptep = huge_pte_alloc(mm, address, huge_page_size(h));
2645        if (!ptep)
2646                return VM_FAULT_OOM;
2647
2648        /*
2649         * Serialize hugepage allocation and instantiation, so that we don't
2650         * get spurious allocation failures if two CPUs race to instantiate
2651         * the same page in the page cache.
2652         */
2653        mutex_lock(&hugetlb_instantiation_mutex);
2654        entry = huge_ptep_get(ptep);
2655        if (huge_pte_none(entry)) {
2656                ret = hugetlb_no_page(mm, vma, address, ptep, flags);
2657                goto out_mutex;
2658        }
2659
2660        ret = 0;
2661
2662        /*
2663         * If we are going to COW the mapping later, we examine the pending
2664         * reservations for this page now. This will ensure that any
2665         * allocations necessary to record that reservation occur outside the
2666         * spinlock. For private mappings, we also lookup the pagecache
2667         * page now as it is used to determine if a reservation has been
2668         * consumed.
2669         */
2670        if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
2671                if (vma_needs_reservation(h, vma, address) < 0) {
2672                        ret = VM_FAULT_OOM;
2673                        goto out_mutex;
2674                }
2675
2676                if (!(vma->vm_flags & VM_MAYSHARE))
2677                        pagecache_page = hugetlbfs_pagecache_page(h,
2678                                                                vma, address);
2679        }
2680
2681        /*
2682         * hugetlb_cow() requires page locks of pte_page(entry) and
2683         * pagecache_page, so here we need to take the former
2684         * when page != pagecache_page or !pagecache_page.
2685         * Note that the locking order is always pagecache_page -> page,
2686         * so there is no deadlock worry.
2687         */
2688        page = pte_page(entry);
2689        if (page != pagecache_page)
2690                lock_page(page);
2691
2692        spin_lock(&mm->page_table_lock);
2693        /* Check for a racing update before calling hugetlb_cow */
2694        if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
2695                goto out_page_table_lock;
2696
2697
2698        if (flags & FAULT_FLAG_WRITE) {
2699                if (!pte_write(entry)) {
2700                        ret = hugetlb_cow(mm, vma, address, ptep, entry,
2701                                                        pagecache_page);
2702                        goto out_page_table_lock;
2703                }
2704                entry = pte_mkdirty(entry);
2705        }
2706        entry = pte_mkyoung(entry);
2707        if (huge_ptep_set_access_flags(vma, address, ptep, entry,
2708                                                flags & FAULT_FLAG_WRITE))
2709                update_mmu_cache(vma, address, ptep);
2710
2711out_page_table_lock:
2712        spin_unlock(&mm->page_table_lock);
2713
2714        if (pagecache_page) {
2715                unlock_page(pagecache_page);
2716                put_page(pagecache_page);
2717        }
2718        if (page != pagecache_page)
2719                unlock_page(page);
2720
2721out_mutex:
2722        mutex_unlock(&hugetlb_instantiation_mutex);
2723
2724        return ret;
2725}
2726
2727/* Can be overridden by architectures */
2728__attribute__((weak)) struct page *
2729follow_huge_pud(struct mm_struct *mm, unsigned long address,
2730               pud_t *pud, int write)
2731{
2732        BUG();
2733        return NULL;
2734}
2735
2736int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
2737                        struct page **pages, struct vm_area_struct **vmas,
2738                        unsigned long *position, int *length, int i,
2739                        unsigned int flags)
2740{
2741        unsigned long pfn_offset;
2742        unsigned long vaddr = *position;
2743        int remainder = *length;
2744        struct hstate *h = hstate_vma(vma);
2745
2746        spin_lock(&mm->page_table_lock);
2747        while (vaddr < vma->vm_end && remainder) {
2748                pte_t *pte;
2749                int absent;
2750                struct page *page;
2751
2752                /*
2753                 * Some archs (sparc64, sh*) have multiple pte_ts for
2754                 * each hugepage.  We have to make sure we get the
2755                 * first, for the page indexing below to work.
2756                 */
2757                pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
2758                absent = !pte || huge_pte_none(huge_ptep_get(pte));
2759
2760                /*
2761                 * When coredumping, it suits get_dump_page if we just return
2762                 * an error where there's an empty slot with no huge pagecache
2763                 * to back it.  This way, we avoid allocating a hugepage, and
2764                 * the sparse dumpfile avoids allocating disk blocks, but its
2765                 * huge holes still show up with zeroes where they need to be.
2766                 */
2767                if (absent && (flags & FOLL_DUMP) &&
2768                    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
2769                        remainder = 0;
2770                        break;
2771                }
2772
2773                if (absent ||
2774                    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
2775                        int ret;
2776
2777                        spin_unlock(&mm->page_table_lock);
2778                        ret = hugetlb_fault(mm, vma, vaddr,
2779                                (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
2780                        spin_lock(&mm->page_table_lock);
2781                        if (!(ret & VM_FAULT_ERROR))
2782                                continue;
2783
2784                        remainder = 0;
2785                        break;
2786                }
2787
2788                pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
2789                page = pte_page(huge_ptep_get(pte));
2790same_page:
2791                if (pages) {
2792                        pages[i] = mem_map_offset(page, pfn_offset);
2793                        get_page(pages[i]);
2794                }
2795
2796                if (vmas)
2797                        vmas[i] = vma;
2798
2799                vaddr += PAGE_SIZE;
2800                ++pfn_offset;
2801                --remainder;
2802                ++i;
2803                if (vaddr < vma->vm_end && remainder &&
2804                                pfn_offset < pages_per_huge_page(h)) {
2805                        /*
2806                         * We use pfn_offset to avoid touching the pageframes
2807                         * of this compound page.
2808                         */
2809                        goto same_page;
2810                }
2811        }
2812        spin_unlock(&mm->page_table_lock);
2813        *length = remainder;
2814        *position = vaddr;
2815
2816        return i ? i : -EFAULT;
2817}
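/*
 * Note (editorial): this is the hugetlb back end of get_user_pages(); the
 * pfn_offset/same_page loop above lets one huge page satisfy up to
 * pages_per_huge_page(h) consecutive entries in *pages without re-walking
 * the page tables.
 */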
2818
2819void hugetlb_change_protection(struct vm_area_struct *vma,
2820                unsigned long address, unsigned long end, pgprot_t newprot)
2821{
2822        struct mm_struct *mm = vma->vm_mm;
2823        unsigned long start = address;
2824        pte_t *ptep;
2825        pte_t pte;
2826        struct hstate *h = hstate_vma(vma);
2827
2828        BUG_ON(address >= end);
2829        flush_cache_range(vma, address, end);
2830
2831        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
2832        spin_lock(&mm->page_table_lock);
2833        for (; address < end; address += huge_page_size(h)) {
2834                ptep = huge_pte_offset(mm, address);
2835                if (!ptep)
2836                        continue;
2837                if (huge_pmd_unshare(mm, &address, ptep))
2838                        continue;
2839                if (!huge_pte_none(huge_ptep_get(ptep))) {
2840                        pte = huge_ptep_get_and_clear(mm, address, ptep);
2841                        pte = pte_mkhuge(pte_modify(pte, newprot));
2842                        set_huge_pte_at(mm, address, ptep, pte);
2843                }
2844        }
2845        spin_unlock(&mm->page_table_lock);
2846        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
2847
2848        flush_tlb_range(vma, start, end);
2849}
2850
2851int hugetlb_reserve_pages(struct inode *inode,
2852                                        long from, long to,
2853                                        struct vm_area_struct *vma,
2854                                        vm_flags_t vm_flags)
2855{
2856        long ret, chg;
2857        struct hstate *h = hstate_inode(inode);
2858
2859        /*
2860         * Only apply hugepage reservation if asked. At fault time, a
2861         * VM_NORESERVE mapping will attempt to allocate a page and
2862         * filesystem quota without using reserves.
2863         */
2864        if (vm_flags & VM_NORESERVE)
2865                return 0;
2866
2867        /*
2868         * Shared mappings base their reservation on the number of pages that
2869         * are already allocated on behalf of the file. Private mappings need
2870         * to reserve the full area, even if read-only, as mprotect() may be
2871         * called to make the mapping read-write. Assume !vma is a shm mapping.
2872         */
2873        if (!vma || vma->vm_flags & VM_MAYSHARE)
2874                chg = region_chg(&inode->i_mapping->private_list, from, to);
2875        else {
2876                struct resv_map *resv_map = resv_map_alloc();
2877                if (!resv_map)
2878                        return -ENOMEM;
2879
2880                chg = to - from;
2881
2882                set_vma_resv_map(vma, resv_map);
2883                set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2884        }
2885
2886        if (chg < 0)
2887                return chg;
2888
2889        /* There must be enough filesystem quota for the mapping */
2890        if (hugetlb_get_quota(inode->i_mapping, chg))
2891                return -ENOSPC;
2892
2893        /*
2894         * Check that enough hugepages are available for the reservation.
2895         * Hand back the quota if there are not.
2896         */
2897        ret = hugetlb_acct_memory(h, chg);
2898        if (ret < 0) {
2899                hugetlb_put_quota(inode->i_mapping, chg);
2900                return ret;
2901        }
2902
2903        /*
2904         * Account for the reservations made. Shared mappings record regions
2905         * that have reservations as they are shared by multiple VMAs.
2906         * When the last VMA disappears, the region map says how much
2907         * the reservation was and the page cache tells how much of
2908         * the reservation was consumed. Private mappings are per-VMA and
2909         * only the consumed reservations are tracked. When the VMA
2910         * disappears, the original reservation is the VMA size and the
2911         * consumed reservations are stored in the map. Hence, nothing
2912         * else has to be done for private mappings here
2913         */
2914        if (!vma || vma->vm_flags & VM_MAYSHARE)
2915                region_add(&inode->i_mapping->private_list, from, to);
2916        return 0;
2917}
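/*
 * Note (editorial sketch): callers such as the hugetlbfs mmap() and SysV
 * shm setup paths pass the huge-page index range [from, to) of the
 * mapping. The reservation taken here is what later lets faults in this
 * range allocate from the pool without risk of SIGBUS for lack of pages.
 */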
2918
2919void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2920{
2921        struct hstate *h = hstate_inode(inode);
2922        long chg = region_truncate(&inode->i_mapping->private_list, offset);
2923
2924        spin_lock(&inode->i_lock);
2925        inode->i_blocks -= (blocks_per_huge_page(h) * freed);
2926        spin_unlock(&inode->i_lock);
2927
2928        hugetlb_put_quota(inode->i_mapping, (chg - freed));
2929        hugetlb_acct_memory(h, -(chg - freed));
2930}
2931
2932#ifdef CONFIG_MEMORY_FAILURE
2933
2934/* Should be called with hugetlb_lock held */
2935static int is_hugepage_on_freelist(struct page *hpage)
2936{
2937        struct page *page;
2938        struct page *tmp;
2939        struct hstate *h = page_hstate(hpage);
2940        int nid = page_to_nid(hpage);
2941
2942        list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
2943                if (page == hpage)
2944                        return 1;
2945        return 0;
2946}
2947
2948/*
2949 * This function is called from the memory failure code.
2950 * Assume the caller holds the page lock of the head page.
2951 */
2952int dequeue_hwpoisoned_huge_page(struct page *hpage)
2953{
2954        struct hstate *h = page_hstate(hpage);
2955        int nid = page_to_nid(hpage);
2956        int ret = -EBUSY;
2957
2958        spin_lock(&hugetlb_lock);
2959        if (is_hugepage_on_freelist(hpage)) {
2960                list_del(&hpage->lru);
2961                set_page_refcounted(hpage);
2962                h->free_huge_pages--;
2963                h->free_huge_pages_node[nid]--;
2964                ret = 0;
2965        }
2966        spin_unlock(&hugetlb_lock);
2967        return ret;
2968}
2969#endif
2970