linux/mm/huge_memory.c
   1/*
   2 *  Copyright (C) 2009  Red Hat, Inc.
   3 *
   4 *  This work is licensed under the terms of the GNU GPL, version 2. See
   5 *  the COPYING file in the top-level directory.
   6 */
   7
   8#include <linux/mm.h>
   9#include <linux/sched.h>
  10#include <linux/highmem.h>
  11#include <linux/hugetlb.h>
  12#include <linux/mmu_notifier.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/mm_inline.h>
  16#include <linux/kthread.h>
  17#include <linux/khugepaged.h>
  18#include <linux/freezer.h>
  19#include <linux/mman.h>
  20#include <asm/tlb.h>
  21#include <asm/pgalloc.h>
  22#include "internal.h"
  23
  24/*
  25 * By default transparent hugepage support is enabled for all mappings
  26 * and khugepaged scans all mappings. Defrag is only invoked by
  27 * khugepaged hugepage allocations and by page faults inside
  28 * MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
  29 * allocations.
  30 */
  31unsigned long transparent_hugepage_flags __read_mostly =
  32#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
  33        (1<<TRANSPARENT_HUGEPAGE_FLAG)|
  34#endif
  35#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
  36        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
  37#endif
  38        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
  39        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
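
/*
 * A rough reading of the initializer above: TRANSPARENT_HUGEPAGE_FLAG
 * enables THP for all mappings ("always"), TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG
 * restricts it to MADV_HUGEPAGE regions ("madvise"), and the two DEFRAG
 * flags allow the hugepage allocation to wait (and thus reclaim/compact)
 * for direct faults and for khugepaged respectively. These are the same
 * bits the sysfs handlers below flip with set_bit()/clear_bit() on
 * transparent_hugepage_flags.
 */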
  40
   41/* default scan 8*512 ptes (or vmas) every 10 seconds */
  42static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
  43static unsigned int khugepaged_pages_collapsed;
  44static unsigned int khugepaged_full_scans;
  45static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  46/* during fragmentation poll the hugepage allocator once every minute */
  47static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  48static struct task_struct *khugepaged_thread __read_mostly;
  49static DEFINE_MUTEX(khugepaged_mutex);
  50static DEFINE_SPINLOCK(khugepaged_mm_lock);
  51static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
  52/*
   53 * By default, collapse a hugepage if at least one pte is mapped, just
   54 * as would have happened had the vma been large enough at page fault
   55 * time.
  56 */
  57static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
  58
  59static int khugepaged(void *none);
  60static int mm_slots_hash_init(void);
  61static int khugepaged_slab_init(void);
  62static void khugepaged_slab_free(void);
  63
  64#define MM_SLOTS_HASH_HEADS 1024
  65static struct hlist_head *mm_slots_hash __read_mostly;
  66static struct kmem_cache *mm_slot_cache __read_mostly;
  67
  68/**
  69 * struct mm_slot - hash lookup from mm to mm_slot
  70 * @hash: hash collision list
  71 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  72 * @mm: the mm that this information is valid for
  73 */
  74struct mm_slot {
  75        struct hlist_node hash;
  76        struct list_head mm_node;
  77        struct mm_struct *mm;
  78};
  79
  80/**
  81 * struct khugepaged_scan - cursor for scanning
  82 * @mm_head: the head of the mm list to scan
  83 * @mm_slot: the current mm_slot we are scanning
   84 * @address: the next address inside that mm to be scanned
   85 *
   86 * There is only one khugepaged_scan instance of this cursor structure.
  87 */
  88struct khugepaged_scan {
  89        struct list_head mm_head;
  90        struct mm_slot *mm_slot;
  91        unsigned long address;
  92} khugepaged_scan = {
  93        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
  94};
  95
  96
  97static int set_recommended_min_free_kbytes(void)
  98{
  99        struct zone *zone;
 100        int nr_zones = 0;
 101        unsigned long recommended_min;
 102        extern int min_free_kbytes;
 103
 104        if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
 105                      &transparent_hugepage_flags) &&
 106            !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 107                      &transparent_hugepage_flags))
 108                return 0;
 109
 110        for_each_populated_zone(zone)
 111                nr_zones++;
 112
 113        /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
 114        recommended_min = pageblock_nr_pages * nr_zones * 2;
 115
 116        /*
 117         * Make sure that on average at least two pageblocks are almost free
 118         * of another type, one for a migratetype to fall back to and a
  119         * second to avoid subsequent fallbacks of other types. There are 3
 120         * MIGRATE_TYPES we care about.
 121         */
 122        recommended_min += pageblock_nr_pages * nr_zones *
 123                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
 124
  125        /* don't ever allow reserving more than 5% of the lowmem */
 126        recommended_min = min(recommended_min,
 127                              (unsigned long) nr_free_buffer_pages() / 20);
 128        recommended_min <<= (PAGE_SHIFT-10);
 129
 130        if (recommended_min > min_free_kbytes)
 131                min_free_kbytes = recommended_min;
 132        setup_per_zone_wmarks();
 133        return 0;
 134}
 135late_initcall(set_recommended_min_free_kbytes);
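
/*
 * A worked example of the sizing above (illustrative numbers only,
 * assuming x86_64 with 4KB pages and 2MB pageblocks, so
 * pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3, and 4 populated
 * zones):
 *
 *	recommended_min  = 512 * 4 * 2         =  4096 pages
 *	recommended_min += 512 * 4 * 3 * 3     = 22528 pages total
 *	recommended_min <<= (PAGE_SHIFT - 10) -> 90112 kB (~88 MB)
 *
 * subject to the 5%-of-lowmem cap applied just before the shift, after
 * which the result is written into min_free_kbytes if it raises it.
 */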
 136
 137static int start_khugepaged(void)
 138{
 139        int err = 0;
 140        if (khugepaged_enabled()) {
 141                int wakeup;
 142                if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
 143                        err = -ENOMEM;
 144                        goto out;
 145                }
 146                mutex_lock(&khugepaged_mutex);
 147                if (!khugepaged_thread)
 148                        khugepaged_thread = kthread_run(khugepaged, NULL,
 149                                                        "khugepaged");
 150                if (unlikely(IS_ERR(khugepaged_thread))) {
 151                        printk(KERN_ERR
 152                               "khugepaged: kthread_run(khugepaged) failed\n");
 153                        err = PTR_ERR(khugepaged_thread);
 154                        khugepaged_thread = NULL;
 155                }
 156                wakeup = !list_empty(&khugepaged_scan.mm_head);
 157                mutex_unlock(&khugepaged_mutex);
 158                if (wakeup)
 159                        wake_up_interruptible(&khugepaged_wait);
 160
 161                set_recommended_min_free_kbytes();
 162        } else
 163                /* wakeup to exit */
 164                wake_up_interruptible(&khugepaged_wait);
 165out:
 166        return err;
 167}
 168
 169#ifdef CONFIG_SYSFS
 170
 171static ssize_t double_flag_show(struct kobject *kobj,
 172                                struct kobj_attribute *attr, char *buf,
 173                                enum transparent_hugepage_flag enabled,
 174                                enum transparent_hugepage_flag req_madv)
 175{
 176        if (test_bit(enabled, &transparent_hugepage_flags)) {
 177                VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
 178                return sprintf(buf, "[always] madvise never\n");
 179        } else if (test_bit(req_madv, &transparent_hugepage_flags))
 180                return sprintf(buf, "always [madvise] never\n");
 181        else
 182                return sprintf(buf, "always madvise [never]\n");
 183}
 184static ssize_t double_flag_store(struct kobject *kobj,
 185                                 struct kobj_attribute *attr,
 186                                 const char *buf, size_t count,
 187                                 enum transparent_hugepage_flag enabled,
 188                                 enum transparent_hugepage_flag req_madv)
 189{
 190        if (!memcmp("always", buf,
 191                    min(sizeof("always")-1, count))) {
 192                set_bit(enabled, &transparent_hugepage_flags);
 193                clear_bit(req_madv, &transparent_hugepage_flags);
 194        } else if (!memcmp("madvise", buf,
 195                           min(sizeof("madvise")-1, count))) {
 196                clear_bit(enabled, &transparent_hugepage_flags);
 197                set_bit(req_madv, &transparent_hugepage_flags);
 198        } else if (!memcmp("never", buf,
 199                           min(sizeof("never")-1, count))) {
 200                clear_bit(enabled, &transparent_hugepage_flags);
 201                clear_bit(req_madv, &transparent_hugepage_flags);
 202        } else
 203                return -EINVAL;
 204
 205        return count;
 206}
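
/*
 * The two handlers above implement the three-state sysfs knobs under
 * /sys/kernel/mm/transparent_hugepage/ (created by hugepage_init() below).
 * For example:
 *
 *	# cat /sys/kernel/mm/transparent_hugepage/enabled
 *	[always] madvise never
 *	# echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	# cat /sys/kernel/mm/transparent_hugepage/enabled
 *	always [madvise] never
 *
 * Note that double_flag_store() only compares the leading characters,
 * so the trailing newline from echo is accepted.
 */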
 207
 208static ssize_t enabled_show(struct kobject *kobj,
 209                            struct kobj_attribute *attr, char *buf)
 210{
 211        return double_flag_show(kobj, attr, buf,
 212                                TRANSPARENT_HUGEPAGE_FLAG,
 213                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 214}
 215static ssize_t enabled_store(struct kobject *kobj,
 216                             struct kobj_attribute *attr,
 217                             const char *buf, size_t count)
 218{
 219        ssize_t ret;
 220
 221        ret = double_flag_store(kobj, attr, buf, count,
 222                                TRANSPARENT_HUGEPAGE_FLAG,
 223                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 224
 225        if (ret > 0) {
 226                int err = start_khugepaged();
 227                if (err)
 228                        ret = err;
 229        }
 230
 231        if (ret > 0 &&
 232            (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
 233                      &transparent_hugepage_flags) ||
 234             test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 235                      &transparent_hugepage_flags)))
 236                set_recommended_min_free_kbytes();
 237
 238        return ret;
 239}
 240static struct kobj_attribute enabled_attr =
 241        __ATTR(enabled, 0644, enabled_show, enabled_store);
 242
 243static ssize_t single_flag_show(struct kobject *kobj,
 244                                struct kobj_attribute *attr, char *buf,
 245                                enum transparent_hugepage_flag flag)
 246{
 247        return sprintf(buf, "%d\n",
 248                       !!test_bit(flag, &transparent_hugepage_flags));
 249}
 250
 251static ssize_t single_flag_store(struct kobject *kobj,
 252                                 struct kobj_attribute *attr,
 253                                 const char *buf, size_t count,
 254                                 enum transparent_hugepage_flag flag)
 255{
 256        unsigned long value;
 257        int ret;
 258
 259        ret = kstrtoul(buf, 10, &value);
 260        if (ret < 0)
 261                return ret;
 262        if (value > 1)
 263                return -EINVAL;
 264
 265        if (value)
 266                set_bit(flag, &transparent_hugepage_flags);
 267        else
 268                clear_bit(flag, &transparent_hugepage_flags);
 269
 270        return count;
 271}
 272
 273/*
  274 * Currently defrag only controls whether the hugepage allocation may
  275 * wait (__GFP_WAIT). A blind __GFP_REPEAT would be too aggressive:
  276 * it's never worth swapping tons of memory for one more hugepage.
 277 */
 278static ssize_t defrag_show(struct kobject *kobj,
 279                           struct kobj_attribute *attr, char *buf)
 280{
 281        return double_flag_show(kobj, attr, buf,
 282                                TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 283                                TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
 284}
 285static ssize_t defrag_store(struct kobject *kobj,
 286                            struct kobj_attribute *attr,
 287                            const char *buf, size_t count)
 288{
 289        return double_flag_store(kobj, attr, buf, count,
 290                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 291                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
 292}
 293static struct kobj_attribute defrag_attr =
 294        __ATTR(defrag, 0644, defrag_show, defrag_store);
 295
 296#ifdef CONFIG_DEBUG_VM
 297static ssize_t debug_cow_show(struct kobject *kobj,
 298                                struct kobj_attribute *attr, char *buf)
 299{
 300        return single_flag_show(kobj, attr, buf,
 301                                TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
 302}
 303static ssize_t debug_cow_store(struct kobject *kobj,
 304                               struct kobj_attribute *attr,
 305                               const char *buf, size_t count)
 306{
 307        return single_flag_store(kobj, attr, buf, count,
 308                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
 309}
 310static struct kobj_attribute debug_cow_attr =
 311        __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
 312#endif /* CONFIG_DEBUG_VM */
 313
 314static struct attribute *hugepage_attr[] = {
 315        &enabled_attr.attr,
 316        &defrag_attr.attr,
 317#ifdef CONFIG_DEBUG_VM
 318        &debug_cow_attr.attr,
 319#endif
 320        NULL,
 321};
 322
 323static struct attribute_group hugepage_attr_group = {
 324        .attrs = hugepage_attr,
 325};
 326
 327static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 328                                         struct kobj_attribute *attr,
 329                                         char *buf)
 330{
 331        return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 332}
 333
 334static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 335                                          struct kobj_attribute *attr,
 336                                          const char *buf, size_t count)
 337{
 338        unsigned long msecs;
 339        int err;
 340
 341        err = strict_strtoul(buf, 10, &msecs);
 342        if (err || msecs > UINT_MAX)
 343                return -EINVAL;
 344
 345        khugepaged_scan_sleep_millisecs = msecs;
 346        wake_up_interruptible(&khugepaged_wait);
 347
 348        return count;
 349}
 350static struct kobj_attribute scan_sleep_millisecs_attr =
 351        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 352               scan_sleep_millisecs_store);
 353
 354static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 355                                          struct kobj_attribute *attr,
 356                                          char *buf)
 357{
 358        return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 359}
 360
 361static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 362                                           struct kobj_attribute *attr,
 363                                           const char *buf, size_t count)
 364{
 365        unsigned long msecs;
 366        int err;
 367
 368        err = strict_strtoul(buf, 10, &msecs);
 369        if (err || msecs > UINT_MAX)
 370                return -EINVAL;
 371
 372        khugepaged_alloc_sleep_millisecs = msecs;
 373        wake_up_interruptible(&khugepaged_wait);
 374
 375        return count;
 376}
 377static struct kobj_attribute alloc_sleep_millisecs_attr =
 378        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 379               alloc_sleep_millisecs_store);
 380
 381static ssize_t pages_to_scan_show(struct kobject *kobj,
 382                                  struct kobj_attribute *attr,
 383                                  char *buf)
 384{
 385        return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
 386}
 387static ssize_t pages_to_scan_store(struct kobject *kobj,
 388                                   struct kobj_attribute *attr,
 389                                   const char *buf, size_t count)
 390{
 391        int err;
 392        unsigned long pages;
 393
 394        err = strict_strtoul(buf, 10, &pages);
 395        if (err || !pages || pages > UINT_MAX)
 396                return -EINVAL;
 397
 398        khugepaged_pages_to_scan = pages;
 399
 400        return count;
 401}
 402static struct kobj_attribute pages_to_scan_attr =
 403        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
 404               pages_to_scan_store);
 405
 406static ssize_t pages_collapsed_show(struct kobject *kobj,
 407                                    struct kobj_attribute *attr,
 408                                    char *buf)
 409{
 410        return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
 411}
 412static struct kobj_attribute pages_collapsed_attr =
 413        __ATTR_RO(pages_collapsed);
 414
 415static ssize_t full_scans_show(struct kobject *kobj,
 416                               struct kobj_attribute *attr,
 417                               char *buf)
 418{
 419        return sprintf(buf, "%u\n", khugepaged_full_scans);
 420}
 421static struct kobj_attribute full_scans_attr =
 422        __ATTR_RO(full_scans);
 423
 424static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 425                                      struct kobj_attribute *attr, char *buf)
 426{
 427        return single_flag_show(kobj, attr, buf,
 428                                TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 429}
 430static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 431                                       struct kobj_attribute *attr,
 432                                       const char *buf, size_t count)
 433{
 434        return single_flag_store(kobj, attr, buf, count,
 435                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 436}
 437static struct kobj_attribute khugepaged_defrag_attr =
 438        __ATTR(defrag, 0644, khugepaged_defrag_show,
 439               khugepaged_defrag_store);
 440
 441/*
  442 * max_ptes_none controls whether khugepaged should collapse hugepages
  443 * over any unmapped ptes, in turn potentially increasing the memory
 444 * footprint of the vmas. When max_ptes_none is 0 khugepaged will not
 445 * reduce the available free memory in the system as it
 446 * runs. Increasing max_ptes_none will instead potentially reduce the
 447 * free memory in the system during the khugepaged scan.
 448 */
 449static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 450                                             struct kobj_attribute *attr,
 451                                             char *buf)
 452{
 453        return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
 454}
 455static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 456                                              struct kobj_attribute *attr,
 457                                              const char *buf, size_t count)
 458{
 459        int err;
 460        unsigned long max_ptes_none;
 461
 462        err = strict_strtoul(buf, 10, &max_ptes_none);
 463        if (err || max_ptes_none > HPAGE_PMD_NR-1)
 464                return -EINVAL;
 465
 466        khugepaged_max_ptes_none = max_ptes_none;
 467
 468        return count;
 469}
 470static struct kobj_attribute khugepaged_max_ptes_none_attr =
 471        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 472               khugepaged_max_ptes_none_store);
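
/*
 * For example, with a hugepage made of 512 ptes (HPAGE_PMD_NR == 512 on
 * x86_64 with 2MB hugepages), the default max_ptes_none of 511 lets
 * khugepaged collapse a region with a single pte mapped, while writing 0
 * to /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none
 * requires every pte in the range to be present before collapsing.
 */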
 473
 474static struct attribute *khugepaged_attr[] = {
 475        &khugepaged_defrag_attr.attr,
 476        &khugepaged_max_ptes_none_attr.attr,
 477        &pages_to_scan_attr.attr,
 478        &pages_collapsed_attr.attr,
 479        &full_scans_attr.attr,
 480        &scan_sleep_millisecs_attr.attr,
 481        &alloc_sleep_millisecs_attr.attr,
 482        NULL,
 483};
 484
 485static struct attribute_group khugepaged_attr_group = {
 486        .attrs = khugepaged_attr,
 487        .name = "khugepaged",
 488};
 489#endif /* CONFIG_SYSFS */
 490
 491static int __init hugepage_init(void)
 492{
 493        int err;
 494#ifdef CONFIG_SYSFS
 495        static struct kobject *hugepage_kobj;
 496#endif
 497
 498        err = -EINVAL;
 499        if (!has_transparent_hugepage()) {
 500                transparent_hugepage_flags = 0;
 501                goto out;
 502        }
 503
 504#ifdef CONFIG_SYSFS
 505        err = -ENOMEM;
 506        hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
 507        if (unlikely(!hugepage_kobj)) {
 508                printk(KERN_ERR "hugepage: failed kobject create\n");
 509                goto out;
 510        }
 511
 512        err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
 513        if (err) {
  514                printk(KERN_ERR "hugepage: failed to register hugepage group\n");
 515                goto out;
 516        }
 517
 518        err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
 519        if (err) {
  520                printk(KERN_ERR "hugepage: failed to register khugepaged group\n");
 521                goto out;
 522        }
 523#endif
 524
 525        err = khugepaged_slab_init();
 526        if (err)
 527                goto out;
 528
 529        err = mm_slots_hash_init();
 530        if (err) {
 531                khugepaged_slab_free();
 532                goto out;
 533        }
 534
 535        /*
 536         * By default disable transparent hugepages on smaller systems,
 537         * where the extra memory used could hurt more than TLB overhead
 538         * is likely to save.  The admin can still enable it through /sys.
 539         */
 540        if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
 541                transparent_hugepage_flags = 0;
 542
 543        start_khugepaged();
 544
 545        set_recommended_min_free_kbytes();
 546
 547out:
 548        return err;
 549}
 550module_init(hugepage_init)
 551
 552static int __init setup_transparent_hugepage(char *str)
 553{
 554        int ret = 0;
 555        if (!str)
 556                goto out;
 557        if (!strcmp(str, "always")) {
 558                set_bit(TRANSPARENT_HUGEPAGE_FLAG,
 559                        &transparent_hugepage_flags);
 560                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 561                          &transparent_hugepage_flags);
 562                ret = 1;
 563        } else if (!strcmp(str, "madvise")) {
 564                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 565                          &transparent_hugepage_flags);
 566                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 567                        &transparent_hugepage_flags);
 568                ret = 1;
 569        } else if (!strcmp(str, "never")) {
 570                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 571                          &transparent_hugepage_flags);
 572                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 573                          &transparent_hugepage_flags);
 574                ret = 1;
 575        }
 576out:
 577        if (!ret)
 578                printk(KERN_WARNING
 579                       "transparent_hugepage= cannot parse, ignored\n");
 580        return ret;
 581}
 582__setup("transparent_hugepage=", setup_transparent_hugepage);
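
/*
 * The boot-time equivalent of the "enabled" sysfs knob, e.g. on the
 * kernel command line:
 *
 *	transparent_hugepage=always
 *	transparent_hugepage=madvise
 *	transparent_hugepage=never
 *
 * Any other value leaves the compiled-in default untouched and logs the
 * "cannot parse" warning above.
 */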
 583
 584static void prepare_pmd_huge_pte(pgtable_t pgtable,
 585                                 struct mm_struct *mm)
 586{
 587        assert_spin_locked(&mm->page_table_lock);
 588
 589        /* FIFO */
 590        if (!mm->pmd_huge_pte)
 591                INIT_LIST_HEAD(&pgtable->lru);
 592        else
 593                list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
 594        mm->pmd_huge_pte = pgtable;
 595}
 596
 597static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 598{
 599        if (likely(vma->vm_flags & VM_WRITE))
 600                pmd = pmd_mkwrite(pmd);
 601        return pmd;
 602}
 603
 604static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 605                                        struct vm_area_struct *vma,
 606                                        unsigned long haddr, pmd_t *pmd,
 607                                        struct page *page)
 608{
 609        int ret = 0;
 610        pgtable_t pgtable;
 611
 612        VM_BUG_ON(!PageCompound(page));
 613        pgtable = pte_alloc_one(mm, haddr);
 614        if (unlikely(!pgtable)) {
 615                mem_cgroup_uncharge_page(page);
 616                put_page(page);
 617                return VM_FAULT_OOM;
 618        }
 619
 620        clear_huge_page(page, haddr, HPAGE_PMD_NR);
 621        __SetPageUptodate(page);
 622
 623        spin_lock(&mm->page_table_lock);
 624        if (unlikely(!pmd_none(*pmd))) {
 625                spin_unlock(&mm->page_table_lock);
 626                mem_cgroup_uncharge_page(page);
 627                put_page(page);
 628                pte_free(mm, pgtable);
 629        } else {
 630                pmd_t entry;
 631                entry = mk_pmd(page, vma->vm_page_prot);
 632                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 633                entry = pmd_mkhuge(entry);
 634                /*
 635                 * The spinlocking to take the lru_lock inside
 636                 * page_add_new_anon_rmap() acts as a full memory
 637                 * barrier to be sure clear_huge_page writes become
 638                 * visible after the set_pmd_at() write.
 639                 */
 640                page_add_new_anon_rmap(page, vma, haddr);
 641                set_pmd_at(mm, haddr, pmd, entry);
 642                prepare_pmd_huge_pte(pgtable, mm);
 643                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 644                spin_unlock(&mm->page_table_lock);
 645        }
 646
 647        return ret;
 648}
 649
 650static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 651{
 652        return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 653}
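
/*
 * The only thing "defrag" changes is whether the allocation may block on
 * reclaim/compaction. Expanding the helper above:
 *
 *	alloc_hugepage_gfpmask(1, 0) == GFP_TRANSHUGE
 *	alloc_hugepage_gfpmask(0, 0) == GFP_TRANSHUGE & ~__GFP_WAIT
 *	alloc_hugepage_gfpmask(0, x) == (GFP_TRANSHUGE & ~__GFP_WAIT) | x
 *
 * so a non-defrag fault falls back to regular pages quickly instead of
 * stalling to assemble a hugepage.
 */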
 654
 655static inline struct page *alloc_hugepage_vma(int defrag,
 656                                              struct vm_area_struct *vma,
 657                                              unsigned long haddr, int nd,
 658                                              gfp_t extra_gfp)
 659{
 660        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
 661                               HPAGE_PMD_ORDER, vma, haddr, nd);
 662}
 663
 664#ifndef CONFIG_NUMA
 665static inline struct page *alloc_hugepage(int defrag)
 666{
 667        return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
 668                           HPAGE_PMD_ORDER);
 669}
 670#endif
 671
 672int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 673                               unsigned long address, pmd_t *pmd,
 674                               unsigned int flags)
 675{
 676        struct page *page;
 677        unsigned long haddr = address & HPAGE_PMD_MASK;
 678        pte_t *pte;
 679
 680        if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
 681                if (unlikely(anon_vma_prepare(vma)))
 682                        return VM_FAULT_OOM;
 683                if (unlikely(khugepaged_enter(vma)))
 684                        return VM_FAULT_OOM;
 685                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 686                                          vma, haddr, numa_node_id(), 0);
 687                if (unlikely(!page)) {
 688                        count_vm_event(THP_FAULT_FALLBACK);
 689                        goto out;
 690                }
 691                count_vm_event(THP_FAULT_ALLOC);
 692                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 693                        put_page(page);
 694                        goto out;
 695                }
 696
 697                return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
 698        }
 699out:
 700        /*
 701         * Use __pte_alloc instead of pte_alloc_map, because we can't
  702         * run pte_offset_map on the pmd, if a huge pmd could
 703         * materialize from under us from a different thread.
 704         */
 705        if (unlikely(__pte_alloc(mm, vma, pmd, address)))
 706                return VM_FAULT_OOM;
  707        /* if a huge pmd materialized from under us just retry later */
 708        if (unlikely(pmd_trans_huge(*pmd)))
 709                return 0;
 710        /*
 711         * A regular pmd is established and it can't morph into a huge pmd
 712         * from under us anymore at this point because we hold the mmap_sem
 713         * read mode and khugepaged takes it in write mode. So now it's
 714         * safe to run pte_offset_map().
 715         */
 716        pte = pte_offset_map(pmd, address);
 717        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 718}
 719
 720int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 721                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 722                  struct vm_area_struct *vma)
 723{
 724        struct page *src_page;
 725        pmd_t pmd;
 726        pgtable_t pgtable;
 727        int ret;
 728
 729        ret = -ENOMEM;
 730        pgtable = pte_alloc_one(dst_mm, addr);
 731        if (unlikely(!pgtable))
 732                goto out;
 733
 734        spin_lock(&dst_mm->page_table_lock);
 735        spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
 736
 737        ret = -EAGAIN;
 738        pmd = *src_pmd;
 739        if (unlikely(!pmd_trans_huge(pmd))) {
 740                pte_free(dst_mm, pgtable);
 741                goto out_unlock;
 742        }
 743        if (unlikely(pmd_trans_splitting(pmd))) {
 744                /* split huge page running from under us */
 745                spin_unlock(&src_mm->page_table_lock);
 746                spin_unlock(&dst_mm->page_table_lock);
 747                pte_free(dst_mm, pgtable);
 748
 749                wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
 750                goto out;
 751        }
 752        src_page = pmd_page(pmd);
 753        VM_BUG_ON(!PageHead(src_page));
 754        get_page(src_page);
 755        page_dup_rmap(src_page);
 756        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 757
 758        pmdp_set_wrprotect(src_mm, addr, src_pmd);
 759        pmd = pmd_mkold(pmd_wrprotect(pmd));
 760        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 761        prepare_pmd_huge_pte(pgtable, dst_mm);
 762
 763        ret = 0;
 764out_unlock:
 765        spin_unlock(&src_mm->page_table_lock);
 766        spin_unlock(&dst_mm->page_table_lock);
 767out:
 768        return ret;
 769}
 770
  771/* no "address" argument, so this destroys page coloring on some archs */
 772pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
 773{
 774        pgtable_t pgtable;
 775
 776        assert_spin_locked(&mm->page_table_lock);
 777
 778        /* FIFO */
 779        pgtable = mm->pmd_huge_pte;
 780        if (list_empty(&pgtable->lru))
 781                mm->pmd_huge_pte = NULL;
 782        else {
 783                mm->pmd_huge_pte = list_entry(pgtable->lru.next,
 784                                              struct page, lru);
 785                list_del(&pgtable->lru);
 786        }
 787        return pgtable;
 788}
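
/*
 * prepare_pmd_huge_pte() and get_pmd_huge_pte() form a deposit/withdraw
 * pair: every huge pmd deposits one preallocated pte page table on
 * mm->pmd_huge_pte, and whoever later needs to shatter that huge pmd
 * (zap_huge_pmd, the COW fallback, __split_huge_page_map) withdraws one
 * here, so splitting a huge pmd never has to allocate memory at split
 * time. Both ends rely on mm->page_table_lock, hence the
 * assert_spin_locked() checks.
 */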
 789
 790static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 791                                        struct vm_area_struct *vma,
 792                                        unsigned long address,
 793                                        pmd_t *pmd, pmd_t orig_pmd,
 794                                        struct page *page,
 795                                        unsigned long haddr)
 796{
 797        pgtable_t pgtable;
 798        pmd_t _pmd;
 799        int ret = 0, i;
 800        struct page **pages;
 801
 802        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
 803                        GFP_KERNEL);
 804        if (unlikely(!pages)) {
 805                ret |= VM_FAULT_OOM;
 806                goto out;
 807        }
 808
 809        for (i = 0; i < HPAGE_PMD_NR; i++) {
 810                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
 811                                               __GFP_OTHER_NODE,
 812                                               vma, address, page_to_nid(page));
 813                if (unlikely(!pages[i] ||
 814                             mem_cgroup_newpage_charge(pages[i], mm,
 815                                                       GFP_KERNEL))) {
 816                        if (pages[i])
 817                                put_page(pages[i]);
 818                        mem_cgroup_uncharge_start();
 819                        while (--i >= 0) {
 820                                mem_cgroup_uncharge_page(pages[i]);
 821                                put_page(pages[i]);
 822                        }
 823                        mem_cgroup_uncharge_end();
 824                        kfree(pages);
 825                        ret |= VM_FAULT_OOM;
 826                        goto out;
 827                }
 828        }
 829
 830        for (i = 0; i < HPAGE_PMD_NR; i++) {
 831                copy_user_highpage(pages[i], page + i,
  832                                   haddr + PAGE_SIZE*i, vma);
 833                __SetPageUptodate(pages[i]);
 834                cond_resched();
 835        }
 836
 837        spin_lock(&mm->page_table_lock);
 838        if (unlikely(!pmd_same(*pmd, orig_pmd)))
 839                goto out_free_pages;
 840        VM_BUG_ON(!PageHead(page));
 841
 842        pmdp_clear_flush_notify(vma, haddr, pmd);
 843        /* leave pmd empty until pte is filled */
 844
 845        pgtable = get_pmd_huge_pte(mm);
 846        pmd_populate(mm, &_pmd, pgtable);
 847
 848        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 849                pte_t *pte, entry;
 850                entry = mk_pte(pages[i], vma->vm_page_prot);
 851                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 852                page_add_new_anon_rmap(pages[i], vma, haddr);
 853                pte = pte_offset_map(&_pmd, haddr);
 854                VM_BUG_ON(!pte_none(*pte));
 855                set_pte_at(mm, haddr, pte, entry);
 856                pte_unmap(pte);
 857        }
 858        kfree(pages);
 859
 860        mm->nr_ptes++;
 861        smp_wmb(); /* make pte visible before pmd */
 862        pmd_populate(mm, pmd, pgtable);
 863        page_remove_rmap(page);
 864        spin_unlock(&mm->page_table_lock);
 865
 866        ret |= VM_FAULT_WRITE;
 867        put_page(page);
 868
 869out:
 870        return ret;
 871
 872out_free_pages:
 873        spin_unlock(&mm->page_table_lock);
 874        mem_cgroup_uncharge_start();
 875        for (i = 0; i < HPAGE_PMD_NR; i++) {
 876                mem_cgroup_uncharge_page(pages[i]);
 877                put_page(pages[i]);
 878        }
 879        mem_cgroup_uncharge_end();
 880        kfree(pages);
 881        goto out;
 882}
 883
 884int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 885                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
 886{
 887        int ret = 0;
 888        struct page *page, *new_page;
 889        unsigned long haddr;
 890
 891        VM_BUG_ON(!vma->anon_vma);
 892        spin_lock(&mm->page_table_lock);
 893        if (unlikely(!pmd_same(*pmd, orig_pmd)))
 894                goto out_unlock;
 895
 896        page = pmd_page(orig_pmd);
 897        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
 898        haddr = address & HPAGE_PMD_MASK;
 899        if (page_mapcount(page) == 1) {
 900                pmd_t entry;
 901                entry = pmd_mkyoung(orig_pmd);
 902                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
  903                if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
 904                        update_mmu_cache(vma, address, entry);
 905                ret |= VM_FAULT_WRITE;
 906                goto out_unlock;
 907        }
 908        get_page(page);
 909        spin_unlock(&mm->page_table_lock);
 910
 911        if (transparent_hugepage_enabled(vma) &&
 912            !transparent_hugepage_debug_cow())
 913                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 914                                              vma, haddr, numa_node_id(), 0);
 915        else
 916                new_page = NULL;
 917
 918        if (unlikely(!new_page)) {
 919                count_vm_event(THP_FAULT_FALLBACK);
 920                ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 921                                                   pmd, orig_pmd, page, haddr);
 922                put_page(page);
 923                goto out;
 924        }
 925        count_vm_event(THP_FAULT_ALLOC);
 926
 927        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
 928                put_page(new_page);
 929                put_page(page);
 930                ret |= VM_FAULT_OOM;
 931                goto out;
 932        }
 933
 934        copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
 935        __SetPageUptodate(new_page);
 936
 937        spin_lock(&mm->page_table_lock);
 938        put_page(page);
 939        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
 940                mem_cgroup_uncharge_page(new_page);
 941                put_page(new_page);
 942        } else {
 943                pmd_t entry;
 944                VM_BUG_ON(!PageHead(page));
 945                entry = mk_pmd(new_page, vma->vm_page_prot);
 946                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 947                entry = pmd_mkhuge(entry);
 948                pmdp_clear_flush_notify(vma, haddr, pmd);
 949                page_add_new_anon_rmap(new_page, vma, haddr);
 950                set_pmd_at(mm, haddr, pmd, entry);
 951                update_mmu_cache(vma, address, entry);
 952                page_remove_rmap(page);
 953                put_page(page);
 954                ret |= VM_FAULT_WRITE;
 955        }
 956out_unlock:
 957        spin_unlock(&mm->page_table_lock);
 958out:
 959        return ret;
 960}
 961
 962struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 963                                   unsigned long addr,
 964                                   pmd_t *pmd,
 965                                   unsigned int flags)
 966{
 967        struct page *page = NULL;
 968
 969        assert_spin_locked(&mm->page_table_lock);
 970
 971        if (flags & FOLL_WRITE && !pmd_write(*pmd))
 972                goto out;
 973
 974        page = pmd_page(*pmd);
 975        VM_BUG_ON(!PageHead(page));
 976        if (flags & FOLL_TOUCH) {
 977                pmd_t _pmd;
 978                /*
 979                 * We should set the dirty bit only for FOLL_WRITE but
 980                 * for now the dirty bit in the pmd is meaningless.
  981                 * If the dirty bit ever becomes meaningful and
  982                 * we only set it with FOLL_WRITE, an atomic
 983                 * set_bit will be required on the pmd to set the
 984                 * young bit, instead of the current set_pmd_at.
 985                 */
 986                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
 987                set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
 988        }
 989        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
 990        VM_BUG_ON(!PageCompound(page));
 991        if (flags & FOLL_GET)
 992                get_page_foll(page);
 993
 994out:
 995        return page;
 996}
 997
 998int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 999                 pmd_t *pmd)
1000{
1001        int ret = 0;
1002
1003        spin_lock(&tlb->mm->page_table_lock);
1004        if (likely(pmd_trans_huge(*pmd))) {
1005                if (unlikely(pmd_trans_splitting(*pmd))) {
1006                        spin_unlock(&tlb->mm->page_table_lock);
1007                        wait_split_huge_page(vma->anon_vma,
1008                                             pmd);
1009                } else {
1010                        struct page *page;
1011                        pgtable_t pgtable;
1012                        pgtable = get_pmd_huge_pte(tlb->mm);
1013                        page = pmd_page(*pmd);
1014                        pmd_clear(pmd);
1015                        page_remove_rmap(page);
1016                        VM_BUG_ON(page_mapcount(page) < 0);
1017                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1018                        VM_BUG_ON(!PageHead(page));
1019                        spin_unlock(&tlb->mm->page_table_lock);
1020                        tlb_remove_page(tlb, page);
1021                        pte_free(tlb->mm, pgtable);
1022                        ret = 1;
1023                }
1024        } else
1025                spin_unlock(&tlb->mm->page_table_lock);
1026
1027        return ret;
1028}
1029
1030int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1031                unsigned long addr, unsigned long end,
1032                unsigned char *vec)
1033{
1034        int ret = 0;
1035
1036        spin_lock(&vma->vm_mm->page_table_lock);
1037        if (likely(pmd_trans_huge(*pmd))) {
1038                ret = !pmd_trans_splitting(*pmd);
1039                spin_unlock(&vma->vm_mm->page_table_lock);
1040                if (unlikely(!ret))
1041                        wait_split_huge_page(vma->anon_vma, pmd);
1042                else {
1043                        /*
1044                         * All logical pages in the range are present
1045                         * if backed by a huge page.
1046                         */
1047                        memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1048                }
1049        } else
1050                spin_unlock(&vma->vm_mm->page_table_lock);
1051
1052        return ret;
1053}
1054
1055int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1056                unsigned long addr, pgprot_t newprot)
1057{
1058        struct mm_struct *mm = vma->vm_mm;
1059        int ret = 0;
1060
1061        spin_lock(&mm->page_table_lock);
1062        if (likely(pmd_trans_huge(*pmd))) {
1063                if (unlikely(pmd_trans_splitting(*pmd))) {
1064                        spin_unlock(&mm->page_table_lock);
1065                        wait_split_huge_page(vma->anon_vma, pmd);
1066                } else {
1067                        pmd_t entry;
1068
1069                        entry = pmdp_get_and_clear(mm, addr, pmd);
1070                        entry = pmd_modify(entry, newprot);
1071                        set_pmd_at(mm, addr, pmd, entry);
1072                        spin_unlock(&vma->vm_mm->page_table_lock);
1073                        flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
1074                        ret = 1;
1075                }
1076        } else
1077                spin_unlock(&vma->vm_mm->page_table_lock);
1078
1079        return ret;
1080}
1081
1082pmd_t *page_check_address_pmd(struct page *page,
1083                              struct mm_struct *mm,
1084                              unsigned long address,
1085                              enum page_check_address_pmd_flag flag)
1086{
1087        pgd_t *pgd;
1088        pud_t *pud;
1089        pmd_t *pmd, *ret = NULL;
1090
1091        if (address & ~HPAGE_PMD_MASK)
1092                goto out;
1093
1094        pgd = pgd_offset(mm, address);
1095        if (!pgd_present(*pgd))
1096                goto out;
1097
1098        pud = pud_offset(pgd, address);
1099        if (!pud_present(*pud))
1100                goto out;
1101
1102        pmd = pmd_offset(pud, address);
1103        if (pmd_none(*pmd))
1104                goto out;
1105        if (pmd_page(*pmd) != page)
1106                goto out;
1107        /*
1108         * split_vma() may create temporary aliased mappings. There is
1109         * no risk as long as all huge pmd are found and have their
1110         * splitting bit set before __split_huge_page_refcount
1111         * runs. Finding the same huge pmd more than once during the
1112         * same rmap walk is not a problem.
1113         */
1114        if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1115            pmd_trans_splitting(*pmd))
1116                goto out;
1117        if (pmd_trans_huge(*pmd)) {
1118                VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1119                          !pmd_trans_splitting(*pmd));
1120                ret = pmd;
1121        }
1122out:
1123        return ret;
1124}
1125
1126static int __split_huge_page_splitting(struct page *page,
1127                                       struct vm_area_struct *vma,
1128                                       unsigned long address)
1129{
1130        struct mm_struct *mm = vma->vm_mm;
1131        pmd_t *pmd;
1132        int ret = 0;
1133
1134        spin_lock(&mm->page_table_lock);
1135        pmd = page_check_address_pmd(page, mm, address,
1136                                     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1137        if (pmd) {
1138                /*
1139                 * We can't temporarily set the pmd to null in order
1140                 * to split it, the pmd must remain marked huge at all
1141                 * times or the VM won't take the pmd_trans_huge paths
1142                 * and it won't wait on the anon_vma->root->mutex to
1143                 * serialize against split_huge_page*.
1144                 */
1145                pmdp_splitting_flush_notify(vma, address, pmd);
1146                ret = 1;
1147        }
1148        spin_unlock(&mm->page_table_lock);
1149
1150        return ret;
1151}
1152
1153static void __split_huge_page_refcount(struct page *page)
1154{
1155        int i;
1156        unsigned long head_index = page->index;
1157        struct zone *zone = page_zone(page);
1158        int zonestat;
1159        int tail_count = 0;
1160
1161        /* prevent PageLRU to go away from under us, and freeze lru stats */
1162        spin_lock_irq(&zone->lru_lock);
1163        compound_lock(page);
1164
1165        for (i = 1; i < HPAGE_PMD_NR; i++) {
1166                struct page *page_tail = page + i;
1167
1168                /* tail_page->_mapcount cannot change */
1169                BUG_ON(page_mapcount(page_tail) < 0);
1170                tail_count += page_mapcount(page_tail);
1171                /* check for overflow */
1172                BUG_ON(tail_count < 0);
1173                BUG_ON(atomic_read(&page_tail->_count) != 0);
1174                /*
1175                 * tail_page->_count is zero and not changing from
1176                 * under us. But get_page_unless_zero() may be running
1177                 * from under us on the tail_page. If we used
1178                 * atomic_set() below instead of atomic_add(), we
1179                 * would then run atomic_set() concurrently with
1180                 * get_page_unless_zero(), and atomic_set() is
1181                 * implemented in C not using locked ops. spin_unlock
1182                 * on x86 sometime uses locked ops because of PPro
1183                 * errata 66, 92, so unless somebody can guarantee
1184                 * atomic_set() here would be safe on all archs (and
1185                 * not only on x86), it's safer to use atomic_add().
1186                 */
1187                atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1188                           &page_tail->_count);
1189
1190                /* after clearing PageTail the gup refcount can be released */
1191                smp_mb();
1192
1193                /*
 1194                 * retain the hwpoison flag of the poisoned tail page:
 1195                 *   this keeps memory-failure from killing the wrong
 1196                 *   process in a KVM guest after the split.
1197                 */
1198                page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1199                page_tail->flags |= (page->flags &
1200                                     ((1L << PG_referenced) |
1201                                      (1L << PG_swapbacked) |
1202                                      (1L << PG_mlocked) |
1203                                      (1L << PG_uptodate)));
1204                page_tail->flags |= (1L << PG_dirty);
1205
1206                /* clear PageTail before overwriting first_page */
1207                smp_wmb();
1208
1209                /*
1210                 * __split_huge_page_splitting() already set the
1211                 * splitting bit in all pmd that could map this
1212                 * hugepage, that will ensure no CPU can alter the
1213                 * mapcount on the head page. The mapcount is only
1214                 * accounted in the head page and it has to be
1215                 * transferred to all tail pages in the below code. So
1216                 * for this code to be safe, the split the mapcount
 1217                 * for this code to be safe, the mapcount can't change
 1218                 * during the split. But that doesn't mean userland can't
1219                 * we transfer the mapcount, so the pmd splitting
1220                 * status is achieved setting a reserved bit in the
1221                 * pmd, not by clearing the present bit.
 1222                 */
1223                page_tail->_mapcount = page->_mapcount;
1224
1225                BUG_ON(page_tail->mapping);
1226                page_tail->mapping = page->mapping;
1227
1228                page_tail->index = ++head_index;
1229
1230                BUG_ON(!PageAnon(page_tail));
1231                BUG_ON(!PageUptodate(page_tail));
1232                BUG_ON(!PageDirty(page_tail));
1233                BUG_ON(!PageSwapBacked(page_tail));
1234
1235                mem_cgroup_split_huge_fixup(page, page_tail);
1236
1237                lru_add_page_tail(zone, page, page_tail);
1238        }
1239        atomic_sub(tail_count, &page->_count);
1240        BUG_ON(atomic_read(&page->_count) <= 0);
1241
1242        __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1243        __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1244
1245        /*
1246         * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
1247         * so adjust those appropriately if this page is on the LRU.
1248         */
1249        if (PageLRU(page)) {
1250                zonestat = NR_LRU_BASE + page_lru(page);
1251                __mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
1252        }
1253
1254        ClearPageCompound(page);
1255        compound_unlock(page);
1256        spin_unlock_irq(&zone->lru_lock);
1257
1258        for (i = 1; i < HPAGE_PMD_NR; i++) {
1259                struct page *page_tail = page + i;
1260                BUG_ON(page_count(page_tail) <= 0);
1261                /*
1262                 * Tail pages may be freed if there wasn't any mapping
1263                 * like if add_to_swap() is running on a lru page that
1264                 * had its mapping zapped. And freeing these pages
1265                 * requires taking the lru_lock so we do the put_page
1266                 * of the tail pages after the split is complete.
1267                 */
1268                put_page(page_tail);
1269        }
1270
1271        /*
1272         * Only the head page (now become a regular page) is required
1273         * to be pinned by the caller.
1274         */
1275        BUG_ON(page_count(page) <= 0);
1276}
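
/*
 * A rough walk-through of the refcount transfer above, assuming the
 * simplest case of a hugepage mapped by a single mm with no gup pins on
 * any tail page: page_mapcount(head) == 1 and page_mapcount(tail) == 0,
 * so each tail gets atomic_add(1 + 0 + 1, &_count) == 2 -- one reference
 * for the pte that will map it after the split and one that is dropped
 * by the put_page(page_tail) in the final loop -- while tail_count stays
 * 0 and the head's _count keeps covering its own pte mapping plus the
 * pin held by split_huge_page()'s caller.
 */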
1277
1278static int __split_huge_page_map(struct page *page,
1279                                 struct vm_area_struct *vma,
1280                                 unsigned long address)
1281{
1282        struct mm_struct *mm = vma->vm_mm;
1283        pmd_t *pmd, _pmd;
1284        int ret = 0, i;
1285        pgtable_t pgtable;
1286        unsigned long haddr;
1287
1288        spin_lock(&mm->page_table_lock);
1289        pmd = page_check_address_pmd(page, mm, address,
1290                                     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1291        if (pmd) {
1292                pgtable = get_pmd_huge_pte(mm);
1293                pmd_populate(mm, &_pmd, pgtable);
1294
1295                for (i = 0, haddr = address; i < HPAGE_PMD_NR;
1296                     i++, haddr += PAGE_SIZE) {
1297                        pte_t *pte, entry;
1298                        BUG_ON(PageCompound(page+i));
1299                        entry = mk_pte(page + i, vma->vm_page_prot);
1300                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1301                        if (!pmd_write(*pmd))
1302                                entry = pte_wrprotect(entry);
1303                        else
1304                                BUG_ON(page_mapcount(page) != 1);
1305                        if (!pmd_young(*pmd))
1306                                entry = pte_mkold(entry);
1307                        pte = pte_offset_map(&_pmd, haddr);
1308                        BUG_ON(!pte_none(*pte));
1309                        set_pte_at(mm, haddr, pte, entry);
1310                        pte_unmap(pte);
1311                }
1312
1313                mm->nr_ptes++;
1314                smp_wmb(); /* make pte visible before pmd */
1315                /*
1316                 * Up to this point the pmd is present and huge and
1317                 * userland has the whole access to the hugepage
1318                 * during the split (which happens in place). If we
1319                 * overwrite the pmd with the not-huge version
1320                 * pointing to the pte here (which of course we could
1321                 * if all CPUs were bug free), userland could trigger
1322                 * a small page size TLB miss on the small sized TLB
1323                 * while the hugepage TLB entry is still established
 1324                 * in the huge TLB. Some CPUs don't like that. See
1325                 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
 1326                 * Erratum 383 on page 93. Intel should be safe but
1327                 * also warns that it's only safe if the permission
1328                 * and cache attributes of the two entries loaded in
 1329                 * the two TLBs are identical (which should be the case
1330                 * here). But it is generally safer to never allow
1331                 * small and huge TLB entries for the same virtual
1332                 * address to be loaded simultaneously. So instead of
1333                 * doing "pmd_populate(); flush_tlb_range();" we first
1334                 * mark the current pmd notpresent (atomically because
1335                 * here the pmd_trans_huge and pmd_trans_splitting
1336                 * must remain set at all times on the pmd until the
1337                 * split is complete for this pmd), then we flush the
1338                 * SMP TLB and finally we write the non-huge version
1339                 * of the pmd entry with pmd_populate.
1340                 */
1341                set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
1342                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
1343                pmd_populate(mm, pmd, pgtable);
1344                ret = 1;
1345        }
1346        spin_unlock(&mm->page_table_lock);
1347
1348        return ret;
1349}
1350
 1351/* must be called with anon_vma->root->mutex held */
1352static void __split_huge_page(struct page *page,
1353                              struct anon_vma *anon_vma)
1354{
1355        int mapcount, mapcount2;
1356        struct anon_vma_chain *avc;
1357
1358        BUG_ON(!PageHead(page));
1359        BUG_ON(PageTail(page));
1360
1361        mapcount = 0;
1362        list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1363                struct vm_area_struct *vma = avc->vma;
1364                unsigned long addr = vma_address(page, vma);
1365                BUG_ON(is_vma_temporary_stack(vma));
1366                if (addr == -EFAULT)
1367                        continue;
1368                mapcount += __split_huge_page_splitting(page, vma, addr);
1369        }
1370        /*
1371         * It is critical that new vmas are added to the tail of the
 1372         * anon_vma list. This guarantees that if copy_huge_pmd() runs
1373         * and establishes a child pmd before
1374         * __split_huge_page_splitting() freezes the parent pmd (so if
1375         * we fail to prevent copy_huge_pmd() from running until the
1376         * whole __split_huge_page() is complete), we will still see
1377         * the newly established pmd of the child later during the
1378         * walk, to be able to set it as pmd_trans_splitting too.
1379         */
1380        if (mapcount != page_mapcount(page))
1381                printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1382                       mapcount, page_mapcount(page));
1383        BUG_ON(mapcount != page_mapcount(page));
1384
1385        __split_huge_page_refcount(page);
1386
1387        mapcount2 = 0;
1388        list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1389                struct vm_area_struct *vma = avc->vma;
1390                unsigned long addr = vma_address(page, vma);
1391                BUG_ON(is_vma_temporary_stack(vma));
1392                if (addr == -EFAULT)
1393                        continue;
1394                mapcount2 += __split_huge_page_map(page, vma, addr);
1395        }
1396        if (mapcount != mapcount2)
1397                printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1398                       mapcount, mapcount2, page_mapcount(page));
1399        BUG_ON(mapcount != mapcount2);
1400}
1401
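    /*
     * Split a transparent hugepage back into regular pages.  Returns 0
     * if the page was split (or was no longer a compound page by the
     * time the anon_vma lock was taken), and 1 if the anon_vma could
     * not be looked up at all.
     */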
1402int split_huge_page(struct page *page)
1403{
1404        struct anon_vma *anon_vma;
1405        int ret = 1;
1406
1407        BUG_ON(!PageAnon(page));
1408        anon_vma = page_lock_anon_vma(page);
1409        if (!anon_vma)
1410                goto out;
1411        ret = 0;
1412        if (!PageCompound(page))
1413                goto out_unlock;
1414
1415        BUG_ON(!PageSwapBacked(page));
1416        __split_huge_page(page, anon_vma);
1417        count_vm_event(THP_SPLIT);
1418
1419        BUG_ON(PageCompound(page));
1420out_unlock:
1421        page_unlock_anon_vma(anon_vma);
1422out:
1423        return ret;
1424}
1425
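    /*
     * vm_flags on which transparent hugepages must never be enabled:
     * special, pfn-based or mixed mappings, vmas populated through
     * vm_insert_page(), strong-access-ordered mappings (VM_SAO),
     * hugetlbfs, and shared/shareable mappings.  hugepage_madvise()
     * rejects such vmas and khugepaged asserts it never scans them.
     */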
1426#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
1427                   VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
1428
1429int hugepage_madvise(struct vm_area_struct *vma,
1430                     unsigned long *vm_flags, int advice)
1431{
1432        switch (advice) {
1433        case MADV_HUGEPAGE:
1434                /*
1435                 * Be somewhat over-protective like KSM for now!
1436                 */
1437                if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1438                        return -EINVAL;
1439                *vm_flags &= ~VM_NOHUGEPAGE;
1440                *vm_flags |= VM_HUGEPAGE;
1441                /*
1442                 * If the vma becomes good for khugepaged to scan,
1443                 * register it here without waiting for a page fault that
1444                 * may not happen any time soon.
1445                 */
1446                if (unlikely(khugepaged_enter_vma_merge(vma)))
1447                        return -ENOMEM;
1448                break;
1449        case MADV_NOHUGEPAGE:
1450                /*
1451                 * Be somewhat over-protective like KSM for now!
1452                 */
1453                if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1454                        return -EINVAL;
1455                *vm_flags &= ~VM_HUGEPAGE;
1456                *vm_flags |= VM_NOHUGEPAGE;
1457                /*
1458                 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1459                 * this vma even if the mm stays registered in khugepaged (it
1460                 * may have been registered before VM_NOHUGEPAGE was set).
1461                 */
1462                break;
1463        }
1464
1465        return 0;
1466}
1467
1468static int __init khugepaged_slab_init(void)
1469{
1470        mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1471                                          sizeof(struct mm_slot),
1472                                          __alignof__(struct mm_slot), 0, NULL);
1473        if (!mm_slot_cache)
1474                return -ENOMEM;
1475
1476        return 0;
1477}
1478
1479static void __init khugepaged_slab_free(void)
1480{
1481        kmem_cache_destroy(mm_slot_cache);
1482        mm_slot_cache = NULL;
1483}
1484
1485static inline struct mm_slot *alloc_mm_slot(void)
1486{
1487        if (!mm_slot_cache)     /* initialization failed */
1488                return NULL;
1489        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1490}
1491
1492static inline void free_mm_slot(struct mm_slot *mm_slot)
1493{
1494        kmem_cache_free(mm_slot_cache, mm_slot);
1495}
1496
1497static int __init mm_slots_hash_init(void)
1498{
1499        mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
1500                                GFP_KERNEL);
1501        if (!mm_slots_hash)
1502                return -ENOMEM;
1503        return 0;
1504}
1505
1506#if 0
1507static void __init mm_slots_hash_free(void)
1508{
1509        kfree(mm_slots_hash);
1510        mm_slots_hash = NULL;
1511}
1512#endif
1513
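    /*
     * The mm_slots hash is keyed on the mm_struct pointer itself:
     * dividing the pointer by sizeof(struct mm_struct) maps
     * consecutively allocated mm_structs to consecutive values, and
     * the result modulo MM_SLOTS_HASH_HEADS selects the bucket.
     * get_mm_slot() and insert_to_mm_slots_hash() must use the same
     * computation.
     */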
1514static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1515{
1516        struct mm_slot *mm_slot;
1517        struct hlist_head *bucket;
1518        struct hlist_node *node;
1519
1520        bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1521                                % MM_SLOTS_HASH_HEADS];
1522        hlist_for_each_entry(mm_slot, node, bucket, hash) {
1523                if (mm == mm_slot->mm)
1524                        return mm_slot;
1525        }
1526        return NULL;
1527}
1528
1529static void insert_to_mm_slots_hash(struct mm_struct *mm,
1530                                    struct mm_slot *mm_slot)
1531{
1532        struct hlist_head *bucket;
1533
1534        bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
1535                                % MM_SLOTS_HASH_HEADS];
1536        mm_slot->mm = mm;
1537        hlist_add_head(&mm_slot->hash, bucket);
1538}
1539
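    /*
     * mm_users dropping to zero means the process is exiting and
     * exit_mmap() will tear the pagetables down.  khugepaged only
     * pins the mm_struct itself (mm_count, see __khugepaged_enter),
     * so it must recheck this before touching any pagetables.
     */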
1540static inline int khugepaged_test_exit(struct mm_struct *mm)
1541{
1542        return atomic_read(&mm->mm_users) == 0;
1543}
1544
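    /*
     * Register an mm with khugepaged: MMF_VM_HUGEPAGE guarantees this
     * happens at most once per mm, the new slot is appended to the
     * scan list, and an mm_count reference keeps the mm_struct alive
     * while khugepaged still points at it.  The daemon is only woken
     * if the scan list was previously empty.
     */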
1545int __khugepaged_enter(struct mm_struct *mm)
1546{
1547        struct mm_slot *mm_slot;
1548        int wakeup;
1549
1550        mm_slot = alloc_mm_slot();
1551        if (!mm_slot)
1552                return -ENOMEM;
1553
1554        /* __khugepaged_exit() must not run from under us */
1555        VM_BUG_ON(khugepaged_test_exit(mm));
1556        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1557                free_mm_slot(mm_slot);
1558                return 0;
1559        }
1560
1561        spin_lock(&khugepaged_mm_lock);
1562        insert_to_mm_slots_hash(mm, mm_slot);
1563        /*
1564         * Insert just behind the scanning cursor, to let the area settle
1565         * down a little.
1566         */
1567        wakeup = list_empty(&khugepaged_scan.mm_head);
1568        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1569        spin_unlock(&khugepaged_mm_lock);
1570
1571        atomic_inc(&mm->mm_count);
1572        if (wakeup)
1573                wake_up_interruptible(&khugepaged_wait);
1574
1575        return 0;
1576}
1577
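    /*
     * Called when vma merging or expansion may have made a range
     * suitable for hugepages.  hstart rounds vm_start up and hend
     * rounds vm_end down to HPAGE_PMD_SIZE alignment; the mm is only
     * registered if a fully aligned huge-pmd range fits in between.
     */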
1578int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1579{
1580        unsigned long hstart, hend;
1581        if (!vma->anon_vma)
1582                /*
1583                 * Not yet faulted in so we will register later in the
1584                 * page fault if needed.
1585                 */
1586                return 0;
1587        if (vma->vm_ops)
1588                /* khugepaged not yet working on file or special mappings */
1589                return 0;
1590        /*
1591         * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1592         * true too; verify it here.
1593         */
1594        VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1595        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1596        hend = vma->vm_end & HPAGE_PMD_MASK;
1597        if (hstart < hend)
1598                return khugepaged_enter(vma);
1599        return 0;
1600}
1601
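    /*
     * Unregister an mm that is exiting.  If the scan cursor is not on
     * this mm, the slot is freed immediately and the mm_count
     * reference dropped; otherwise the mmap_sem write lock/unlock
     * below waits for khugepaged to notice the exit
     * (khugepaged_test_exit) and release the slot itself via
     * collect_mm_slot().
     */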
1602void __khugepaged_exit(struct mm_struct *mm)
1603{
1604        struct mm_slot *mm_slot;
1605        int free = 0;
1606
1607        spin_lock(&khugepaged_mm_lock);
1608        mm_slot = get_mm_slot(mm);
1609        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
1610                hlist_del(&mm_slot->hash);
1611                list_del(&mm_slot->mm_node);
1612                free = 1;
1613        }
1614        spin_unlock(&khugepaged_mm_lock);
1615
1616        if (free) {
1617                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1618                free_mm_slot(mm_slot);
1619                mmdrop(mm);
1620        } else if (mm_slot) {
1621                /*
1622                 * This is required to serialize against
1623                 * khugepaged_test_exit() (which is guaranteed to run
1624                 * under mmap_sem read mode). Stop here until khugepaged
1625                 * has finished working on the pagetables under the
1626                 * mmap_sem; after we return, all pagetables will be
1627                 * destroyed.
1628                 */
1629                down_write(&mm->mmap_sem);
1630                up_write(&mm->mmap_sem);
1631        }
1632}
1633
1634static void release_pte_page(struct page *page)
1635{
1636        /* 0 stands for page_is_file_cache(page) == false */
1637        dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
1638        unlock_page(page);
1639        putback_lru_page(page);
1640}
1641
1642static void release_pte_pages(pte_t *pte, pte_t *_pte)
1643{
1644        while (--_pte >= pte) {
1645                pte_t pteval = *_pte;
1646                if (!pte_none(pteval))
1647                        release_pte_page(pte_page(pteval));
1648        }
1649}
1650
1651static void release_all_pte_pages(pte_t *pte)
1652{
1653        release_pte_pages(pte, pte + HPAGE_PMD_NR);
1654}
1655
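    /*
     * Check and isolate the HPAGE_PMD_NR ptes being collapsed: each
     * pte must be none (at most khugepaged_max_ptes_none of them) or
     * map a present, writable, anonymous page with no extra reference
     * (no gup pin), which is then locked and isolated from the LRU.
     * Returns 1 only if every page was isolated and at least one
     * pte/page in the range was referenced.
     */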
1656static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
1657                                        unsigned long address,
1658                                        pte_t *pte)
1659{
1660        struct page *page;
1661        pte_t *_pte;
1662        int referenced = 0, isolated = 0, none = 0;
1663        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
1664             _pte++, address += PAGE_SIZE) {
1665                pte_t pteval = *_pte;
1666                if (pte_none(pteval)) {
1667                        if (++none <= khugepaged_max_ptes_none)
1668                                continue;
1669                        else {
1670                                release_pte_pages(pte, _pte);
1671                                goto out;
1672                        }
1673                }
1674                if (!pte_present(pteval) || !pte_write(pteval)) {
1675                        release_pte_pages(pte, _pte);
1676                        goto out;
1677                }
1678                page = vm_normal_page(vma, address, pteval);
1679                if (unlikely(!page)) {
1680                        release_pte_pages(pte, _pte);
1681                        goto out;
1682                }
1683                VM_BUG_ON(PageCompound(page));
1684                BUG_ON(!PageAnon(page));
1685                VM_BUG_ON(!PageSwapBacked(page));
1686
1687                /* cannot use mapcount: can't collapse if there's a gup pin */
1688                if (page_count(page) != 1) {
1689                        release_pte_pages(pte, _pte);
1690                        goto out;
1691                }
1692                 * We can take the page lock before isolate_lru_page
1693                 * because the page can't be freed from under us. NOTE:
1694                 * PG_lock is needed to serialize against split_huge_page
1695                 * is needed to serialize against split_huge_page
1696                 * when invoked from the VM.
1697                 */
1698                if (!trylock_page(page)) {
1699                        release_pte_pages(pte, _pte);
1700                        goto out;
1701                }
1702                /*
1703                 * Isolate the page to avoid collapsing it into a
1704                 * hugepage while it is in use by the VM.
1705                 */
1706                if (isolate_lru_page(page)) {
1707                        unlock_page(page);
1708                        release_pte_pages(pte, _pte);
1709                        goto out;
1710                }
1711                /* 0 stands for page_is_file_cache(page) == false */
1712                inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
1713                VM_BUG_ON(!PageLocked(page));
1714                VM_BUG_ON(PageLRU(page));
1715
1716                /* If no mapped pte is young, don't collapse the page */
1717                if (pte_young(pteval) || PageReferenced(page) ||
1718                    mmu_notifier_test_young(vma->vm_mm, address))
1719                        referenced = 1;
1720        }
1721        if (unlikely(!referenced))
1722                release_all_pte_pages(pte);
1723        else
1724                isolated = 1;
1725out:
1726        return isolated;
1727}
1728
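    /*
     * Copy the isolated small pages into the new hugepage.  pte_none
     * slots become zero-filled subpages (accounted as new anonymous
     * memory); for the rest the data is copied, the pte is cleared,
     * the rmap and LRU state of the small page is dropped and the
     * page is freed.
     */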
1729static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1730                                      struct vm_area_struct *vma,
1731                                      unsigned long address,
1732                                      spinlock_t *ptl)
1733{
1734        pte_t *_pte;
1735        for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
1736                pte_t pteval = *_pte;
1737                struct page *src_page;
1738
1739                if (pte_none(pteval)) {
1740                        clear_user_highpage(page, address);
1741                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
1742                } else {
1743                        src_page = pte_page(pteval);
1744                        copy_user_highpage(page, src_page, address, vma);
1745                        VM_BUG_ON(page_mapcount(src_page) != 1);
1746                        VM_BUG_ON(page_count(src_page) != 2);
1747                        release_pte_page(src_page);
1748                        /*
1749                         * ptl mostly unnecessary, but preempt has to
1750                         * be disabled to update the per-cpu stats
1751                         * inside page_remove_rmap().
1752                         */
1753                        spin_lock(ptl);
1754                        /*
1755                         * paravirt calls inside pte_clear here are
1756                         * superfluous.
1757                         */
1758                        pte_clear(vma->vm_mm, address, _pte);
1759                        page_remove_rmap(src_page);
1760                        spin_unlock(ptl);
1761                        free_page_and_swap_cache(src_page);
1762                }
1763
1764                address += PAGE_SIZE;
1765                page++;
1766        }
1767}
1768
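    /*
     * Collapse the range starting at "address" into one huge pmd.
     * Entered with mmap_sem held for read and always returns with it
     * released: the hugepage is taken from *hpage (!NUMA) or allocated
     * here (NUMA) while still under the read lock, then mmap_sem is
     * retaken in write mode so the pagetable layout cannot change
     * while the ptes are collapsed.
     */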
1769static void collapse_huge_page(struct mm_struct *mm,
1770                               unsigned long address,
1771                               struct page **hpage,
1772                               struct vm_area_struct *vma,
1773                               int node)
1774{
1775        pgd_t *pgd;
1776        pud_t *pud;
1777        pmd_t *pmd, _pmd;
1778        pte_t *pte;
1779        pgtable_t pgtable;
1780        struct page *new_page;
1781        spinlock_t *ptl;
1782        int isolated;
1783        unsigned long hstart, hend;
1784
1785        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1786#ifndef CONFIG_NUMA
1787        up_read(&mm->mmap_sem);
1788        VM_BUG_ON(!*hpage);
1789        new_page = *hpage;
1790#else
1791        VM_BUG_ON(*hpage);
1792        /*
1793         * Allocate the page while the vma is still valid and under
1794         * the mmap_sem read mode so there is no memory allocation
1795         * later when we take the mmap_sem in write mode. This is more
1796         * friendly behavior (OTOH it may actually hide bugs) to
1797         * filesystems in userland with daemons allocating memory in
1798         * the userland I/O paths.  Allocating memory with the
1799         * mmap_sem in read mode is also a good idea to allow greater
1800         * scalability.
1801         */
1802        new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1803                                      node, __GFP_OTHER_NODE);
1804
1805        /*
1806         * After allocating the hugepage, release the mmap_sem read lock in
1807         * preparation for taking it in write mode.
1808         */
1809        up_read(&mm->mmap_sem);
1810        if (unlikely(!new_page)) {
1811                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
1812                *hpage = ERR_PTR(-ENOMEM);
1813                return;
1814        }
1815#endif
1816
1817        count_vm_event(THP_COLLAPSE_ALLOC);
1818        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1819#ifdef CONFIG_NUMA
1820                put_page(new_page);
1821#endif
1822                return;
1823        }
1824
1825        /*
1826         * Prevent all access to pagetables with the exception of
1827         * gup_fast later handled by the ptep_clear_flush and the VM
1828         * handled by the anon_vma lock + PG_lock.
1829         */
1830        down_write(&mm->mmap_sem);
1831        if (unlikely(khugepaged_test_exit(mm)))
1832                goto out;
1833
1834        vma = find_vma(mm, address);
1835        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1836        hend = vma->vm_end & HPAGE_PMD_MASK;
1837        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
1838                goto out;
1839
1840        if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
1841            (vma->vm_flags & VM_NOHUGEPAGE))
1842                goto out;
1843
1844        if (!vma->anon_vma || vma->vm_ops)
1845                goto out;
1846        if (is_vma_temporary_stack(vma))
1847                goto out;
1848        /*
1849         * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
1850         * true too; verify it here.
1851         */
1852        VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
1853
1854        pgd = pgd_offset(mm, address);
1855        if (!pgd_present(*pgd))
1856                goto out;
1857
1858        pud = pud_offset(pgd, address);
1859        if (!pud_present(*pud))
1860                goto out;
1861
1862        pmd = pmd_offset(pud, address);
1863        /* pmd can't go away or become huge under us */
1864        if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1865                goto out;
1866
1867        anon_vma_lock(vma->anon_vma);
1868
1869        pte = pte_offset_map(pmd, address);
1870        ptl = pte_lockptr(mm, pmd);
1871
1872        spin_lock(&mm->page_table_lock); /* probably unnecessary */
1873        /*
1874         * After this gup_fast can't run anymore. This also removes
1875         * any huge TLB entry from the CPU so we won't allow
1876         * huge and small TLB entries for the same virtual address
1877         * to avoid the risk of CPU bugs in that area.
1878         */
1879        _pmd = pmdp_clear_flush_notify(vma, address, pmd);
1880        spin_unlock(&mm->page_table_lock);
1881
1882        spin_lock(ptl);
1883        isolated = __collapse_huge_page_isolate(vma, address, pte);
1884        spin_unlock(ptl);
1885
1886        if (unlikely(!isolated)) {
1887                pte_unmap(pte);
1888                spin_lock(&mm->page_table_lock);
1889                BUG_ON(!pmd_none(*pmd));
1890                set_pmd_at(mm, address, pmd, _pmd);
1891                spin_unlock(&mm->page_table_lock);
1892                anon_vma_unlock(vma->anon_vma);
1893                goto out;
1894        }
1895
1896        /*
1897         * All pages are isolated and locked so anon_vma rmap
1898         * can't run anymore.
1899         */
1900        anon_vma_unlock(vma->anon_vma);
1901
1902        __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
1903        pte_unmap(pte);
1904        __SetPageUptodate(new_page);
1905        pgtable = pmd_pgtable(_pmd);
1906        VM_BUG_ON(page_count(pgtable) != 1);
1907        VM_BUG_ON(page_mapcount(pgtable) != 0);
1908
1909        _pmd = mk_pmd(new_page, vma->vm_page_prot);
1910        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1911        _pmd = pmd_mkhuge(_pmd);
1912
1913        /*
1914         * spin_lock() below is not the equivalent of smp_wmb(), so
1915         * this is needed to avoid the copy_huge_page writes becoming
1916         * visible after the set_pmd_at() write.
1917         */
1918        smp_wmb();
1919
1920        spin_lock(&mm->page_table_lock);
1921        BUG_ON(!pmd_none(*pmd));
1922        page_add_new_anon_rmap(new_page, vma, address);
1923        set_pmd_at(mm, address, pmd, _pmd);
1924        update_mmu_cache(vma, address, _pmd);
1925        prepare_pmd_huge_pte(pgtable, mm);
1926        mm->nr_ptes--;
1927        spin_unlock(&mm->page_table_lock);
1928
1929#ifndef CONFIG_NUMA
1930        *hpage = NULL;
1931#endif
1932        khugepaged_pages_collapsed++;
1933out_up_write:
1934        up_write(&mm->mmap_sem);
1935        return;
1936
1937out:
1938        mem_cgroup_uncharge_page(new_page);
1939#ifdef CONFIG_NUMA
1940        put_page(new_page);
1941#endif
1942        goto out_up_write;
1943}
1944
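    /*
     * Scan one huge-pmd-aligned range under the pte lock, applying
     * largely the same suitability checks as
     * __collapse_huge_page_isolate() but without isolating anything.
     * If the range looks collapsible, the pte lock is dropped and
     * collapse_huge_page() is called, which releases the mmap_sem.
     */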
1945static int khugepaged_scan_pmd(struct mm_struct *mm,
1946                               struct vm_area_struct *vma,
1947                               unsigned long address,
1948                               struct page **hpage)
1949{
1950        pgd_t *pgd;
1951        pud_t *pud;
1952        pmd_t *pmd;
1953        pte_t *pte, *_pte;
1954        int ret = 0, referenced = 0, none = 0;
1955        struct page *page;
1956        unsigned long _address;
1957        spinlock_t *ptl;
1958        int node = -1;
1959
1960        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1961
1962        pgd = pgd_offset(mm, address);
1963        if (!pgd_present(*pgd))
1964                goto out;
1965
1966        pud = pud_offset(pgd, address);
1967        if (!pud_present(*pud))
1968                goto out;
1969
1970        pmd = pmd_offset(pud, address);
1971        if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
1972                goto out;
1973
1974        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1975        for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1976             _pte++, _address += PAGE_SIZE) {
1977                pte_t pteval = *_pte;
1978                if (pte_none(pteval)) {
1979                        if (++none <= khugepaged_max_ptes_none)
1980                                continue;
1981                        else
1982                                goto out_unmap;
1983                }
1984                if (!pte_present(pteval) || !pte_write(pteval))
1985                        goto out_unmap;
1986                page = vm_normal_page(vma, _address, pteval);
1987                if (unlikely(!page))
1988                        goto out_unmap;
1989                /*
1990                 * Choose the node of the first page. This could
1991                 * be more sophisticated and look at more pages,
1992                 * but isn't for now.
1993                 */
1994                if (node == -1)
1995                        node = page_to_nid(page);
1996                VM_BUG_ON(PageCompound(page));
1997                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
1998                        goto out_unmap;
1999                /* cannot use mapcount: can't collapse if there's a gup pin */
2000                if (page_count(page) != 1)
2001                        goto out_unmap;
2002                if (pte_young(pteval) || PageReferenced(page) ||
2003                    mmu_notifier_test_young(vma->vm_mm, address))
2004                        referenced = 1;
2005        }
2006        if (referenced)
2007                ret = 1;
2008out_unmap:
2009        pte_unmap_unlock(pte, ptl);
2010        if (ret)
2011                /* collapse_huge_page will return with the mmap_sem released */
2012                collapse_huge_page(mm, address, hpage, vma, node);
2013out:
2014        return ret;
2015}
2016
2017static void collect_mm_slot(struct mm_slot *mm_slot)
2018{
2019        struct mm_struct *mm = mm_slot->mm;
2020
2021        VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
2022
2023        if (khugepaged_test_exit(mm)) {
2024                /* free mm_slot */
2025                hlist_del(&mm_slot->hash);
2026                list_del(&mm_slot->mm_node);
2027
2028                /*
2029                 * Not strictly needed because the mm exited already.
2030                 *
2031                 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2032                 */
2033
2034                /* khugepaged_mm_lock actually not necessary for the below */
2035                free_mm_slot(mm_slot);
2036                mmdrop(mm);
2037        }
2038}
2039
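    /*
     * Scan up to "pages" ptes starting at the persistent cursor in
     * khugepaged_scan.  Called and returns with khugepaged_mm_lock
     * held; the lock is dropped while one mm is scanned under its
     * mmap_sem in read mode.  Returns the amount of progress made so
     * the caller can budget a full pass.
     */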
2040static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2041                                            struct page **hpage)
2042{
2043        struct mm_slot *mm_slot;
2044        struct mm_struct *mm;
2045        struct vm_area_struct *vma;
2046        int progress = 0;
2047
2048        VM_BUG_ON(!pages);
2049        VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
2050
2051        if (khugepaged_scan.mm_slot)
2052                mm_slot = khugepaged_scan.mm_slot;
2053        else {
2054                mm_slot = list_entry(khugepaged_scan.mm_head.next,
2055                                     struct mm_slot, mm_node);
2056                khugepaged_scan.address = 0;
2057                khugepaged_scan.mm_slot = mm_slot;
2058        }
2059        spin_unlock(&khugepaged_mm_lock);
2060
2061        mm = mm_slot->mm;
2062        down_read(&mm->mmap_sem);
2063        if (unlikely(khugepaged_test_exit(mm)))
2064                vma = NULL;
2065        else
2066                vma = find_vma(mm, khugepaged_scan.address);
2067
2068        progress++;
2069        for (; vma; vma = vma->vm_next) {
2070                unsigned long hstart, hend;
2071
2072                cond_resched();
2073                if (unlikely(khugepaged_test_exit(mm))) {
2074                        progress++;
2075                        break;
2076                }
2077
2078                if ((!(vma->vm_flags & VM_HUGEPAGE) &&
2079                     !khugepaged_always()) ||
2080                    (vma->vm_flags & VM_NOHUGEPAGE)) {
2081                skip:
2082                        progress++;
2083                        continue;
2084                }
2085                if (!vma->anon_vma || vma->vm_ops)
2086                        goto skip;
2087                if (is_vma_temporary_stack(vma))
2088                        goto skip;
2089                /*
2090                 * If is_pfn_mapping() is true, is_linear_pfn_mapping()
2091                 * must be true too; verify it here.
2092                 */
2093                VM_BUG_ON(is_linear_pfn_mapping(vma) ||
2094                          vma->vm_flags & VM_NO_THP);
2095
2096                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2097                hend = vma->vm_end & HPAGE_PMD_MASK;
2098                if (hstart >= hend)
2099                        goto skip;
2100                if (khugepaged_scan.address > hend)
2101                        goto skip;
2102                if (khugepaged_scan.address < hstart)
2103                        khugepaged_scan.address = hstart;
2104                VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2105
2106                while (khugepaged_scan.address < hend) {
2107                        int ret;
2108                        cond_resched();
2109                        if (unlikely(khugepaged_test_exit(mm)))
2110                                goto breakouterloop;
2111
2112                        VM_BUG_ON(khugepaged_scan.address < hstart ||
2113                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
2114                                  hend);
2115                        ret = khugepaged_scan_pmd(mm, vma,
2116                                                  khugepaged_scan.address,
2117                                                  hpage);
2118                        /* move to next address */
2119                        khugepaged_scan.address += HPAGE_PMD_SIZE;
2120                        progress += HPAGE_PMD_NR;
2121                        if (ret)
2122                                /* we released mmap_sem so break loop */
2123                                goto breakouterloop_mmap_sem;
2124                        if (progress >= pages)
2125                                goto breakouterloop;
2126                }
2127        }
2128breakouterloop:
2129        up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2130breakouterloop_mmap_sem:
2131
2132        spin_lock(&khugepaged_mm_lock);
2133        VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2134        /*
2135         * Release the current mm_slot if this mm is about to die, or
2136         * if we scanned all vmas of this mm.
2137         */
2138        if (khugepaged_test_exit(mm) || !vma) {
2139                /*
2140                 * Make sure that if mm_users is reaching zero while
2141                 * khugepaged runs here, khugepaged_exit will find
2142                 * mm_slot not pointing to the exiting mm.
2143                 */
2144                if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2145                        khugepaged_scan.mm_slot = list_entry(
2146                                mm_slot->mm_node.next,
2147                                struct mm_slot, mm_node);
2148                        khugepaged_scan.address = 0;
2149                } else {
2150                        khugepaged_scan.mm_slot = NULL;
2151                        khugepaged_full_scans++;
2152                }
2153
2154                collect_mm_slot(mm_slot);
2155        }
2156
2157        return progress;
2158}
2159
2160static int khugepaged_has_work(void)
2161{
2162        return !list_empty(&khugepaged_scan.mm_head) &&
2163                khugepaged_enabled();
2164}
2165
2166static int khugepaged_wait_event(void)
2167{
2168        return !list_empty(&khugepaged_scan.mm_head) ||
2169                !khugepaged_enabled();
2170}
2171
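    /*
     * One batch of work for the khugepaged thread: make up to
     * khugepaged_pages_to_scan worth of progress, preallocating the
     * hugepage on !NUMA configurations, and stop early if allocation
     * fails, the previous collapse left an ERR_PTR (NUMA), the thread
     * should stop, or the system is freezing.
     */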
2172static void khugepaged_do_scan(struct page **hpage)
2173{
2174        unsigned int progress = 0, pass_through_head = 0;
2175        unsigned int pages = khugepaged_pages_to_scan;
2176
2177        barrier(); /* write khugepaged_pages_to_scan to local stack */
2178
2179        while (progress < pages) {
2180                cond_resched();
2181
2182#ifndef CONFIG_NUMA
2183                if (!*hpage) {
2184                        *hpage = alloc_hugepage(khugepaged_defrag());
2185                        if (unlikely(!*hpage)) {
2186                                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2187                                break;
2188                        }
2189                        count_vm_event(THP_COLLAPSE_ALLOC);
2190                }
2191#else
2192                if (IS_ERR(*hpage))
2193                        break;
2194#endif
2195
2196                if (unlikely(kthread_should_stop() || freezing(current)))
2197                        break;
2198
2199                spin_lock(&khugepaged_mm_lock);
2200                if (!khugepaged_scan.mm_slot)
2201                        pass_through_head++;
2202                if (khugepaged_has_work() &&
2203                    pass_through_head < 2)
2204                        progress += khugepaged_scan_mm_slot(pages - progress,
2205                                                            hpage);
2206                else
2207                        progress = pages;
2208                spin_unlock(&khugepaged_mm_lock);
2209        }
2210}
2211
2212static void khugepaged_alloc_sleep(void)
2213{
2214        DEFINE_WAIT(wait);
2215        add_wait_queue(&khugepaged_wait, &wait);
2216        schedule_timeout_interruptible(
2217                msecs_to_jiffies(
2218                        khugepaged_alloc_sleep_millisecs));
2219        remove_wait_queue(&khugepaged_wait, &wait);
2220}
2221
2222#ifndef CONFIG_NUMA
2223static struct page *khugepaged_alloc_hugepage(void)
2224{
2225        struct page *hpage;
2226
2227        do {
2228                hpage = alloc_hugepage(khugepaged_defrag());
2229                if (!hpage) {
2230                        count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2231                        khugepaged_alloc_sleep();
2232                } else
2233                        count_vm_event(THP_COLLAPSE_ALLOC);
2234        } while (unlikely(!hpage) &&
2235                 likely(khugepaged_enabled()));
2236        return hpage;
2237}
2238#endif
2239
2240static void khugepaged_loop(void)
2241{
2242        struct page *hpage;
2243
2244#ifdef CONFIG_NUMA
2245        hpage = NULL;
2246#endif
2247        while (likely(khugepaged_enabled())) {
2248#ifndef CONFIG_NUMA
2249                hpage = khugepaged_alloc_hugepage();
2250                if (unlikely(!hpage))
2251                        break;
2252#else
2253                if (IS_ERR(hpage)) {
2254                        khugepaged_alloc_sleep();
2255                        hpage = NULL;
2256                }
2257#endif
2258
2259                khugepaged_do_scan(&hpage);
2260#ifndef CONFIG_NUMA
2261                if (hpage)
2262                        put_page(hpage);
2263#endif
2264                try_to_freeze();
2265                if (unlikely(kthread_should_stop()))
2266                        break;
2267                if (khugepaged_has_work()) {
2268                        DEFINE_WAIT(wait);
2269                        if (!khugepaged_scan_sleep_millisecs)
2270                                continue;
2271                        add_wait_queue(&khugepaged_wait, &wait);
2272                        schedule_timeout_interruptible(
2273                                msecs_to_jiffies(
2274                                        khugepaged_scan_sleep_millisecs));
2275                        remove_wait_queue(&khugepaged_wait, &wait);
2276                } else if (khugepaged_enabled())
2277                        wait_event_freezable(khugepaged_wait,
2278                                             khugepaged_wait_event());
2279        }
2280}
2281
2282static int khugepaged(void *none)
2283{
2284        struct mm_slot *mm_slot;
2285
2286        set_freezable();
2287        set_user_nice(current, 19);
2288
2289        /* serialize with start_khugepaged() */
2290        mutex_lock(&khugepaged_mutex);
2291
2292        for (;;) {
2293                mutex_unlock(&khugepaged_mutex);
2294                VM_BUG_ON(khugepaged_thread != current);
2295                khugepaged_loop();
2296                VM_BUG_ON(khugepaged_thread != current);
2297
2298                mutex_lock(&khugepaged_mutex);
2299                if (!khugepaged_enabled())
2300                        break;
2301                if (unlikely(kthread_should_stop()))
2302                        break;
2303        }
2304
2305        spin_lock(&khugepaged_mm_lock);
2306        mm_slot = khugepaged_scan.mm_slot;
2307        khugepaged_scan.mm_slot = NULL;
2308        if (mm_slot)
2309                collect_mm_slot(mm_slot);
2310        spin_unlock(&khugepaged_mm_lock);
2311
2312        khugepaged_thread = NULL;
2313        mutex_unlock(&khugepaged_mutex);
2314
2315        return 0;
2316}
2317
2318void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
2319{
2320        struct page *page;
2321
2322        spin_lock(&mm->page_table_lock);
2323        if (unlikely(!pmd_trans_huge(*pmd))) {
2324                spin_unlock(&mm->page_table_lock);
2325                return;
2326        }
2327        page = pmd_page(*pmd);
2328        VM_BUG_ON(!page_count(page));
2329        get_page(page);
2330        spin_unlock(&mm->page_table_lock);
2331
2332        split_huge_page(page);
2333
2334        put_page(page);
2335        BUG_ON(pmd_trans_huge(*pmd));
2336}
2337
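    /*
     * Split the huge pmd (if any) mapping "address".  Only called for
     * addresses that are not HPAGE_PMD_SIZE aligned, since an aligned
     * boundary can never fall in the middle of a huge pmd.
     */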
2338static void split_huge_page_address(struct mm_struct *mm,
2339                                    unsigned long address)
2340{
2341        pgd_t *pgd;
2342        pud_t *pud;
2343        pmd_t *pmd;
2344
2345        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2346
2347        pgd = pgd_offset(mm, address);
2348        if (!pgd_present(*pgd))
2349                return;
2350
2351        pud = pud_offset(pgd, address);
2352        if (!pud_present(*pud))
2353                return;
2354
2355        pmd = pmd_offset(pud, address);
2356        if (!pmd_present(*pmd))
2357                return;
2358        /*
2359         * Caller holds the mmap_sem write mode, so a huge pmd cannot
2360         * materialize from under us.
2361         */
2362        split_huge_page_pmd(mm, pmd);
2363}
2364
2365void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2366                             unsigned long start,
2367                             unsigned long end,
2368                             long adjust_next)
2369{
2370        /*
2371         * If the new start address isn't hpage aligned and it could
2372         * previously contain a hugepage: check if we need to split
2373         * a huge pmd.
2374         */
2375        if (start & ~HPAGE_PMD_MASK &&
2376            (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2377            (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2378                split_huge_page_address(vma->vm_mm, start);
2379
2380        /*
2381         * If the new end address isn't hpage aligned and it could
2382         * previously contain a hugepage: check if we need to split
2383         * a huge pmd.
2384         */
2385        if (end & ~HPAGE_PMD_MASK &&
2386            (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2387            (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2388                split_huge_page_address(vma->vm_mm, end);
2389
2390         * If we're also updating vma->vm_next->vm_start, and the new
2391         * vm_next->vm_start isn't hpage aligned and it could previously
2392         * contain a hugepage: check if we need to split a huge pmd.
2393         * contain an hugepage: check if we need to split an huge pmd.
2394         */
2395        if (adjust_next > 0) {
2396                struct vm_area_struct *next = vma->vm_next;
2397                unsigned long nstart = next->vm_start;
2398                nstart += adjust_next << PAGE_SHIFT;
2399                if (nstart & ~HPAGE_PMD_MASK &&
2400                    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2401                    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2402                        split_huge_page_address(next->vm_mm, nstart);
2403        }
2404}
2405