linux/mm/huge_memory.c
   1/*
   2 *  Copyright (C) 2009  Red Hat, Inc.
   3 *
   4 *  This work is licensed under the terms of the GNU GPL, version 2. See
   5 *  the COPYING file in the top-level directory.
   6 */
   7
   8#include <linux/mm.h>
   9#include <linux/sched.h>
  10#include <linux/highmem.h>
  11#include <linux/hugetlb.h>
  12#include <linux/mmu_notifier.h>
  13#include <linux/rmap.h>
  14#include <linux/swap.h>
  15#include <linux/shrinker.h>
  16#include <linux/mm_inline.h>
  17#include <linux/kthread.h>
  18#include <linux/khugepaged.h>
  19#include <linux/freezer.h>
  20#include <linux/mman.h>
  21#include <linux/pagemap.h>
  22#include <linux/migrate.h>
  23#include <linux/hashtable.h>
  24
  25#include <asm/tlb.h>
  26#include <asm/pgalloc.h>
  27#include "internal.h"
  28
   29/*
   30 * By default transparent hugepage support is enabled for all mappings
   31 * and khugepaged scans all mappings. Defrag is invoked by khugepaged
   32 * hugepage allocations and, with the default flags below, by page
   33 * faults for all hugepage allocations (the "defrag" sysfs attribute
   34 * can limit this to MADV_HUGEPAGE regions).
   35 */
  36unsigned long transparent_hugepage_flags __read_mostly =
  37#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
  38        (1<<TRANSPARENT_HUGEPAGE_FLAG)|
  39#endif
  40#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
  41        (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
  42#endif
  43        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
  44        (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
  45        (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
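/*
 * For illustration: assuming CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y, the
 * defaults above correspond to the sysfs settings enabled=always,
 * defrag=always, khugepaged/defrag=1 and use_zero_page=1.
 */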
  46
   47/* default: scan 8*512 ptes (or vmas) every 10 seconds */
  48static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
  49static unsigned int khugepaged_pages_collapsed;
  50static unsigned int khugepaged_full_scans;
  51static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
  52/* during fragmentation poll the hugepage allocator once every minute */
  53static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
  54static struct task_struct *khugepaged_thread __read_mostly;
  55static DEFINE_MUTEX(khugepaged_mutex);
  56static DEFINE_SPINLOCK(khugepaged_mm_lock);
  57static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
   58/*
   59 * By default, collapse a hugepage if at least one pte is mapped,
   60 * just as would have happened if the vma had been large enough
   61 * during the page fault.
   62 */
  63static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
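/*
 * Illustrative note, assuming 4 KiB base pages and 2 MiB huge pages
 * (HPAGE_PMD_NR == 512): the default of 511 lets khugepaged collapse a
 * huge page range even when only a single pte in it is actually mapped.
 */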
  64
  65static int khugepaged(void *none);
  66static int khugepaged_slab_init(void);
  67
  68#define MM_SLOTS_HASH_BITS 10
  69static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
  70
  71static struct kmem_cache *mm_slot_cache __read_mostly;
  72
  73/**
  74 * struct mm_slot - hash lookup from mm to mm_slot
  75 * @hash: hash collision list
  76 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
  77 * @mm: the mm that this information is valid for
  78 */
  79struct mm_slot {
  80        struct hlist_node hash;
  81        struct list_head mm_node;
  82        struct mm_struct *mm;
  83};
  84
  85/**
  86 * struct khugepaged_scan - cursor for scanning
  87 * @mm_head: the head of the mm list to scan
  88 * @mm_slot: the current mm_slot we are scanning
  89 * @address: the next address inside that to be scanned
  90 *
   91 * There is only one khugepaged_scan instance of this cursor structure.
  92 */
  93struct khugepaged_scan {
  94        struct list_head mm_head;
  95        struct mm_slot *mm_slot;
  96        unsigned long address;
  97};
  98static struct khugepaged_scan khugepaged_scan = {
  99        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 100};
 101
 102
 103static int set_recommended_min_free_kbytes(void)
 104{
 105        struct zone *zone;
 106        int nr_zones = 0;
 107        unsigned long recommended_min;
 108
 109        if (!khugepaged_enabled())
 110                return 0;
 111
 112        for_each_populated_zone(zone)
 113                nr_zones++;
 114
 115        /* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
 116        recommended_min = pageblock_nr_pages * nr_zones * 2;
 117
  118        /*
  119         * Make sure that on average at least two pageblocks are almost free
  120         * of another type: one for a migratetype to fall back to and a
  121         * second to avoid subsequent fallbacks of other types. There are 3
  122         * MIGRATE_TYPES we care about.
  123         */
 124        recommended_min += pageblock_nr_pages * nr_zones *
 125                           MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
 126
  127        /* never allow more than 5% of the lowmem to be reserved */
 128        recommended_min = min(recommended_min,
 129                              (unsigned long) nr_free_buffer_pages() / 20);
 130        recommended_min <<= (PAGE_SHIFT-10);
 131
 132        if (recommended_min > min_free_kbytes)
 133                min_free_kbytes = recommended_min;
 134        setup_per_zone_wmarks();
 135        return 0;
 136}
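/*
 * Worked example, assuming a hypothetical x86-64 box with 4 KiB pages
 * (pageblock_nr_pages == 512, MIGRATE_PCPTYPES == 3) and three populated
 * zones:
 *   recommended_min = 512 * 3 * 2        =  3072 pages
 *                   + 512 * 3 * 3 * 3    = 13824 pages
 *                   = 16896 pages (capped at 5% of lowmem),
 *   then converted to kilobytes: 16896 << (12 - 10) == 67584 kB (~66 MiB).
 */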
 137late_initcall(set_recommended_min_free_kbytes);
 138
 139static int start_khugepaged(void)
 140{
 141        int err = 0;
 142        if (khugepaged_enabled()) {
 143                if (!khugepaged_thread)
 144                        khugepaged_thread = kthread_run(khugepaged, NULL,
 145                                                        "khugepaged");
 146                if (unlikely(IS_ERR(khugepaged_thread))) {
 147                        printk(KERN_ERR
 148                               "khugepaged: kthread_run(khugepaged) failed\n");
 149                        err = PTR_ERR(khugepaged_thread);
 150                        khugepaged_thread = NULL;
 151                }
 152
 153                if (!list_empty(&khugepaged_scan.mm_head))
 154                        wake_up_interruptible(&khugepaged_wait);
 155
 156                set_recommended_min_free_kbytes();
 157        } else if (khugepaged_thread) {
 158                kthread_stop(khugepaged_thread);
 159                khugepaged_thread = NULL;
 160        }
 161
 162        return err;
 163}
 164
 165static atomic_t huge_zero_refcount;
 166static struct page *huge_zero_page __read_mostly;
 167
 168static inline bool is_huge_zero_page(struct page *page)
 169{
 170        return ACCESS_ONCE(huge_zero_page) == page;
 171}
 172
 173static inline bool is_huge_zero_pmd(pmd_t pmd)
 174{
 175        return is_huge_zero_page(pmd_page(pmd));
 176}
 177
 178static struct page *get_huge_zero_page(void)
 179{
 180        struct page *zero_page;
 181retry:
 182        if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
 183                return ACCESS_ONCE(huge_zero_page);
 184
 185        zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
 186                        HPAGE_PMD_ORDER);
 187        if (!zero_page) {
 188                count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
 189                return NULL;
 190        }
 191        count_vm_event(THP_ZERO_PAGE_ALLOC);
 192        preempt_disable();
 193        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
 194                preempt_enable();
 195                __free_page(zero_page);
 196                goto retry;
 197        }
 198
  199        /* We take an additional reference here; it will be dropped by the shrinker. */
 200        atomic_set(&huge_zero_refcount, 2);
 201        preempt_enable();
 202        return ACCESS_ONCE(huge_zero_page);
 203}
 204
 205static void put_huge_zero_page(void)
 206{
 207        /*
 208         * Counter should never go to zero here. Only shrinker can put
 209         * last reference.
 210         */
 211        BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 212}
 213
 214static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
 215                                        struct shrink_control *sc)
 216{
 217        /* we can free zero page only if last reference remains */
 218        return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
 219}
 220
 221static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 222                                       struct shrink_control *sc)
 223{
 224        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 225                struct page *zero_page = xchg(&huge_zero_page, NULL);
 226                BUG_ON(zero_page == NULL);
 227                __free_page(zero_page);
 228                return HPAGE_PMD_NR;
 229        }
 230
 231        return 0;
 232}
 233
 234static struct shrinker huge_zero_page_shrinker = {
 235        .count_objects = shrink_huge_zero_page_count,
 236        .scan_objects = shrink_huge_zero_page_scan,
 237        .seeks = DEFAULT_SEEKS,
 238};
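/*
 * Summary of the huge zero page lifecycle implemented above:
 * get_huge_zero_page() either takes a reference on the existing page or
 * allocates one and sets the refcount to 2 (one for the caller, one kept
 * so the page survives); put_huge_zero_page() must never drop the last
 * reference; the shrinker frees the page once only that implicit
 * reference remains (refcount == 1).
 */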
 239
 240#ifdef CONFIG_SYSFS
 241
 242static ssize_t double_flag_show(struct kobject *kobj,
 243                                struct kobj_attribute *attr, char *buf,
 244                                enum transparent_hugepage_flag enabled,
 245                                enum transparent_hugepage_flag req_madv)
 246{
 247        if (test_bit(enabled, &transparent_hugepage_flags)) {
 248                VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
 249                return sprintf(buf, "[always] madvise never\n");
 250        } else if (test_bit(req_madv, &transparent_hugepage_flags))
 251                return sprintf(buf, "always [madvise] never\n");
 252        else
 253                return sprintf(buf, "always madvise [never]\n");
 254}
 255static ssize_t double_flag_store(struct kobject *kobj,
 256                                 struct kobj_attribute *attr,
 257                                 const char *buf, size_t count,
 258                                 enum transparent_hugepage_flag enabled,
 259                                 enum transparent_hugepage_flag req_madv)
 260{
 261        if (!memcmp("always", buf,
 262                    min(sizeof("always")-1, count))) {
 263                set_bit(enabled, &transparent_hugepage_flags);
 264                clear_bit(req_madv, &transparent_hugepage_flags);
 265        } else if (!memcmp("madvise", buf,
 266                           min(sizeof("madvise")-1, count))) {
 267                clear_bit(enabled, &transparent_hugepage_flags);
 268                set_bit(req_madv, &transparent_hugepage_flags);
 269        } else if (!memcmp("never", buf,
 270                           min(sizeof("never")-1, count))) {
 271                clear_bit(enabled, &transparent_hugepage_flags);
 272                clear_bit(req_madv, &transparent_hugepage_flags);
 273        } else
 274                return -EINVAL;
 275
 276        return count;
 277}
 278
 279static ssize_t enabled_show(struct kobject *kobj,
 280                            struct kobj_attribute *attr, char *buf)
 281{
 282        return double_flag_show(kobj, attr, buf,
 283                                TRANSPARENT_HUGEPAGE_FLAG,
 284                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 285}
 286static ssize_t enabled_store(struct kobject *kobj,
 287                             struct kobj_attribute *attr,
 288                             const char *buf, size_t count)
 289{
 290        ssize_t ret;
 291
 292        ret = double_flag_store(kobj, attr, buf, count,
 293                                TRANSPARENT_HUGEPAGE_FLAG,
 294                                TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
 295
 296        if (ret > 0) {
 297                int err;
 298
 299                mutex_lock(&khugepaged_mutex);
 300                err = start_khugepaged();
 301                mutex_unlock(&khugepaged_mutex);
 302
 303                if (err)
 304                        ret = err;
 305        }
 306
 307        return ret;
 308}
 309static struct kobj_attribute enabled_attr =
 310        __ATTR(enabled, 0644, enabled_show, enabled_store);
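/*
 * Example usage (assuming sysfs is mounted at /sys): the attribute above
 * appears as /sys/kernel/mm/transparent_hugepage/enabled and accepts
 * "always", "madvise" or "never", e.g.
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 */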
 311
 312static ssize_t single_flag_show(struct kobject *kobj,
 313                                struct kobj_attribute *attr, char *buf,
 314                                enum transparent_hugepage_flag flag)
 315{
 316        return sprintf(buf, "%d\n",
 317                       !!test_bit(flag, &transparent_hugepage_flags));
 318}
 319
 320static ssize_t single_flag_store(struct kobject *kobj,
 321                                 struct kobj_attribute *attr,
 322                                 const char *buf, size_t count,
 323                                 enum transparent_hugepage_flag flag)
 324{
 325        unsigned long value;
 326        int ret;
 327
 328        ret = kstrtoul(buf, 10, &value);
 329        if (ret < 0)
 330                return ret;
 331        if (value > 1)
 332                return -EINVAL;
 333
 334        if (value)
 335                set_bit(flag, &transparent_hugepage_flags);
 336        else
 337                clear_bit(flag, &transparent_hugepage_flags);
 338
 339        return count;
 340}
 341
  342/*
  343 * Currently "defrag" only controls whether __GFP_WAIT is used for the
  344 * allocation. A blind __GFP_REPEAT would be too aggressive: it's never
  345 * worth swapping tons of memory just to allocate one more hugepage.
  346 */
 347static ssize_t defrag_show(struct kobject *kobj,
 348                           struct kobj_attribute *attr, char *buf)
 349{
 350        return double_flag_show(kobj, attr, buf,
 351                                TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 352                                TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
 353}
 354static ssize_t defrag_store(struct kobject *kobj,
 355                            struct kobj_attribute *attr,
 356                            const char *buf, size_t count)
 357{
 358        return double_flag_store(kobj, attr, buf, count,
 359                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
 360                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
 361}
 362static struct kobj_attribute defrag_attr =
 363        __ATTR(defrag, 0644, defrag_show, defrag_store);
 364
 365static ssize_t use_zero_page_show(struct kobject *kobj,
 366                struct kobj_attribute *attr, char *buf)
 367{
 368        return single_flag_show(kobj, attr, buf,
 369                                TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 370}
 371static ssize_t use_zero_page_store(struct kobject *kobj,
 372                struct kobj_attribute *attr, const char *buf, size_t count)
 373{
 374        return single_flag_store(kobj, attr, buf, count,
 375                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 376}
 377static struct kobj_attribute use_zero_page_attr =
 378        __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
 379#ifdef CONFIG_DEBUG_VM
 380static ssize_t debug_cow_show(struct kobject *kobj,
 381                                struct kobj_attribute *attr, char *buf)
 382{
 383        return single_flag_show(kobj, attr, buf,
 384                                TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
 385}
 386static ssize_t debug_cow_store(struct kobject *kobj,
 387                               struct kobj_attribute *attr,
 388                               const char *buf, size_t count)
 389{
 390        return single_flag_store(kobj, attr, buf, count,
 391                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
 392}
 393static struct kobj_attribute debug_cow_attr =
 394        __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
 395#endif /* CONFIG_DEBUG_VM */
 396
 397static struct attribute *hugepage_attr[] = {
 398        &enabled_attr.attr,
 399        &defrag_attr.attr,
 400        &use_zero_page_attr.attr,
 401#ifdef CONFIG_DEBUG_VM
 402        &debug_cow_attr.attr,
 403#endif
 404        NULL,
 405};
 406
 407static struct attribute_group hugepage_attr_group = {
 408        .attrs = hugepage_attr,
 409};
 410
 411static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
 412                                         struct kobj_attribute *attr,
 413                                         char *buf)
 414{
 415        return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
 416}
 417
 418static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
 419                                          struct kobj_attribute *attr,
 420                                          const char *buf, size_t count)
 421{
 422        unsigned long msecs;
 423        int err;
 424
 425        err = kstrtoul(buf, 10, &msecs);
 426        if (err || msecs > UINT_MAX)
 427                return -EINVAL;
 428
 429        khugepaged_scan_sleep_millisecs = msecs;
 430        wake_up_interruptible(&khugepaged_wait);
 431
 432        return count;
 433}
 434static struct kobj_attribute scan_sleep_millisecs_attr =
 435        __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
 436               scan_sleep_millisecs_store);
 437
 438static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
 439                                          struct kobj_attribute *attr,
 440                                          char *buf)
 441{
 442        return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
 443}
 444
 445static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
 446                                           struct kobj_attribute *attr,
 447                                           const char *buf, size_t count)
 448{
 449        unsigned long msecs;
 450        int err;
 451
 452        err = kstrtoul(buf, 10, &msecs);
 453        if (err || msecs > UINT_MAX)
 454                return -EINVAL;
 455
 456        khugepaged_alloc_sleep_millisecs = msecs;
 457        wake_up_interruptible(&khugepaged_wait);
 458
 459        return count;
 460}
 461static struct kobj_attribute alloc_sleep_millisecs_attr =
 462        __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
 463               alloc_sleep_millisecs_store);
 464
 465static ssize_t pages_to_scan_show(struct kobject *kobj,
 466                                  struct kobj_attribute *attr,
 467                                  char *buf)
 468{
 469        return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
 470}
 471static ssize_t pages_to_scan_store(struct kobject *kobj,
 472                                   struct kobj_attribute *attr,
 473                                   const char *buf, size_t count)
 474{
 475        int err;
 476        unsigned long pages;
 477
 478        err = kstrtoul(buf, 10, &pages);
 479        if (err || !pages || pages > UINT_MAX)
 480                return -EINVAL;
 481
 482        khugepaged_pages_to_scan = pages;
 483
 484        return count;
 485}
 486static struct kobj_attribute pages_to_scan_attr =
 487        __ATTR(pages_to_scan, 0644, pages_to_scan_show,
 488               pages_to_scan_store);
 489
 490static ssize_t pages_collapsed_show(struct kobject *kobj,
 491                                    struct kobj_attribute *attr,
 492                                    char *buf)
 493{
 494        return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
 495}
 496static struct kobj_attribute pages_collapsed_attr =
 497        __ATTR_RO(pages_collapsed);
 498
 499static ssize_t full_scans_show(struct kobject *kobj,
 500                               struct kobj_attribute *attr,
 501                               char *buf)
 502{
 503        return sprintf(buf, "%u\n", khugepaged_full_scans);
 504}
 505static struct kobj_attribute full_scans_attr =
 506        __ATTR_RO(full_scans);
 507
 508static ssize_t khugepaged_defrag_show(struct kobject *kobj,
 509                                      struct kobj_attribute *attr, char *buf)
 510{
 511        return single_flag_show(kobj, attr, buf,
 512                                TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 513}
 514static ssize_t khugepaged_defrag_store(struct kobject *kobj,
 515                                       struct kobj_attribute *attr,
 516                                       const char *buf, size_t count)
 517{
 518        return single_flag_store(kobj, attr, buf, count,
 519                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
 520}
 521static struct kobj_attribute khugepaged_defrag_attr =
 522        __ATTR(defrag, 0644, khugepaged_defrag_show,
 523               khugepaged_defrag_store);
 524
  525/*
  526 * max_ptes_none controls whether khugepaged may collapse hugepages over
  527 * unmapped ptes, which can increase the memory footprint of the vmas
  528 * involved. When max_ptes_none is 0, khugepaged will not reduce the
  529 * free memory available in the system as it runs. Increasing
  530 * max_ptes_none instead lets khugepaged consume more free memory
  531 * during its scan.
  532 */
 533static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
 534                                             struct kobj_attribute *attr,
 535                                             char *buf)
 536{
 537        return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
 538}
 539static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
 540                                              struct kobj_attribute *attr,
 541                                              const char *buf, size_t count)
 542{
 543        int err;
 544        unsigned long max_ptes_none;
 545
 546        err = kstrtoul(buf, 10, &max_ptes_none);
 547        if (err || max_ptes_none > HPAGE_PMD_NR-1)
 548                return -EINVAL;
 549
 550        khugepaged_max_ptes_none = max_ptes_none;
 551
 552        return count;
 553}
 554static struct kobj_attribute khugepaged_max_ptes_none_attr =
 555        __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
 556               khugepaged_max_ptes_none_store);
 557
 558static struct attribute *khugepaged_attr[] = {
 559        &khugepaged_defrag_attr.attr,
 560        &khugepaged_max_ptes_none_attr.attr,
 561        &pages_to_scan_attr.attr,
 562        &pages_collapsed_attr.attr,
 563        &full_scans_attr.attr,
 564        &scan_sleep_millisecs_attr.attr,
 565        &alloc_sleep_millisecs_attr.attr,
 566        NULL,
 567};
 568
 569static struct attribute_group khugepaged_attr_group = {
 570        .attrs = khugepaged_attr,
 571        .name = "khugepaged",
 572};
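/*
 * The khugepaged tunables above are grouped under the "khugepaged"
 * directory, i.e. /sys/kernel/mm/transparent_hugepage/khugepaged/
 * (see hugepage_init_sysfs() below).
 */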
 573
 574static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
 575{
 576        int err;
 577
 578        *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
 579        if (unlikely(!*hugepage_kobj)) {
 580                printk(KERN_ERR "hugepage: failed to create transparent hugepage kobject\n");
 581                return -ENOMEM;
 582        }
 583
 584        err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
 585        if (err) {
 586                printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
 587                goto delete_obj;
 588        }
 589
 590        err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
 591        if (err) {
 592                printk(KERN_ERR "hugepage: failed to register transparent hugepage group\n");
 593                goto remove_hp_group;
 594        }
 595
 596        return 0;
 597
 598remove_hp_group:
 599        sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
 600delete_obj:
 601        kobject_put(*hugepage_kobj);
 602        return err;
 603}
 604
 605static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 606{
 607        sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
 608        sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
 609        kobject_put(hugepage_kobj);
 610}
 611#else
 612static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
 613{
 614        return 0;
 615}
 616
 617static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 618{
 619}
 620#endif /* CONFIG_SYSFS */
 621
 622static int __init hugepage_init(void)
 623{
 624        int err;
 625        struct kobject *hugepage_kobj;
 626
 627        if (!has_transparent_hugepage()) {
 628                transparent_hugepage_flags = 0;
 629                return -EINVAL;
 630        }
 631
 632        err = hugepage_init_sysfs(&hugepage_kobj);
 633        if (err)
 634                return err;
 635
 636        err = khugepaged_slab_init();
 637        if (err)
 638                goto out;
 639
 640        register_shrinker(&huge_zero_page_shrinker);
 641
 642        /*
 643         * By default disable transparent hugepages on smaller systems,
 644         * where the extra memory used could hurt more than TLB overhead
 645         * is likely to save.  The admin can still enable it through /sys.
 646         */
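        /*
         * The check below compares totalram_pages against 512 MB worth of
         * pages: with 4 KiB pages, 512 << (20 - PAGE_SHIFT) is
         * 512 << 8 == 131072 pages (an illustrative x86-64 figure).
         */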
 647        if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
 648                transparent_hugepage_flags = 0;
 649
 650        start_khugepaged();
 651
 652        return 0;
 653out:
 654        hugepage_exit_sysfs(hugepage_kobj);
 655        return err;
 656}
 657module_init(hugepage_init)
 658
 659static int __init setup_transparent_hugepage(char *str)
 660{
 661        int ret = 0;
 662        if (!str)
 663                goto out;
 664        if (!strcmp(str, "always")) {
 665                set_bit(TRANSPARENT_HUGEPAGE_FLAG,
 666                        &transparent_hugepage_flags);
 667                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 668                          &transparent_hugepage_flags);
 669                ret = 1;
 670        } else if (!strcmp(str, "madvise")) {
 671                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 672                          &transparent_hugepage_flags);
 673                set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 674                        &transparent_hugepage_flags);
 675                ret = 1;
 676        } else if (!strcmp(str, "never")) {
 677                clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
 678                          &transparent_hugepage_flags);
 679                clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 680                          &transparent_hugepage_flags);
 681                ret = 1;
 682        }
 683out:
 684        if (!ret)
 685                printk(KERN_WARNING
 686                       "transparent_hugepage= cannot parse, ignored\n");
 687        return ret;
 688}
 689__setup("transparent_hugepage=", setup_transparent_hugepage);
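/*
 * Example: the built-in default can be overridden on the kernel command
 * line, e.g. "transparent_hugepage=madvise"; accepted values are
 * "always", "madvise" and "never", matching the parser above.
 */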
 690
 691pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 692{
 693        if (likely(vma->vm_flags & VM_WRITE))
 694                pmd = pmd_mkwrite(pmd);
 695        return pmd;
 696}
 697
 698static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
 699{
 700        pmd_t entry;
 701        entry = mk_pmd(page, prot);
 702        entry = pmd_mkhuge(entry);
 703        return entry;
 704}
 705
 706static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 707                                        struct vm_area_struct *vma,
 708                                        unsigned long haddr, pmd_t *pmd,
 709                                        struct page *page)
 710{
 711        pgtable_t pgtable;
 712
 713        VM_BUG_ON(!PageCompound(page));
 714        pgtable = pte_alloc_one(mm, haddr);
 715        if (unlikely(!pgtable))
 716                return VM_FAULT_OOM;
 717
 718        clear_huge_page(page, haddr, HPAGE_PMD_NR);
 719        /*
 720         * The memory barrier inside __SetPageUptodate makes sure that
 721         * clear_huge_page writes become visible before the set_pmd_at()
 722         * write.
 723         */
 724        __SetPageUptodate(page);
 725
 726        spin_lock(&mm->page_table_lock);
 727        if (unlikely(!pmd_none(*pmd))) {
 728                spin_unlock(&mm->page_table_lock);
 729                mem_cgroup_uncharge_page(page);
 730                put_page(page);
 731                pte_free(mm, pgtable);
 732        } else {
 733                pmd_t entry;
 734                entry = mk_huge_pmd(page, vma->vm_page_prot);
 735                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
 736                page_add_new_anon_rmap(page, vma, haddr);
 737                pgtable_trans_huge_deposit(mm, pmd, pgtable);
 738                set_pmd_at(mm, haddr, pmd, entry);
 739                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
 740                mm->nr_ptes++;
 741                spin_unlock(&mm->page_table_lock);
 742        }
 743
 744        return 0;
 745}
 746
 747static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
 748{
 749        return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
 750}
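/*
 * In other words, when "defrag" is disabled for this allocation the mask
 * drops __GFP_WAIT: e.g. alloc_hugepage_gfpmask(0, 0) evaluates to
 * GFP_TRANSHUGE & ~__GFP_WAIT, so the allocator will not block to reclaim
 * or compact memory for the huge page.
 */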
 751
 752static inline struct page *alloc_hugepage_vma(int defrag,
 753                                              struct vm_area_struct *vma,
 754                                              unsigned long haddr, int nd,
 755                                              gfp_t extra_gfp)
 756{
 757        return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
 758                               HPAGE_PMD_ORDER, vma, haddr, nd);
 759}
 760
 761#ifndef CONFIG_NUMA
 762static inline struct page *alloc_hugepage(int defrag)
 763{
 764        return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
 765                           HPAGE_PMD_ORDER);
 766}
 767#endif
 768
 769static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
 770                struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
 771                struct page *zero_page)
 772{
 773        pmd_t entry;
 774        if (!pmd_none(*pmd))
 775                return false;
 776        entry = mk_pmd(zero_page, vma->vm_page_prot);
 777        entry = pmd_wrprotect(entry);
 778        entry = pmd_mkhuge(entry);
 779        pgtable_trans_huge_deposit(mm, pmd, pgtable);
 780        set_pmd_at(mm, haddr, pmd, entry);
 781        mm->nr_ptes++;
 782        return true;
 783}
 784
 785int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 786                               unsigned long address, pmd_t *pmd,
 787                               unsigned int flags)
 788{
 789        struct page *page;
 790        unsigned long haddr = address & HPAGE_PMD_MASK;
 791
 792        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
 793                return VM_FAULT_FALLBACK;
 794        if (unlikely(anon_vma_prepare(vma)))
 795                return VM_FAULT_OOM;
 796        if (unlikely(khugepaged_enter(vma)))
 797                return VM_FAULT_OOM;
 798        if (!(flags & FAULT_FLAG_WRITE) &&
 799                        transparent_hugepage_use_zero_page()) {
 800                pgtable_t pgtable;
 801                struct page *zero_page;
 802                bool set;
 803                pgtable = pte_alloc_one(mm, haddr);
 804                if (unlikely(!pgtable))
 805                        return VM_FAULT_OOM;
 806                zero_page = get_huge_zero_page();
 807                if (unlikely(!zero_page)) {
 808                        pte_free(mm, pgtable);
 809                        count_vm_event(THP_FAULT_FALLBACK);
 810                        return VM_FAULT_FALLBACK;
 811                }
 812                spin_lock(&mm->page_table_lock);
 813                set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
 814                                zero_page);
 815                spin_unlock(&mm->page_table_lock);
 816                if (!set) {
 817                        pte_free(mm, pgtable);
 818                        put_huge_zero_page();
 819                }
 820                return 0;
 821        }
 822        page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 823                        vma, haddr, numa_node_id(), 0);
 824        if (unlikely(!page)) {
 825                count_vm_event(THP_FAULT_FALLBACK);
 826                return VM_FAULT_FALLBACK;
 827        }
 828        if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 829                put_page(page);
 830                count_vm_event(THP_FAULT_FALLBACK);
 831                return VM_FAULT_FALLBACK;
 832        }
 833        if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
 834                mem_cgroup_uncharge_page(page);
 835                put_page(page);
 836                count_vm_event(THP_FAULT_FALLBACK);
 837                return VM_FAULT_FALLBACK;
 838        }
 839
 840        count_vm_event(THP_FAULT_ALLOC);
 841        return 0;
 842}
 843
 844int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 845                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
 846                  struct vm_area_struct *vma)
 847{
 848        struct page *src_page;
 849        pmd_t pmd;
 850        pgtable_t pgtable;
 851        int ret;
 852
 853        ret = -ENOMEM;
 854        pgtable = pte_alloc_one(dst_mm, addr);
 855        if (unlikely(!pgtable))
 856                goto out;
 857
 858        spin_lock(&dst_mm->page_table_lock);
 859        spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
 860
 861        ret = -EAGAIN;
 862        pmd = *src_pmd;
 863        if (unlikely(!pmd_trans_huge(pmd))) {
 864                pte_free(dst_mm, pgtable);
 865                goto out_unlock;
 866        }
  867        /*
  868         * mm->page_table_lock is enough to be sure that the huge zero pmd
  869         * is not under splitting, since we never split the zero page
  870         * itself, only the pmd mapping it into a page table.
  871         */
 872        if (is_huge_zero_pmd(pmd)) {
 873                struct page *zero_page;
 874                bool set;
 875                /*
 876                 * get_huge_zero_page() will never allocate a new page here,
 877                 * since we already have a zero page to copy. It just takes a
 878                 * reference.
 879                 */
 880                zero_page = get_huge_zero_page();
 881                set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
 882                                zero_page);
 883                BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
 884                ret = 0;
 885                goto out_unlock;
 886        }
 887        if (unlikely(pmd_trans_splitting(pmd))) {
  888                /* a huge page split is running from under us */
 889                spin_unlock(&src_mm->page_table_lock);
 890                spin_unlock(&dst_mm->page_table_lock);
 891                pte_free(dst_mm, pgtable);
 892
 893                wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
 894                goto out;
 895        }
 896        src_page = pmd_page(pmd);
 897        VM_BUG_ON(!PageHead(src_page));
 898        get_page(src_page);
 899        page_dup_rmap(src_page);
 900        add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
 901
 902        pmdp_set_wrprotect(src_mm, addr, src_pmd);
 903        pmd = pmd_mkold(pmd_wrprotect(pmd));
 904        pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
 905        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
 906        dst_mm->nr_ptes++;
 907
 908        ret = 0;
 909out_unlock:
 910        spin_unlock(&src_mm->page_table_lock);
 911        spin_unlock(&dst_mm->page_table_lock);
 912out:
 913        return ret;
 914}
 915
 916void huge_pmd_set_accessed(struct mm_struct *mm,
 917                           struct vm_area_struct *vma,
 918                           unsigned long address,
 919                           pmd_t *pmd, pmd_t orig_pmd,
 920                           int dirty)
 921{
 922        pmd_t entry;
 923        unsigned long haddr;
 924
 925        spin_lock(&mm->page_table_lock);
 926        if (unlikely(!pmd_same(*pmd, orig_pmd)))
 927                goto unlock;
 928
 929        entry = pmd_mkyoung(orig_pmd);
 930        haddr = address & HPAGE_PMD_MASK;
 931        if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
 932                update_mmu_cache_pmd(vma, address, pmd);
 933
 934unlock:
 935        spin_unlock(&mm->page_table_lock);
 936}
 937
 938static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
 939                struct vm_area_struct *vma, unsigned long address,
 940                pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
 941{
 942        pgtable_t pgtable;
 943        pmd_t _pmd;
 944        struct page *page;
 945        int i, ret = 0;
 946        unsigned long mmun_start;       /* For mmu_notifiers */
 947        unsigned long mmun_end;         /* For mmu_notifiers */
 948
 949        page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 950        if (!page) {
 951                ret |= VM_FAULT_OOM;
 952                goto out;
 953        }
 954
 955        if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 956                put_page(page);
 957                ret |= VM_FAULT_OOM;
 958                goto out;
 959        }
 960
 961        clear_user_highpage(page, address);
 962        __SetPageUptodate(page);
 963
 964        mmun_start = haddr;
 965        mmun_end   = haddr + HPAGE_PMD_SIZE;
 966        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
 967
 968        spin_lock(&mm->page_table_lock);
 969        if (unlikely(!pmd_same(*pmd, orig_pmd)))
 970                goto out_free_page;
 971
 972        pmdp_clear_flush(vma, haddr, pmd);
 973        /* leave pmd empty until pte is filled */
 974
 975        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
 976        pmd_populate(mm, &_pmd, pgtable);
 977
 978        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
 979                pte_t *pte, entry;
 980                if (haddr == (address & PAGE_MASK)) {
 981                        entry = mk_pte(page, vma->vm_page_prot);
 982                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 983                        page_add_new_anon_rmap(page, vma, haddr);
 984                } else {
 985                        entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
 986                        entry = pte_mkspecial(entry);
 987                }
 988                pte = pte_offset_map(&_pmd, haddr);
 989                VM_BUG_ON(!pte_none(*pte));
 990                set_pte_at(mm, haddr, pte, entry);
 991                pte_unmap(pte);
 992        }
 993        smp_wmb(); /* make pte visible before pmd */
 994        pmd_populate(mm, pmd, pgtable);
 995        spin_unlock(&mm->page_table_lock);
 996        put_huge_zero_page();
 997        inc_mm_counter(mm, MM_ANONPAGES);
 998
 999        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1000
1001        ret |= VM_FAULT_WRITE;
1002out:
1003        return ret;
1004out_free_page:
1005        spin_unlock(&mm->page_table_lock);
1006        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1007        mem_cgroup_uncharge_page(page);
1008        put_page(page);
1009        goto out;
1010}
1011
1012static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1013                                        struct vm_area_struct *vma,
1014                                        unsigned long address,
1015                                        pmd_t *pmd, pmd_t orig_pmd,
1016                                        struct page *page,
1017                                        unsigned long haddr)
1018{
1019        pgtable_t pgtable;
1020        pmd_t _pmd;
1021        int ret = 0, i;
1022        struct page **pages;
1023        unsigned long mmun_start;       /* For mmu_notifiers */
1024        unsigned long mmun_end;         /* For mmu_notifiers */
1025
1026        pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
1027                        GFP_KERNEL);
1028        if (unlikely(!pages)) {
1029                ret |= VM_FAULT_OOM;
1030                goto out;
1031        }
1032
1033        for (i = 0; i < HPAGE_PMD_NR; i++) {
1034                pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
1035                                               __GFP_OTHER_NODE,
1036                                               vma, address, page_to_nid(page));
1037                if (unlikely(!pages[i] ||
1038                             mem_cgroup_newpage_charge(pages[i], mm,
1039                                                       GFP_KERNEL))) {
1040                        if (pages[i])
1041                                put_page(pages[i]);
1042                        mem_cgroup_uncharge_start();
1043                        while (--i >= 0) {
1044                                mem_cgroup_uncharge_page(pages[i]);
1045                                put_page(pages[i]);
1046                        }
1047                        mem_cgroup_uncharge_end();
1048                        kfree(pages);
1049                        ret |= VM_FAULT_OOM;
1050                        goto out;
1051                }
1052        }
1053
1054        for (i = 0; i < HPAGE_PMD_NR; i++) {
1055                copy_user_highpage(pages[i], page + i,
1056                                   haddr + PAGE_SIZE * i, vma);
1057                __SetPageUptodate(pages[i]);
1058                cond_resched();
1059        }
1060
1061        mmun_start = haddr;
1062        mmun_end   = haddr + HPAGE_PMD_SIZE;
1063        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1064
1065        spin_lock(&mm->page_table_lock);
1066        if (unlikely(!pmd_same(*pmd, orig_pmd)))
1067                goto out_free_pages;
1068        VM_BUG_ON(!PageHead(page));
1069
1070        pmdp_clear_flush(vma, haddr, pmd);
1071        /* leave pmd empty until pte is filled */
1072
1073        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1074        pmd_populate(mm, &_pmd, pgtable);
1075
1076        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1077                pte_t *pte, entry;
1078                entry = mk_pte(pages[i], vma->vm_page_prot);
1079                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1080                page_add_new_anon_rmap(pages[i], vma, haddr);
1081                pte = pte_offset_map(&_pmd, haddr);
1082                VM_BUG_ON(!pte_none(*pte));
1083                set_pte_at(mm, haddr, pte, entry);
1084                pte_unmap(pte);
1085        }
1086        kfree(pages);
1087
1088        smp_wmb(); /* make pte visible before pmd */
1089        pmd_populate(mm, pmd, pgtable);
1090        page_remove_rmap(page);
1091        spin_unlock(&mm->page_table_lock);
1092
1093        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1094
1095        ret |= VM_FAULT_WRITE;
1096        put_page(page);
1097
1098out:
1099        return ret;
1100
1101out_free_pages:
1102        spin_unlock(&mm->page_table_lock);
1103        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1104        mem_cgroup_uncharge_start();
1105        for (i = 0; i < HPAGE_PMD_NR; i++) {
1106                mem_cgroup_uncharge_page(pages[i]);
1107                put_page(pages[i]);
1108        }
1109        mem_cgroup_uncharge_end();
1110        kfree(pages);
1111        goto out;
1112}
1113
1114int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1115                        unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
1116{
1117        int ret = 0;
1118        struct page *page = NULL, *new_page;
1119        unsigned long haddr;
1120        unsigned long mmun_start;       /* For mmu_notifiers */
1121        unsigned long mmun_end;         /* For mmu_notifiers */
1122
1123        VM_BUG_ON(!vma->anon_vma);
1124        haddr = address & HPAGE_PMD_MASK;
1125        if (is_huge_zero_pmd(orig_pmd))
1126                goto alloc;
1127        spin_lock(&mm->page_table_lock);
1128        if (unlikely(!pmd_same(*pmd, orig_pmd)))
1129                goto out_unlock;
1130
1131        page = pmd_page(orig_pmd);
1132        VM_BUG_ON(!PageCompound(page) || !PageHead(page));
1133        if (page_mapcount(page) == 1) {
1134                pmd_t entry;
1135                entry = pmd_mkyoung(orig_pmd);
1136                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1137                if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
1138                        update_mmu_cache_pmd(vma, address, pmd);
1139                ret |= VM_FAULT_WRITE;
1140                goto out_unlock;
1141        }
1142        get_page(page);
1143        spin_unlock(&mm->page_table_lock);
1144alloc:
1145        if (transparent_hugepage_enabled(vma) &&
1146            !transparent_hugepage_debug_cow())
1147                new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
1148                                              vma, haddr, numa_node_id(), 0);
1149        else
1150                new_page = NULL;
1151
1152        if (unlikely(!new_page)) {
1153                if (is_huge_zero_pmd(orig_pmd)) {
1154                        ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
1155                                        address, pmd, orig_pmd, haddr);
1156                } else {
1157                        ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1158                                        pmd, orig_pmd, page, haddr);
1159                        if (ret & VM_FAULT_OOM)
1160                                split_huge_page(page);
1161                        put_page(page);
1162                }
1163                count_vm_event(THP_FAULT_FALLBACK);
1164                goto out;
1165        }
1166
1167        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1168                put_page(new_page);
1169                if (page) {
1170                        split_huge_page(page);
1171                        put_page(page);
1172                }
1173                count_vm_event(THP_FAULT_FALLBACK);
1174                ret |= VM_FAULT_OOM;
1175                goto out;
1176        }
1177
1178        count_vm_event(THP_FAULT_ALLOC);
1179
1180        if (is_huge_zero_pmd(orig_pmd))
1181                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1182        else
1183                copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
1184        __SetPageUptodate(new_page);
1185
1186        mmun_start = haddr;
1187        mmun_end   = haddr + HPAGE_PMD_SIZE;
1188        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1189
1190        spin_lock(&mm->page_table_lock);
1191        if (page)
1192                put_page(page);
1193        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1194                spin_unlock(&mm->page_table_lock);
1195                mem_cgroup_uncharge_page(new_page);
1196                put_page(new_page);
1197                goto out_mn;
1198        } else {
1199                pmd_t entry;
1200                entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1201                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1202                pmdp_clear_flush(vma, haddr, pmd);
1203                page_add_new_anon_rmap(new_page, vma, haddr);
1204                set_pmd_at(mm, haddr, pmd, entry);
1205                update_mmu_cache_pmd(vma, address, pmd);
1206                if (is_huge_zero_pmd(orig_pmd)) {
1207                        add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
1208                        put_huge_zero_page();
1209                } else {
1210                        VM_BUG_ON(!PageHead(page));
1211                        page_remove_rmap(page);
1212                        put_page(page);
1213                }
1214                ret |= VM_FAULT_WRITE;
1215        }
1216        spin_unlock(&mm->page_table_lock);
1217out_mn:
1218        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1219out:
1220        return ret;
1221out_unlock:
1222        spin_unlock(&mm->page_table_lock);
1223        return ret;
1224}
1225
1226struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1227                                   unsigned long addr,
1228                                   pmd_t *pmd,
1229                                   unsigned int flags)
1230{
1231        struct mm_struct *mm = vma->vm_mm;
1232        struct page *page = NULL;
1233
1234        assert_spin_locked(&mm->page_table_lock);
1235
1236        if (flags & FOLL_WRITE && !pmd_write(*pmd))
1237                goto out;
1238
1239        /* Avoid dumping huge zero page */
1240        if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1241                return ERR_PTR(-EFAULT);
1242
1243        page = pmd_page(*pmd);
1244        VM_BUG_ON(!PageHead(page));
1245        if (flags & FOLL_TOUCH) {
1246                pmd_t _pmd;
1247                /*
1248                 * We should set the dirty bit only for FOLL_WRITE but
1249                 * for now the dirty bit in the pmd is meaningless.
1250                 * And if the dirty bit will become meaningful and
1251                 * we'll only set it with FOLL_WRITE, an atomic
1252                 * set_bit will be required on the pmd to set the
1253                 * young bit, instead of the current set_pmd_at.
1254                 */
1255                _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1256                if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1257                                          pmd, _pmd,  1))
1258                        update_mmu_cache_pmd(vma, addr, pmd);
1259        }
1260        if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1261                if (page->mapping && trylock_page(page)) {
1262                        lru_add_drain();
1263                        if (page->mapping)
1264                                mlock_vma_page(page);
1265                        unlock_page(page);
1266                }
1267        }
1268        page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1269        VM_BUG_ON(!PageCompound(page));
1270        if (flags & FOLL_GET)
1271                get_page_foll(page);
1272
1273out:
1274        return page;
1275}
1276
1277/* NUMA hinting page fault entry point for trans huge pmds */
1278int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1279                                unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1280{
1281        struct anon_vma *anon_vma = NULL;
1282        struct page *page;
1283        unsigned long haddr = addr & HPAGE_PMD_MASK;
1284        int page_nid = -1, this_nid = numa_node_id();
1285        int target_nid;
1286        bool page_locked;
1287        bool migrated = false;
1288
1289        spin_lock(&mm->page_table_lock);
1290        if (unlikely(!pmd_same(pmd, *pmdp)))
1291                goto out_unlock;
1292
1293        page = pmd_page(pmd);
1294        page_nid = page_to_nid(page);
1295        count_vm_numa_event(NUMA_HINT_FAULTS);
1296        if (page_nid == this_nid)
1297                count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1298
1299        /*
1300         * Acquire the page lock to serialise THP migrations but avoid dropping
1301         * page_table_lock if at all possible
1302         */
1303        page_locked = trylock_page(page);
1304        target_nid = mpol_misplaced(page, vma, haddr);
1305        if (target_nid == -1) {
1306                /* If the page was locked, there are no parallel migrations */
1307                if (page_locked)
1308                        goto clear_pmdnuma;
1309
1310                /*
1311                 * Otherwise wait for potential migrations and retry. We do
1312                 * relock and check_same as the page may no longer be mapped.
1313                 * As the fault is being retried, do not account for it.
1314                 */
1315                spin_unlock(&mm->page_table_lock);
1316                wait_on_page_locked(page);
1317                page_nid = -1;
1318                goto out;
1319        }
1320
1321        /* Page is misplaced, serialise migrations and parallel THP splits */
1322        get_page(page);
1323        spin_unlock(&mm->page_table_lock);
1324        if (!page_locked)
1325                lock_page(page);
1326        anon_vma = page_lock_anon_vma_read(page);
1327
 1328        /* Confirm the PMD did not change while the page_table_lock was dropped */
1329        spin_lock(&mm->page_table_lock);
1330        if (unlikely(!pmd_same(pmd, *pmdp))) {
1331                unlock_page(page);
1332                put_page(page);
1333                page_nid = -1;
1334                goto out_unlock;
1335        }
1336
1337        /*
1338         * Migrate the THP to the requested node, returns with page unlocked
1339         * and pmd_numa cleared.
1340         */
1341        spin_unlock(&mm->page_table_lock);
1342        migrated = migrate_misplaced_transhuge_page(mm, vma,
1343                                pmdp, pmd, addr, page, target_nid);
1344        if (migrated)
1345                page_nid = target_nid;
1346
1347        goto out;
1348clear_pmdnuma:
1349        BUG_ON(!PageLocked(page));
1350        pmd = pmd_mknonnuma(pmd);
1351        set_pmd_at(mm, haddr, pmdp, pmd);
1352        VM_BUG_ON(pmd_numa(*pmdp));
1353        update_mmu_cache_pmd(vma, addr, pmdp);
1354        unlock_page(page);
1355out_unlock:
1356        spin_unlock(&mm->page_table_lock);
1357
1358out:
1359        if (anon_vma)
1360                page_unlock_anon_vma_read(anon_vma);
1361
1362        if (page_nid != -1)
1363                task_numa_fault(page_nid, HPAGE_PMD_NR, migrated);
1364
1365        return 0;
1366}
1367
1368int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1369                 pmd_t *pmd, unsigned long addr)
1370{
1371        int ret = 0;
1372
1373        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
1374                struct page *page;
1375                pgtable_t pgtable;
1376                pmd_t orig_pmd;
 1377                /*
 1378                 * Architectures like ppc64 look at the deposited pgtable
 1379                 * when calling pmdp_get_and_clear, so do the
 1380                 * pgtable_trans_huge_withdraw only after finishing the
 1381                 * pmdp-related operations.
 1382                 */
1383                orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd);
1384                tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1385                pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
1386                if (is_huge_zero_pmd(orig_pmd)) {
1387                        tlb->mm->nr_ptes--;
1388                        spin_unlock(&tlb->mm->page_table_lock);
1389                        put_huge_zero_page();
1390                } else {
1391                        page = pmd_page(orig_pmd);
1392                        page_remove_rmap(page);
1393                        VM_BUG_ON(page_mapcount(page) < 0);
1394                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1395                        VM_BUG_ON(!PageHead(page));
1396                        tlb->mm->nr_ptes--;
1397                        spin_unlock(&tlb->mm->page_table_lock);
1398                        tlb_remove_page(tlb, page);
1399                }
1400                pte_free(tlb->mm, pgtable);
1401                ret = 1;
1402        }
1403        return ret;
1404}
1405
1406int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1407                unsigned long addr, unsigned long end,
1408                unsigned char *vec)
1409{
1410        int ret = 0;
1411
1412        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
1413                /*
1414                 * All logical pages in the range are present
1415                 * if backed by a huge page.
1416                 */
1417                spin_unlock(&vma->vm_mm->page_table_lock);
1418                memset(vec, 1, (end - addr) >> PAGE_SHIFT);
1419                ret = 1;
1420        }
1421
1422        return ret;
1423}
1424
1425int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1426                  unsigned long old_addr,
1427                  unsigned long new_addr, unsigned long old_end,
1428                  pmd_t *old_pmd, pmd_t *new_pmd)
1429{
1430        int ret = 0;
1431        pmd_t pmd;
1432
1433        struct mm_struct *mm = vma->vm_mm;
1434
1435        if ((old_addr & ~HPAGE_PMD_MASK) ||
1436            (new_addr & ~HPAGE_PMD_MASK) ||
1437            old_end - old_addr < HPAGE_PMD_SIZE ||
1438            (new_vma->vm_flags & VM_NOHUGEPAGE))
1439                goto out;
1440
 1441        /*
 1442         * The destination pmd shouldn't be established; free_pgtables()
 1443         * should have released it.
 1444         */
1445        if (WARN_ON(!pmd_none(*new_pmd))) {
1446                VM_BUG_ON(pmd_trans_huge(*new_pmd));
1447                goto out;
1448        }
1449
1450        ret = __pmd_trans_huge_lock(old_pmd, vma);
1451        if (ret == 1) {
1452                pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
1453                VM_BUG_ON(!pmd_none(*new_pmd));
1454                set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1455                spin_unlock(&mm->page_table_lock);
1456        }
1457out:
1458        return ret;
1459}
1460
1461int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1462                unsigned long addr, pgprot_t newprot, int prot_numa)
1463{
1464        struct mm_struct *mm = vma->vm_mm;
1465        int ret = 0;
1466
1467        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
1468                pmd_t entry;
1469                entry = pmdp_get_and_clear(mm, addr, pmd);
1470                if (!prot_numa) {
1471                        entry = pmd_modify(entry, newprot);
1472                        BUG_ON(pmd_write(entry));
1473                } else {
1474                        struct page *page = pmd_page(*pmd);
1475
1476                        /* only check non-shared pages */
1477                        if (page_mapcount(page) == 1 &&
1478                            !pmd_numa(*pmd)) {
1479                                entry = pmd_mknuma(entry);
1480                        }
1481                }
1482                set_pmd_at(mm, addr, pmd, entry);
1483                spin_unlock(&vma->vm_mm->page_table_lock);
1484                ret = 1;
1485        }
1486
1487        return ret;
1488}
1489
1490/*
1491 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1492 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
1493 *
1494 * Note that if it returns 1, this routine returns with the page table
1495 * lock still held, so the caller must unlock it.
1496 */
1497int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
1498{
1499        spin_lock(&vma->vm_mm->page_table_lock);
1500        if (likely(pmd_trans_huge(*pmd))) {
1501                if (unlikely(pmd_trans_splitting(*pmd))) {
1502                        spin_unlock(&vma->vm_mm->page_table_lock);
1503                        wait_split_huge_page(vma->anon_vma, pmd);
1504                        return -1;
1505                } else {
1506                        /* Thp mapped by 'pmd' is stable, so we can
1507                         * handle it as it is. */
1508                        return 1;
1509                }
1510        }
1511        spin_unlock(&vma->vm_mm->page_table_lock);
1512        return 0;
1513}
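
/*
 * Editor's illustration (not part of the upstream file): the canonical
 * caller pattern for __pmd_trans_huge_lock(), mirroring zap_huge_pmd()
 * and mincore_huge_pmd() above.  A return value of 1 means the pmd maps
 * a stable thp and page_table_lock is still held, so the caller does
 * its work and then drops the lock; 0 and -1 mean there is nothing huge
 * to operate on (-1 additionally waited for a pending split to finish).
 * The helper name below is hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_huge_pmd_op(struct vm_area_struct *vma, pmd_t *pmd)
{
        int ret = 0;

        if (__pmd_trans_huge_lock(pmd, vma) == 1) {
                /* *pmd is a stable huge pmd here; operate on it ... */
                spin_unlock(&vma->vm_mm->page_table_lock);
                ret = 1;
        }
        return ret;
}
#endif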
1514
1515pmd_t *page_check_address_pmd(struct page *page,
1516                              struct mm_struct *mm,
1517                              unsigned long address,
1518                              enum page_check_address_pmd_flag flag)
1519{
1520        pmd_t *pmd, *ret = NULL;
1521
1522        if (address & ~HPAGE_PMD_MASK)
1523                goto out;
1524
1525        pmd = mm_find_pmd(mm, address);
1526        if (!pmd)
1527                goto out;
1528        if (pmd_none(*pmd))
1529                goto out;
1530        if (pmd_page(*pmd) != page)
1531                goto out;
1532        /*
1533         * split_vma() may create temporary aliased mappings. There is
1534         * no risk as long as all huge pmd are found and have their
1535         * splitting bit set before __split_huge_page_refcount
1536         * runs. Finding the same huge pmd more than once during the
1537         * same rmap walk is not a problem.
1538         */
1539        if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1540            pmd_trans_splitting(*pmd))
1541                goto out;
1542        if (pmd_trans_huge(*pmd)) {
1543                VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1544                          !pmd_trans_splitting(*pmd));
1545                ret = pmd;
1546        }
1547out:
1548        return ret;
1549}
1550
1551static int __split_huge_page_splitting(struct page *page,
1552                                       struct vm_area_struct *vma,
1553                                       unsigned long address)
1554{
1555        struct mm_struct *mm = vma->vm_mm;
1556        pmd_t *pmd;
1557        int ret = 0;
1558        /* For mmu_notifiers */
1559        const unsigned long mmun_start = address;
1560        const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
1561
1562        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1563        spin_lock(&mm->page_table_lock);
1564        pmd = page_check_address_pmd(page, mm, address,
1565                                     PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
1566        if (pmd) {
1567                /*
1568                 * We can't temporarily set the pmd to null in order
1569                 * to split it; the pmd must remain marked huge at all
1570                 * times or the VM won't take the pmd_trans_huge paths
1571                 * and won't wait on the anon_vma->root->rwsem to
1572                 * serialize against split_huge_page*.
1573                 */
1574                pmdp_splitting_flush(vma, address, pmd);
1575                ret = 1;
1576        }
1577        spin_unlock(&mm->page_table_lock);
1578        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1579
1580        return ret;
1581}
1582
1583static void __split_huge_page_refcount(struct page *page,
1584                                       struct list_head *list)
1585{
1586        int i;
1587        struct zone *zone = page_zone(page);
1588        struct lruvec *lruvec;
1589        int tail_count = 0;
1590
1591        /* prevent PageLRU from going away from under us, and freeze lru stats */
1592        spin_lock_irq(&zone->lru_lock);
1593        lruvec = mem_cgroup_page_lruvec(page, zone);
1594
1595        compound_lock(page);
1596        /* complete memcg work before adding pages to the LRU */
1597        mem_cgroup_split_huge_fixup(page);
1598
1599        for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
1600                struct page *page_tail = page + i;
1601
1602                /* tail_page->_mapcount cannot change */
1603                BUG_ON(page_mapcount(page_tail) < 0);
1604                tail_count += page_mapcount(page_tail);
1605                /* check for overflow */
1606                BUG_ON(tail_count < 0);
1607                BUG_ON(atomic_read(&page_tail->_count) != 0);
1608                /*
1609                 * tail_page->_count is zero and not changing from
1610                 * under us. But get_page_unless_zero() may be running
1611                 * from under us on the tail_page. If we used
1612                 * atomic_set() below instead of atomic_add(), we
1613                 * would then run atomic_set() concurrently with
1614                 * get_page_unless_zero(), and atomic_set() is
1615                 * implemented in C not using locked ops. spin_unlock
1616                 * on x86 sometimes uses locked ops because of PPro
1617                 * errata 66, 92, so unless somebody can guarantee
1618                 * atomic_set() here would be safe on all archs (and
1619                 * not only on x86), it's safer to use atomic_add().
1620                 */
1621                atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1622                           &page_tail->_count);
1623
1624                /* after clearing PageTail the gup refcount can be released */
1625                smp_mb();
1626
1627                /*
1628                 * retain the hwpoison flag of the poisoned tail page:
1629                 *   this keeps memory-failure from killing the wrong
1630                 *   process on a guest machine (KVM).
1631                 */
1632                page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1633                page_tail->flags |= (page->flags &
1634                                     ((1L << PG_referenced) |
1635                                      (1L << PG_swapbacked) |
1636                                      (1L << PG_mlocked) |
1637                                      (1L << PG_uptodate) |
1638                                      (1L << PG_active) |
1639                                      (1L << PG_unevictable)));
1640                page_tail->flags |= (1L << PG_dirty);
1641
1642                /* clear PageTail before overwriting first_page */
1643                smp_wmb();
1644
1645                /*
1646                 * __split_huge_page_splitting() already set the
1647                 * splitting bit in all pmds that could map this
1648                 * hugepage, which ensures no CPU can alter the
1649                 * mapcount on the head page. The mapcount is only
1650                 * accounted in the head page and it has to be
1651                 * transferred to all tail pages in the code below. So
1652                 * for this code to be safe, the mapcount can't change
1653                 * during the split. But that doesn't mean userland
1654                 * can't keep changing and reading the page contents
1655                 * while we transfer the mapcount, so the pmd splitting
1656                 * status is achieved by setting a reserved bit in the
1657                 * pmd, not by clearing the present bit.
1658                 */
1659                page_tail->_mapcount = page->_mapcount;
1660
1661                BUG_ON(page_tail->mapping);
1662                page_tail->mapping = page->mapping;
1663
1664                page_tail->index = page->index + i;
1665                page_nid_xchg_last(page_tail, page_nid_last(page));
1666
1667                BUG_ON(!PageAnon(page_tail));
1668                BUG_ON(!PageUptodate(page_tail));
1669                BUG_ON(!PageDirty(page_tail));
1670                BUG_ON(!PageSwapBacked(page_tail));
1671
1672                lru_add_page_tail(page, page_tail, lruvec, list);
1673        }
1674        atomic_sub(tail_count, &page->_count);
1675        BUG_ON(atomic_read(&page->_count) <= 0);
1676
1677        __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
1678
1679        ClearPageCompound(page);
1680        compound_unlock(page);
1681        spin_unlock_irq(&zone->lru_lock);
1682
1683        for (i = 1; i < HPAGE_PMD_NR; i++) {
1684                struct page *page_tail = page + i;
1685                BUG_ON(page_count(page_tail) <= 0);
1686                /*
1687                 * Tail pages may be freed if there wasn't any mapping,
1688                 * e.g. if add_to_swap() is running on an lru page that
1689                 * had its mapping zapped. And freeing these pages
1690                 * requires taking the lru_lock so we do the put_page
1691                 * of the tail pages after the split is complete.
1692                 */
1693                put_page(page_tail);
1694        }
1695
1696        /*
1697         * Only the head page (which has now become a regular page) is
1698         * required to be pinned by the caller.
1699         */
1700        BUG_ON(page_count(page) <= 0);
1701}
1702
1703static int __split_huge_page_map(struct page *page,
1704                                 struct vm_area_struct *vma,
1705                                 unsigned long address)
1706{
1707        struct mm_struct *mm = vma->vm_mm;
1708        pmd_t *pmd, _pmd;
1709        int ret = 0, i;
1710        pgtable_t pgtable;
1711        unsigned long haddr;
1712
1713        spin_lock(&mm->page_table_lock);
1714        pmd = page_check_address_pmd(page, mm, address,
1715                                     PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
1716        if (pmd) {
1717                pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1718                pmd_populate(mm, &_pmd, pgtable);
1719
1720                haddr = address;
1721                for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1722                        pte_t *pte, entry;
1723                        BUG_ON(PageCompound(page+i));
1724                        entry = mk_pte(page + i, vma->vm_page_prot);
1725                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1726                        if (!pmd_write(*pmd))
1727                                entry = pte_wrprotect(entry);
1728                        else
1729                                BUG_ON(page_mapcount(page) != 1);
1730                        if (!pmd_young(*pmd))
1731                                entry = pte_mkold(entry);
1732                        if (pmd_numa(*pmd))
1733                                entry = pte_mknuma(entry);
1734                        pte = pte_offset_map(&_pmd, haddr);
1735                        BUG_ON(!pte_none(*pte));
1736                        set_pte_at(mm, haddr, pte, entry);
1737                        pte_unmap(pte);
1738                }
1739
1740                smp_wmb(); /* make pte visible before pmd */
1741                /*
1742                 * Up to this point the pmd is present and huge and
1743                 * userland has full access to the hugepage
1744                 * during the split (which happens in place). If we
1745                 * overwrite the pmd with the not-huge version
1746                 * pointing to the pte here (which of course we could
1747                 * if all CPUs were bug free), userland could trigger
1748                 * a small page size TLB miss on the small sized TLB
1749                 * while the hugepage TLB entry is still established
1750                 * in the huge TLB. Some CPUs don't like that. See
1751                 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1752                 * Erratum 383 on page 93. Intel should be safe but
1753                 * also warns that it's only safe if the permission
1754                 * and cache attributes of the two entries loaded in
1755                 * the two TLBs are identical (which should be the case
1756                 * here). But it is generally safer to never allow
1757                 * small and huge TLB entries for the same virtual
1758                 * address to be loaded simultaneously. So instead of
1759                 * doing "pmd_populate(); flush_tlb_range();" we first
1760                 * mark the current pmd notpresent (atomically because
1761                 * here the pmd_trans_huge and pmd_trans_splitting
1762                 * must remain set at all times on the pmd until the
1763                 * split is complete for this pmd), then we flush the
1764                 * SMP TLB and finally we write the non-huge version
1765                 * of the pmd entry with pmd_populate.
1766                 */
1767                pmdp_invalidate(vma, address, pmd);
1768                pmd_populate(mm, pmd, pgtable);
1769                ret = 1;
1770        }
1771        spin_unlock(&mm->page_table_lock);
1772
1773        return ret;
1774}
1775
1776/* must be called with anon_vma->root->rwsem held */
1777static void __split_huge_page(struct page *page,
1778                              struct anon_vma *anon_vma,
1779                              struct list_head *list)
1780{
1781        int mapcount, mapcount2;
1782        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1783        struct anon_vma_chain *avc;
1784
1785        BUG_ON(!PageHead(page));
1786        BUG_ON(PageTail(page));
1787
1788        mapcount = 0;
1789        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1790                struct vm_area_struct *vma = avc->vma;
1791                unsigned long addr = vma_address(page, vma);
1792                BUG_ON(is_vma_temporary_stack(vma));
1793                mapcount += __split_huge_page_splitting(page, vma, addr);
1794        }
1795        /*
1796         * It is critical that new vmas are added to the tail of the
1797         * anon_vma list. This guarantees that if copy_huge_pmd() runs
1798         * and establishes a child pmd before
1799         * __split_huge_page_splitting() freezes the parent pmd (so if
1800         * we fail to prevent copy_huge_pmd() from running until the
1801         * whole __split_huge_page() is complete), we will still see
1802         * the newly established pmd of the child later during the
1803         * walk, to be able to set it as pmd_trans_splitting too.
1804         */
1805        if (mapcount != page_mapcount(page))
1806                printk(KERN_ERR "mapcount %d page_mapcount %d\n",
1807                       mapcount, page_mapcount(page));
1808        BUG_ON(mapcount != page_mapcount(page));
1809
1810        __split_huge_page_refcount(page, list);
1811
1812        mapcount2 = 0;
1813        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1814                struct vm_area_struct *vma = avc->vma;
1815                unsigned long addr = vma_address(page, vma);
1816                BUG_ON(is_vma_temporary_stack(vma));
1817                mapcount2 += __split_huge_page_map(page, vma, addr);
1818        }
1819        if (mapcount != mapcount2)
1820                printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
1821                       mapcount, mapcount2, page_mapcount(page));
1822        BUG_ON(mapcount != mapcount2);
1823}
1824
1825/*
1826 * Split a hugepage into normal pages. This doesn't change the position of the
1827 * head page. If @list is null, tail pages will be added to the LRU list;
1828 * otherwise, to @list. Both the head page and the tail pages will inherit
1829 * mapping, flags, and so on from the hugepage.
1830 * Return 0 if the hugepage is split successfully, otherwise return 1.
1831 */
1832int split_huge_page_to_list(struct page *page, struct list_head *list)
1833{
1834        struct anon_vma *anon_vma;
1835        int ret = 1;
1836
1837        BUG_ON(is_huge_zero_page(page));
1838        BUG_ON(!PageAnon(page));
1839
1840        /*
1841         * The caller does not necessarily hold an mmap_sem that would prevent
1842         * the anon_vma from disappearing, so we first take a reference to it
1843         * and then lock the anon_vma for write. This is similar to
1844         * page_lock_anon_vma_read except the write lock is taken to serialise
1845         * against parallel split or collapse operations.
1846         */
1847        anon_vma = page_get_anon_vma(page);
1848        if (!anon_vma)
1849                goto out;
1850        anon_vma_lock_write(anon_vma);
1851
1852        ret = 0;
1853        if (!PageCompound(page))
1854                goto out_unlock;
1855
1856        BUG_ON(!PageSwapBacked(page));
1857        __split_huge_page(page, anon_vma, list);
1858        count_vm_event(THP_SPLIT);
1859
1860        BUG_ON(PageCompound(page));
1861out_unlock:
1862        anon_vma_unlock_write(anon_vma);
1863        put_anon_vma(anon_vma);
1864out:
1865        return ret;
1866}
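
/*
 * Editor's note (illustrative, not part of the upstream file): most
 * callers in this file use the split_huge_page() wrapper, which in this
 * kernel is defined in include/linux/huge_mm.h roughly as sketched
 * below: a split whose tail pages go back to the LRU rather than onto a
 * caller-supplied list (the list form is used by page reclaim).
 */
#if 0	/* illustrative sketch only */
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
#endif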
1867
1868#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
1869
1870int hugepage_madvise(struct vm_area_struct *vma,
1871                     unsigned long *vm_flags, int advice)
1872{
1873        struct mm_struct *mm = vma->vm_mm;
1874
1875        switch (advice) {
1876        case MADV_HUGEPAGE:
1877                /*
1878                 * Be somewhat over-protective like KSM for now!
1879                 */
1880                if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1881                        return -EINVAL;
1882                if (mm->def_flags & VM_NOHUGEPAGE)
1883                        return -EINVAL;
1884                *vm_flags &= ~VM_NOHUGEPAGE;
1885                *vm_flags |= VM_HUGEPAGE;
1886                /*
1887                 * If the vma becomes good for khugepaged to scan,
1888                 * register it here without waiting for a page fault that
1889                 * may not happen any time soon.
1890                 */
1891                if (unlikely(khugepaged_enter_vma_merge(vma)))
1892                        return -ENOMEM;
1893                break;
1894        case MADV_NOHUGEPAGE:
1895                /*
1896                 * Be somewhat over-protective like KSM for now!
1897                 */
1898                if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
1899                        return -EINVAL;
1900                *vm_flags &= ~VM_HUGEPAGE;
1901                *vm_flags |= VM_NOHUGEPAGE;
1902                /*
1903                 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
1904                 * this vma even if we leave the mm registered in khugepaged if
1905                 * it got registered before VM_NOHUGEPAGE was set.
1906                 */
1907                break;
1908        }
1909
1910        return 0;
1911}
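
/*
 * Editor's usage sketch (userspace, illustrative, not part of the
 * upstream file): how a program reaches the MADV_HUGEPAGE case above.
 * madvise() sets VM_HUGEPAGE on the vma(s) covering the range; it fails
 * with EINVAL on mappings covered by VM_NO_THP (special, hugetlbfs or
 * shared mappings, per the checks above).  The 2MB alignment is not
 * required by madvise() itself, but gives the fault path and khugepaged
 * a fully aligned hugepage-sized slot to work with.
 */
#if 0	/* illustrative sketch only */
#define _GNU_SOURCE
#include <stdlib.h>
#include <sys/mman.h>

static int request_thp(void **buf, size_t len)
{
        /* 2MB-aligned anonymous memory, a natural THP candidate */
        if (posix_memalign(buf, 2 * 1024 * 1024, len))
                return -1;

        return madvise(*buf, len, MADV_HUGEPAGE);
}
#endif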
1912
1913static int __init khugepaged_slab_init(void)
1914{
1915        mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
1916                                          sizeof(struct mm_slot),
1917                                          __alignof__(struct mm_slot), 0, NULL);
1918        if (!mm_slot_cache)
1919                return -ENOMEM;
1920
1921        return 0;
1922}
1923
1924static inline struct mm_slot *alloc_mm_slot(void)
1925{
1926        if (!mm_slot_cache)     /* initialization failed */
1927                return NULL;
1928        return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
1929}
1930
1931static inline void free_mm_slot(struct mm_slot *mm_slot)
1932{
1933        kmem_cache_free(mm_slot_cache, mm_slot);
1934}
1935
1936static struct mm_slot *get_mm_slot(struct mm_struct *mm)
1937{
1938        struct mm_slot *mm_slot;
1939
1940        hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
1941                if (mm == mm_slot->mm)
1942                        return mm_slot;
1943
1944        return NULL;
1945}
1946
1947static void insert_to_mm_slots_hash(struct mm_struct *mm,
1948                                    struct mm_slot *mm_slot)
1949{
1950        mm_slot->mm = mm;
1951        hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
1952}
1953
1954static inline int khugepaged_test_exit(struct mm_struct *mm)
1955{
1956        return atomic_read(&mm->mm_users) == 0;
1957}
1958
1959int __khugepaged_enter(struct mm_struct *mm)
1960{
1961        struct mm_slot *mm_slot;
1962        int wakeup;
1963
1964        mm_slot = alloc_mm_slot();
1965        if (!mm_slot)
1966                return -ENOMEM;
1967
1968        /* __khugepaged_exit() must not run from under us */
1969        VM_BUG_ON(khugepaged_test_exit(mm));
1970        if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
1971                free_mm_slot(mm_slot);
1972                return 0;
1973        }
1974
1975        spin_lock(&khugepaged_mm_lock);
1976        insert_to_mm_slots_hash(mm, mm_slot);
1977        /*
1978         * Insert just behind the scanning cursor, to let the area settle
1979         * down a little.
1980         */
1981        wakeup = list_empty(&khugepaged_scan.mm_head);
1982        list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
1983        spin_unlock(&khugepaged_mm_lock);
1984
1985        atomic_inc(&mm->mm_count);
1986        if (wakeup)
1987                wake_up_interruptible(&khugepaged_wait);
1988
1989        return 0;
1990}
1991
1992int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
1993{
1994        unsigned long hstart, hend;
1995        if (!vma->anon_vma)
1996                /*
1997                 * Not yet faulted in, so we will register later in the
1998                 * page fault if needed.
1999                 */
2000                return 0;
2001        if (vma->vm_ops)
2002                /* khugepaged not yet working on file or special mappings */
2003                return 0;
2004        VM_BUG_ON(vma->vm_flags & VM_NO_THP);
2005        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2006        hend = vma->vm_end & HPAGE_PMD_MASK;
2007        if (hstart < hend)
2008                return khugepaged_enter(vma);
2009        return 0;
2010}
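
/*
 * Editor's worked example (illustrative, not part of the upstream
 * file): with 2MB hugepages, HPAGE_PMD_MASK == ~0x1fffff.  For a vma
 * spanning [0x601000, 0xa01000):
 *
 *   hstart = (0x601000 + 0x1fffff) & ~0x1fffff = 0x800000
 *   hend   =  0xa01000             & ~0x1fffff = 0xa00000
 *
 * hstart < hend, so the vma contains at least one fully covered,
 * hugepage-aligned 2MB slot ([0x800000, 0xa00000)) and is worth
 * registering with khugepaged.  A smaller or badly placed vma rounds
 * to hstart >= hend and is skipped.
 */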
2011
2012void __khugepaged_exit(struct mm_struct *mm)
2013{
2014        struct mm_slot *mm_slot;
2015        int free = 0;
2016
2017        spin_lock(&khugepaged_mm_lock);
2018        mm_slot = get_mm_slot(mm);
2019        if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
2020                hash_del(&mm_slot->hash);
2021                list_del(&mm_slot->mm_node);
2022                free = 1;
2023        }
2024        spin_unlock(&khugepaged_mm_lock);
2025
2026        if (free) {
2027                clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2028                free_mm_slot(mm_slot);
2029                mmdrop(mm);
2030        } else if (mm_slot) {
2031                /*
2032                 * This is required to serialize against
2033                 * khugepaged_test_exit() (which is guaranteed to run
2034                 * under mmap_sem read mode). Stop here (after we
2035                 * return, all pagetables will be destroyed) until
2036                 * khugepaged has finished working on the pagetables
2037                 * under the mmap_sem.
2038                 */
2039                down_write(&mm->mmap_sem);
2040                up_write(&mm->mmap_sem);
2041        }
2042}
2043
2044static void release_pte_page(struct page *page)
2045{
2046        /* 0 stands for page_is_file_cache(page) == false */
2047        dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
2048        unlock_page(page);
2049        putback_lru_page(page);
2050}
2051
2052static void release_pte_pages(pte_t *pte, pte_t *_pte)
2053{
2054        while (--_pte >= pte) {
2055                pte_t pteval = *_pte;
2056                if (!pte_none(pteval))
2057                        release_pte_page(pte_page(pteval));
2058        }
2059}
2060
2061static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2062                                        unsigned long address,
2063                                        pte_t *pte)
2064{
2065        struct page *page;
2066        pte_t *_pte;
2067        int referenced = 0, none = 0;
2068        for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2069             _pte++, address += PAGE_SIZE) {
2070                pte_t pteval = *_pte;
2071                if (pte_none(pteval)) {
2072                        if (++none <= khugepaged_max_ptes_none)
2073                                continue;
2074                        else
2075                                goto out;
2076                }
2077                if (!pte_present(pteval) || !pte_write(pteval))
2078                        goto out;
2079                page = vm_normal_page(vma, address, pteval);
2080                if (unlikely(!page))
2081                        goto out;
2082
2083                VM_BUG_ON(PageCompound(page));
2084                BUG_ON(!PageAnon(page));
2085                VM_BUG_ON(!PageSwapBacked(page));
2086
2087                /* cannot use mapcount: can't collapse if there's a gup pin */
2088                if (page_count(page) != 1)
2089                        goto out;
2090                /*
2091                 * We can do it before isolate_lru_page because the
2092                 * page can't be freed from under us. NOTE: PG_lock
2093                 * is needed to serialize against split_huge_page
2094                 * when invoked from the VM.
2095                 */
2096                if (!trylock_page(page))
2097                        goto out;
2098                /*
2099                 * Isolate the page to avoid collapsing an hugepage
2100                 * currently in use by the VM.
2101                 */
2102                if (isolate_lru_page(page)) {
2103                        unlock_page(page);
2104                        goto out;
2105                }
2106                /* 0 stands for page_is_file_cache(page) == false */
2107                inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2108                VM_BUG_ON(!PageLocked(page));
2109                VM_BUG_ON(PageLRU(page));
2110
2111                /* If no mapped pte is young, don't collapse the page */
2112                if (pte_young(pteval) || PageReferenced(page) ||
2113                    mmu_notifier_test_young(vma->vm_mm, address))
2114                        referenced = 1;
2115        }
2116        if (likely(referenced))
2117                return 1;
2118out:
2119        release_pte_pages(pte, _pte);
2120        return 0;
2121}
2122
2123static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2124                                      struct vm_area_struct *vma,
2125                                      unsigned long address,
2126                                      spinlock_t *ptl)
2127{
2128        pte_t *_pte;
2129        for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2130                pte_t pteval = *_pte;
2131                struct page *src_page;
2132
2133                if (pte_none(pteval)) {
2134                        clear_user_highpage(page, address);
2135                        add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
2136                } else {
2137                        src_page = pte_page(pteval);
2138                        copy_user_highpage(page, src_page, address, vma);
2139                        VM_BUG_ON(page_mapcount(src_page) != 1);
2140                        release_pte_page(src_page);
2141                        /*
2142                         * ptl mostly unnecessary, but preempt has to
2143                         * be disabled to update the per-cpu stats
2144                         * inside page_remove_rmap().
2145                         */
2146                        spin_lock(ptl);
2147                        /*
2148                         * paravirt calls inside pte_clear here are
2149                         * superfluous.
2150                         */
2151                        pte_clear(vma->vm_mm, address, _pte);
2152                        page_remove_rmap(src_page);
2153                        spin_unlock(ptl);
2154                        free_page_and_swap_cache(src_page);
2155                }
2156
2157                address += PAGE_SIZE;
2158                page++;
2159        }
2160}
2161
2162static void khugepaged_alloc_sleep(void)
2163{
2164        wait_event_freezable_timeout(khugepaged_wait, false,
2165                        msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2166}
2167
2168#ifdef CONFIG_NUMA
2169static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2170{
2171        if (IS_ERR(*hpage)) {
2172                if (!*wait)
2173                        return false;
2174
2175                *wait = false;
2176                *hpage = NULL;
2177                khugepaged_alloc_sleep();
2178        } else if (*hpage) {
2179                put_page(*hpage);
2180                *hpage = NULL;
2181        }
2182
2183        return true;
2184}
2185
2186static struct page
2187*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
2188                       struct vm_area_struct *vma, unsigned long address,
2189                       int node)
2190{
2191        VM_BUG_ON(*hpage);
2192        /*
2193         * Allocate the page while the vma is still valid and under
2194         * the mmap_sem read mode so there is no memory allocation
2195         * later when we take the mmap_sem in write mode. This is
2196         * friendlier behavior (OTOH it may actually hide bugs) to
2197         * filesystems in userland with daemons allocating memory in
2198         * the userland I/O paths. Allocating memory with the
2199         * mmap_sem in read mode is also a good idea to allow greater
2200         * scalability.
2201         */
2202        *hpage  = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
2203                                      node, __GFP_OTHER_NODE);
2204
2205        /*
2206         * After allocating the hugepage, release the mmap_sem read lock in
2207         * preparation for taking it in write mode.
2208         */
2209        up_read(&mm->mmap_sem);
2210        if (unlikely(!*hpage)) {
2211                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2212                *hpage = ERR_PTR(-ENOMEM);
2213                return NULL;
2214        }
2215
2216        count_vm_event(THP_COLLAPSE_ALLOC);
2217        return *hpage;
2218}
2219#else
2220static struct page *khugepaged_alloc_hugepage(bool *wait)
2221{
2222        struct page *hpage;
2223
2224        do {
2225                hpage = alloc_hugepage(khugepaged_defrag());
2226                if (!hpage) {
2227                        count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2228                        if (!*wait)
2229                                return NULL;
2230
2231                        *wait = false;
2232                        khugepaged_alloc_sleep();
2233                } else
2234                        count_vm_event(THP_COLLAPSE_ALLOC);
2235        } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2236
2237        return hpage;
2238}
2239
2240static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2241{
2242        if (!*hpage)
2243                *hpage = khugepaged_alloc_hugepage(wait);
2244
2245        if (unlikely(!*hpage))
2246                return false;
2247
2248        return true;
2249}
2250
2251static struct page
2252*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
2253                       struct vm_area_struct *vma, unsigned long address,
2254                       int node)
2255{
2256        up_read(&mm->mmap_sem);
2257        VM_BUG_ON(!*hpage);
2258        return *hpage;
2259}
2260#endif
2261
2262static bool hugepage_vma_check(struct vm_area_struct *vma)
2263{
2264        if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2265            (vma->vm_flags & VM_NOHUGEPAGE))
2266                return false;
2267
2268        if (!vma->anon_vma || vma->vm_ops)
2269                return false;
2270        if (is_vma_temporary_stack(vma))
2271                return false;
2272        VM_BUG_ON(vma->vm_flags & VM_NO_THP);
2273        return true;
2274}
2275
2276static void collapse_huge_page(struct mm_struct *mm,
2277                                   unsigned long address,
2278                                   struct page **hpage,
2279                                   struct vm_area_struct *vma,
2280                                   int node)
2281{
2282        pmd_t *pmd, _pmd;
2283        pte_t *pte;
2284        pgtable_t pgtable;
2285        struct page *new_page;
2286        spinlock_t *ptl;
2287        int isolated;
2288        unsigned long hstart, hend;
2289        unsigned long mmun_start;       /* For mmu_notifiers */
2290        unsigned long mmun_end;         /* For mmu_notifiers */
2291
2292        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2293
2294        /* release the mmap_sem read lock. */
2295        new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
2296        if (!new_page)
2297                return;
2298
2299        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
2300                return;
2301
2302        /*
2303         * Prevent all access to pagetables, with the exception of
2304         * gup_fast, later handled by the pmdp_clear_flush, and the VM,
2305         * handled by the anon_vma lock + PG_lock.
2306         */
2307        down_write(&mm->mmap_sem);
2308        if (unlikely(khugepaged_test_exit(mm)))
2309                goto out;
2310
2311        vma = find_vma(mm, address);
2312        if (!vma)
2313                goto out;
2314        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2315        hend = vma->vm_end & HPAGE_PMD_MASK;
2316        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
2317                goto out;
2318        if (!hugepage_vma_check(vma))
2319                goto out;
2320        pmd = mm_find_pmd(mm, address);
2321        if (!pmd)
2322                goto out;
2323        if (pmd_trans_huge(*pmd))
2324                goto out;
2325
2326        anon_vma_lock_write(vma->anon_vma);
2327
2328        pte = pte_offset_map(pmd, address);
2329        ptl = pte_lockptr(mm, pmd);
2330
2331        mmun_start = address;
2332        mmun_end   = address + HPAGE_PMD_SIZE;
2333        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2334        spin_lock(&mm->page_table_lock); /* probably unnecessary */
2335        /*
2336         * After this gup_fast can't run anymore. This also removes
2337         * any huge TLB entry from the CPU so we won't allow
2338         * huge and small TLB entries for the same virtual address
2339         * to avoid the risk of CPU bugs in that area.
2340         */
2341        _pmd = pmdp_clear_flush(vma, address, pmd);
2342        spin_unlock(&mm->page_table_lock);
2343        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2344
2345        spin_lock(ptl);
2346        isolated = __collapse_huge_page_isolate(vma, address, pte);
2347        spin_unlock(ptl);
2348
2349        if (unlikely(!isolated)) {
2350                pte_unmap(pte);
2351                spin_lock(&mm->page_table_lock);
2352                BUG_ON(!pmd_none(*pmd));
2353                /*
2354                 * We can only use set_pmd_at when establishing
2355                 * hugepmds and never for establishing regular pmds that
2356                 * point to regular pagetables. Use pmd_populate for that.
2357                 */
2358                pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2359                spin_unlock(&mm->page_table_lock);
2360                anon_vma_unlock_write(vma->anon_vma);
2361                goto out;
2362        }
2363
2364        /*
2365         * All pages are isolated and locked so anon_vma rmap
2366         * can't run anymore.
2367         */
2368        anon_vma_unlock_write(vma->anon_vma);
2369
2370        __collapse_huge_page_copy(pte, new_page, vma, address, ptl);
2371        pte_unmap(pte);
2372        __SetPageUptodate(new_page);
2373        pgtable = pmd_pgtable(_pmd);
2374
2375        _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2376        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
2377
2378        /*
2379         * spin_lock() below is not the equivalent of smp_wmb(), so
2380         * this is needed to avoid the copy_huge_page writes becoming
2381         * visible after the set_pmd_at() write.
2382         */
2383        smp_wmb();
2384
2385        spin_lock(&mm->page_table_lock);
2386        BUG_ON(!pmd_none(*pmd));
2387        page_add_new_anon_rmap(new_page, vma, address);
2388        pgtable_trans_huge_deposit(mm, pmd, pgtable);
2389        set_pmd_at(mm, address, pmd, _pmd);
2390        update_mmu_cache_pmd(vma, address, pmd);
2391        spin_unlock(&mm->page_table_lock);
2392
2393        *hpage = NULL;
2394
2395        khugepaged_pages_collapsed++;
2396out_up_write:
2397        up_write(&mm->mmap_sem);
2398        return;
2399
2400out:
2401        mem_cgroup_uncharge_page(new_page);
2402        goto out_up_write;
2403}
2404
2405static int khugepaged_scan_pmd(struct mm_struct *mm,
2406                               struct vm_area_struct *vma,
2407                               unsigned long address,
2408                               struct page **hpage)
2409{
2410        pmd_t *pmd;
2411        pte_t *pte, *_pte;
2412        int ret = 0, referenced = 0, none = 0;
2413        struct page *page;
2414        unsigned long _address;
2415        spinlock_t *ptl;
2416        int node = NUMA_NO_NODE;
2417
2418        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2419
2420        pmd = mm_find_pmd(mm, address);
2421        if (!pmd)
2422                goto out;
2423        if (pmd_trans_huge(*pmd))
2424                goto out;
2425
2426        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2427        for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2428             _pte++, _address += PAGE_SIZE) {
2429                pte_t pteval = *_pte;
2430                if (pte_none(pteval)) {
2431                        if (++none <= khugepaged_max_ptes_none)
2432                                continue;
2433                        else
2434                                goto out_unmap;
2435                }
2436                if (!pte_present(pteval) || !pte_write(pteval))
2437                        goto out_unmap;
2438                page = vm_normal_page(vma, _address, pteval);
2439                if (unlikely(!page))
2440                        goto out_unmap;
2441                /*
2442                 * Choose the node of the first page. This could
2443                 * be more sophisticated and look at more pages,
2444                 * but isn't for now.
2445                 */
2446                if (node == NUMA_NO_NODE)
2447                        node = page_to_nid(page);
2448                VM_BUG_ON(PageCompound(page));
2449                if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2450                        goto out_unmap;
2451                /* cannot use mapcount: can't collapse if there's a gup pin */
2452                if (page_count(page) != 1)
2453                        goto out_unmap;
2454                if (pte_young(pteval) || PageReferenced(page) ||
2455                    mmu_notifier_test_young(vma->vm_mm, address))
2456                        referenced = 1;
2457        }
2458        if (referenced)
2459                ret = 1;
2460out_unmap:
2461        pte_unmap_unlock(pte, ptl);
2462        if (ret)
2463                /* collapse_huge_page will return with the mmap_sem released */
2464                collapse_huge_page(mm, address, hpage, vma, node);
2465out:
2466        return ret;
2467}
2468
2469static void collect_mm_slot(struct mm_slot *mm_slot)
2470{
2471        struct mm_struct *mm = mm_slot->mm;
2472
2473        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2474
2475        if (khugepaged_test_exit(mm)) {
2476                /* free mm_slot */
2477                hash_del(&mm_slot->hash);
2478                list_del(&mm_slot->mm_node);
2479
2480                /*
2481                 * Not strictly needed because the mm exited already.
2482                 *
2483                 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2484                 */
2485
2486                /* khugepaged_mm_lock actually not necessary for the below */
2487                free_mm_slot(mm_slot);
2488                mmdrop(mm);
2489        }
2490}
2491
2492static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2493                                            struct page **hpage)
2494        __releases(&khugepaged_mm_lock)
2495        __acquires(&khugepaged_mm_lock)
2496{
2497        struct mm_slot *mm_slot;
2498        struct mm_struct *mm;
2499        struct vm_area_struct *vma;
2500        int progress = 0;
2501
2502        VM_BUG_ON(!pages);
2503        VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2504
2505        if (khugepaged_scan.mm_slot)
2506                mm_slot = khugepaged_scan.mm_slot;
2507        else {
2508                mm_slot = list_entry(khugepaged_scan.mm_head.next,
2509                                     struct mm_slot, mm_node);
2510                khugepaged_scan.address = 0;
2511                khugepaged_scan.mm_slot = mm_slot;
2512        }
2513        spin_unlock(&khugepaged_mm_lock);
2514
2515        mm = mm_slot->mm;
2516        down_read(&mm->mmap_sem);
2517        if (unlikely(khugepaged_test_exit(mm)))
2518                vma = NULL;
2519        else
2520                vma = find_vma(mm, khugepaged_scan.address);
2521
2522        progress++;
2523        for (; vma; vma = vma->vm_next) {
2524                unsigned long hstart, hend;
2525
2526                cond_resched();
2527                if (unlikely(khugepaged_test_exit(mm))) {
2528                        progress++;
2529                        break;
2530                }
2531                if (!hugepage_vma_check(vma)) {
2532skip:
2533                        progress++;
2534                        continue;
2535                }
2536                hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2537                hend = vma->vm_end & HPAGE_PMD_MASK;
2538                if (hstart >= hend)
2539                        goto skip;
2540                if (khugepaged_scan.address > hend)
2541                        goto skip;
2542                if (khugepaged_scan.address < hstart)
2543                        khugepaged_scan.address = hstart;
2544                VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2545
2546                while (khugepaged_scan.address < hend) {
2547                        int ret;
2548                        cond_resched();
2549                        if (unlikely(khugepaged_test_exit(mm)))
2550                                goto breakouterloop;
2551
2552                        VM_BUG_ON(khugepaged_scan.address < hstart ||
2553                                  khugepaged_scan.address + HPAGE_PMD_SIZE >
2554                                  hend);
2555                        ret = khugepaged_scan_pmd(mm, vma,
2556                                                  khugepaged_scan.address,
2557                                                  hpage);
2558                        /* move to next address */
2559                        khugepaged_scan.address += HPAGE_PMD_SIZE;
2560                        progress += HPAGE_PMD_NR;
2561                        if (ret)
2562                                /* we released mmap_sem so break loop */
2563                                goto breakouterloop_mmap_sem;
2564                        if (progress >= pages)
2565                                goto breakouterloop;
2566                }
2567        }
2568breakouterloop:
2569        up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2570breakouterloop_mmap_sem:
2571
2572        spin_lock(&khugepaged_mm_lock);
2573        VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2574        /*
2575         * Release the current mm_slot if this mm is about to die, or
2576         * if we scanned all vmas of this mm.
2577         */
2578        if (khugepaged_test_exit(mm) || !vma) {
2579                /*
2580                 * Make sure that if mm_users is reaching zero while
2581                 * khugepaged runs here, khugepaged_exit will find
2582                 * mm_slot not pointing to the exiting mm.
2583                 */
2584                if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2585                        khugepaged_scan.mm_slot = list_entry(
2586                                mm_slot->mm_node.next,
2587                                struct mm_slot, mm_node);
2588                        khugepaged_scan.address = 0;
2589                } else {
2590                        khugepaged_scan.mm_slot = NULL;
2591                        khugepaged_full_scans++;
2592                }
2593
2594                collect_mm_slot(mm_slot);
2595        }
2596
2597        return progress;
2598}
2599
2600static int khugepaged_has_work(void)
2601{
2602        return !list_empty(&khugepaged_scan.mm_head) &&
2603                khugepaged_enabled();
2604}
2605
2606static int khugepaged_wait_event(void)
2607{
2608        return !list_empty(&khugepaged_scan.mm_head) ||
2609                kthread_should_stop();
2610}
2611
2612static void khugepaged_do_scan(void)
2613{
2614        struct page *hpage = NULL;
2615        unsigned int progress = 0, pass_through_head = 0;
2616        unsigned int pages = khugepaged_pages_to_scan;
2617        bool wait = true;
2618
2619        barrier(); /* write khugepaged_pages_to_scan to local stack */
2620
2621        while (progress < pages) {
2622                if (!khugepaged_prealloc_page(&hpage, &wait))
2623                        break;
2624
2625                cond_resched();
2626
2627                if (unlikely(kthread_should_stop() || freezing(current)))
2628                        break;
2629
2630                spin_lock(&khugepaged_mm_lock);
2631                if (!khugepaged_scan.mm_slot)
2632                        pass_through_head++;
2633                if (khugepaged_has_work() &&
2634                    pass_through_head < 2)
2635                        progress += khugepaged_scan_mm_slot(pages - progress,
2636                                                            &hpage);
2637                else
2638                        progress = pages;
2639                spin_unlock(&khugepaged_mm_lock);
2640        }
2641
2642        if (!IS_ERR_OR_NULL(hpage))
2643                put_page(hpage);
2644}
2645
2646static void khugepaged_wait_work(void)
2647{
2648        try_to_freeze();
2649
2650        if (khugepaged_has_work()) {
2651                if (!khugepaged_scan_sleep_millisecs)
2652                        return;
2653
2654                wait_event_freezable_timeout(khugepaged_wait,
2655                                             kthread_should_stop(),
2656                        msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2657                return;
2658        }
2659
2660        if (khugepaged_enabled())
2661                wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2662}
2663
2664static int khugepaged(void *none)
2665{
2666        struct mm_slot *mm_slot;
2667
2668        set_freezable();
2669        set_user_nice(current, 19);
2670
2671        while (!kthread_should_stop()) {
2672                khugepaged_do_scan();
2673                khugepaged_wait_work();
2674        }
2675
2676        spin_lock(&khugepaged_mm_lock);
2677        mm_slot = khugepaged_scan.mm_slot;
2678        khugepaged_scan.mm_slot = NULL;
2679        if (mm_slot)
2680                collect_mm_slot(mm_slot);
2681        spin_unlock(&khugepaged_mm_lock);
2682        return 0;
2683}
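
/*
 * Editor's note (illustrative, not part of the listing shown here): the
 * loop above runs in a kernel thread.  Elsewhere in this file,
 * start_khugepaged() launches it roughly along these lines (sketch
 * under that assumption; the helper name and error handling here are
 * simplified).  kthread_stop() on khugepaged_thread is what makes
 * kthread_should_stop() above return true.
 */
#if 0	/* illustrative sketch only */
static int example_start_khugepaged(void)
{
        if (!khugepaged_thread)
                khugepaged_thread = kthread_run(khugepaged, NULL,
                                                "khugepaged");
        if (IS_ERR(khugepaged_thread)) {
                int err = PTR_ERR(khugepaged_thread);

                khugepaged_thread = NULL;
                return err;
        }
        return 0;
}
#endif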
2684
2685static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2686                unsigned long haddr, pmd_t *pmd)
2687{
2688        struct mm_struct *mm = vma->vm_mm;
2689        pgtable_t pgtable;
2690        pmd_t _pmd;
2691        int i;
2692
2693        pmdp_clear_flush(vma, haddr, pmd);
2694        /* leave pmd empty until pte is filled */
2695
2696        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2697        pmd_populate(mm, &_pmd, pgtable);
2698
2699        for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2700                pte_t *pte, entry;
2701                entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2702                entry = pte_mkspecial(entry);
2703                pte = pte_offset_map(&_pmd, haddr);
2704                VM_BUG_ON(!pte_none(*pte));
2705                set_pte_at(mm, haddr, pte, entry);
2706                pte_unmap(pte);
2707        }
2708        smp_wmb(); /* make pte visible before pmd */
2709        pmd_populate(mm, pmd, pgtable);
2710        put_huge_zero_page();
2711}
2712
2713void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2714                pmd_t *pmd)
2715{
2716        struct page *page;
2717        struct mm_struct *mm = vma->vm_mm;
2718        unsigned long haddr = address & HPAGE_PMD_MASK;
2719        unsigned long mmun_start;       /* For mmu_notifiers */
2720        unsigned long mmun_end;         /* For mmu_notifiers */
2721
2722        BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
2723
2724        mmun_start = haddr;
2725        mmun_end   = haddr + HPAGE_PMD_SIZE;
2726again:
2727        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2728        spin_lock(&mm->page_table_lock);
2729        if (unlikely(!pmd_trans_huge(*pmd))) {
2730                spin_unlock(&mm->page_table_lock);
2731                mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2732                return;
2733        }
2734        if (is_huge_zero_pmd(*pmd)) {
2735                __split_huge_zero_page_pmd(vma, haddr, pmd);
2736                spin_unlock(&mm->page_table_lock);
2737                mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2738                return;
2739        }
2740        page = pmd_page(*pmd);
2741        VM_BUG_ON(!page_count(page));
2742        get_page(page);
2743        spin_unlock(&mm->page_table_lock);
2744        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2745
2746        split_huge_page(page);
2747
2748        put_page(page);
2749
2750        /*
2751         * We don't always have down_write of mmap_sem here: a racing
2752         * do_huge_pmd_wp_page() might have copied-on-write to another
2753         * huge page before our split_huge_page() got the anon_vma lock.
2754         */
2755        if (unlikely(pmd_trans_huge(*pmd)))
2756                goto again;
2757}
2758
2759void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
2760                pmd_t *pmd)
2761{
2762        struct vm_area_struct *vma;
2763
2764        vma = find_vma(mm, address);
2765        BUG_ON(vma == NULL);
2766        split_huge_page_pmd(vma, address, pmd);
2767}
2768
2769static void split_huge_page_address(struct mm_struct *mm,
2770                                    unsigned long address)
2771{
2772        pmd_t *pmd;
2773
2774        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2775
2776        pmd = mm_find_pmd(mm, address);
2777        if (!pmd)
2778                return;
2779        /*
2780         * Caller holds the mmap_sem write mode, so a huge pmd cannot
2781         * materialize from under us.
2782         */
2783        split_huge_page_pmd_mm(mm, address, pmd);
2784}
2785
2786void __vma_adjust_trans_huge(struct vm_area_struct *vma,
2787                             unsigned long start,
2788                             unsigned long end,
2789                             long adjust_next)
2790{
2791        /*
2792         * If the new start address isn't hpage aligned and it could
2793         * previously have contained an hugepage: check if we need to
2794         * split an huge pmd.
2795         */
2796        if (start & ~HPAGE_PMD_MASK &&
2797            (start & HPAGE_PMD_MASK) >= vma->vm_start &&
2798            (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2799                split_huge_page_address(vma->vm_mm, start);
2800
2801        /*
2802         * If the new end address isn't hpage aligned and it could
2803         * previously have contained an hugepage: check if we need to
2804         * split an huge pmd.
2805         */
2806        if (end & ~HPAGE_PMD_MASK &&
2807            (end & HPAGE_PMD_MASK) >= vma->vm_start &&
2808            (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
2809                split_huge_page_address(vma->vm_mm, end);
2810
2811        /*
2812         * If we're also updating vma->vm_next->vm_start, and if the new
2813         * vm_next->vm_start isn't hpage aligned and it could previously
2814         * have contained an hugepage: check if we need to split an huge pmd.
2815         */
2816        if (adjust_next > 0) {
2817                struct vm_area_struct *next = vma->vm_next;
2818                unsigned long nstart = next->vm_start;
2819                nstart += adjust_next << PAGE_SHIFT;
2820                if (nstart & ~HPAGE_PMD_MASK &&
2821                    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
2822                    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
2823                        split_huge_page_address(next->vm_mm, nstart);
2824        }
2825}
2826