/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code        <alan@lxorguk.ukuu.org.uk>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/perf_event.h>
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched/sysctl.h>
#include <linux/notifier.h>
#include <linux/memory.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)       (0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)              (addr)
#endif

static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end);

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type     prot
 *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
 * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
 *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
 *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
        __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        return __pgprot(pgprot_val(protection_map[vm_flags &
                                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
                        pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);
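
/*
 * Illustrative sketch (not part of the original source): how a vma's
 * vm_page_prot is derived from its flags. A MAP_PRIVATE
 * PROT_READ|PROT_WRITE mapping has VM_READ|VM_WRITE set and VM_SHARED
 * clear, so the lookup lands on __P011; the shared variant would use
 * __S011 instead.
 */
#if 0
static pgprot_t example_private_rw_prot(void)
{
        /* Index 0b0011 into protection_map[]: private, read + write */
        return vm_get_page_prot(VM_READ | VM_WRITE);
}
#endif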

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables. It can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        unsigned long free, allowed, reserve;

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                free = global_page_state(NR_FREE_PAGES);
                free += global_page_state(NR_FILE_PAGES);

                /*
                 * shmem pages shouldn't be counted as free in this
                 * case, they can't be purged, only swapped out, and
                 * that won't affect the overall amount of available
                 * memory in the system.
                 */
                free -= global_page_state(NR_SHMEM);

                free += get_nr_swap_pages();

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable under pressure. The dentry
                 * cache and most inode caches should fall into this
                 * category.
                 */
                free += global_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Leave the reserved pages alone; they are not available
                 * for anonymous mappings.
                 */
                if (free <= totalreserve_pages)
                        goto error;
                else
                        free -= totalreserve_pages;

                /*
                 * Reserve some for root
                 */
                if (!cap_sys_admin)
                        free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = (totalram_pages - hugetlb_total_pages())
                * sysctl_overcommit_ratio / 100;
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
        allowed += total_swap_pages;

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
                allowed -= min(mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}
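
/*
 * Worked example (illustrative, assuming 4KB pages): under
 * OVERCOMMIT_NEVER with 1GB of RAM (262144 pages), no hugetlb pages,
 * 512MB of swap (131072 pages) and the default 50% ratio, a non-root
 * caller gets:
 *
 *      allowed  = 262144 * 50 / 100                  = 131072 pages
 *      allowed -= 8192KB >> (PAGE_SHIFT - 10)        = 129024 pages
 *      allowed += 131072 (swap)                      = 260096 pages
 *
 * minus min(mm->total_vm / 32, 32768) for the user reserve; the request
 * succeeds while vm_committed_as stays below that figure.
 */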

/*
 * Requires inode->i_mapping->i_mmap_mutex
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
{
        if (vma->vm_flags & VM_DENYWRITE)
                atomic_inc(&file_inode(file)->i_writecount);
        if (vma->vm_flags & VM_SHARED)
                mapping->i_mmap_writable--;

        flush_dcache_mmap_lock(mapping);
        if (unlikely(vma->vm_flags & VM_NONLINEAR))
                list_del_init(&vma->shared.nonlinear);
        else
                vma_interval_tree_remove(vma, &mapping->i_mmap);
        flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its interval tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;

        if (file) {
                struct address_space *mapping = file->f_mapping;
                mutex_lock(&mapping->i_mmap_mutex);
                __remove_shared_vm_struct(vma, file, mapping);
                mutex_unlock(&mapping->i_mmap_mutex);
        }
}

/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *next = vma->vm_next;

        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
                fput(vma->vm_file);
        mpol_put(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
        return next;
}

static unsigned long do_brk(unsigned long addr, unsigned long len);

SYSCALL_DEFINE1(brk, unsigned long, brk)
{
        unsigned long rlim, retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
        unsigned long min_brk;
        bool populate;

        down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
        /*
         * CONFIG_COMPAT_BRK can still be overridden by setting
         * randomize_va_space to 2, which will still cause mm->start_brk
         * to be arbitrarily shifted
         */
        if (current->brk_randomized)
                min_brk = mm->start_brk;
        else
                min_brk = mm->end_data;
#else
        min_brk = mm->start_brk;
#endif
        if (brk < min_brk)
                goto out;

        /*
         * Check against rlimit here. If this check is done later after the
         * test of oldbrk with newbrk then it can escape the test and let the
         * data segment grow beyond its set limit in the case where the limit
         * is not page aligned -Ram Gupta
         */
        rlim = rlimit(RLIMIT_DATA);
        if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
                        (mm->end_data - mm->start_data) > rlim)
                goto out;

        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
        if (oldbrk == newbrk)
                goto set_brk;

        /* Always allow shrinking brk. */
        if (brk <= mm->brk) {
                if (!do_munmap(mm, newbrk, oldbrk-newbrk))
                        goto set_brk;
                goto out;
        }

        /* Check against existing mmap mappings. */
        if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
                goto out;

        /* Ok, looks good - let it rip. */
        if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
                goto out;

set_brk:
        mm->brk = brk;
        populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
        up_write(&mm->mmap_sem);
        if (populate)
                mm_populate(oldbrk, newbrk - oldbrk);
        return brk;

out:
        retval = mm->brk;
        up_write(&mm->mmap_sem);
        return retval;
}
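
/*
 * Illustrative sketch (userspace, not part of this file): the semantics
 * above as seen through glibc's brk()/sbrk() wrappers. Growing the break
 * fails, leaving the old value in place, if the new break would collide
 * with an existing mapping or exceed RLIMIT_DATA; shrinking is always
 * allowed.
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int main(void)
{
        void *old = sbrk(0);            /* current program break */

        if (sbrk(4096) == (void *)-1)   /* grow by one page */
                perror("sbrk");
        printf("break moved from %p to %p\n", old, sbrk(0));
        brk(old);                       /* shrinking always succeeds */
        return 0;
}
#endif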

static long vma_compute_subtree_gap(struct vm_area_struct *vma)
{
        unsigned long max, subtree_gap;
        max = vma->vm_start;
        if (vma->vm_prev)
                max -= vma->vm_prev->vm_end;
        if (vma->vm_rb.rb_left) {
                subtree_gap = rb_entry(vma->vm_rb.rb_left,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
                if (subtree_gap > max)
                        max = subtree_gap;
        }
        if (vma->vm_rb.rb_right) {
                subtree_gap = rb_entry(vma->vm_rb.rb_right,
                                struct vm_area_struct, vm_rb)->rb_subtree_gap;
                if (subtree_gap > max)
                        max = subtree_gap;
        }
        return max;
}
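
/*
 * Worked example (illustrative): for a vma at [0x3000, 0x4000) whose
 * predecessor ends at 0x1000, the gap in front of it is 0x3000 - 0x1000 =
 * 0x2000 bytes. rb_subtree_gap is the maximum of that gap and the
 * rb_subtree_gap values of the left and right rbtree children, so a
 * top-down search for free space can skip any subtree whose
 * rb_subtree_gap is smaller than the requested length.
 */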

#ifdef CONFIG_DEBUG_VM_RB
static int browse_rb(struct rb_root *root)
{
        int i = 0, j, bug = 0;
        struct rb_node *nd, *pn = NULL;
        unsigned long prev = 0, pend = 0;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                if (vma->vm_start < prev) {
                        printk("vm_start %lx prev %lx\n", vma->vm_start, prev);
                        bug = 1;
                }
                if (vma->vm_start < pend) {
                        printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
                        bug = 1;
                }
                if (vma->vm_start > vma->vm_end) {
                        printk("vm_end %lx < vm_start %lx\n",
                                vma->vm_end, vma->vm_start);
                        bug = 1;
                }
                if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
                        printk("free gap %lx, correct %lx\n",
                               vma->rb_subtree_gap,
                               vma_compute_subtree_gap(vma));
                        bug = 1;
                }
                i++;
                pn = nd;
                prev = vma->vm_start;
                pend = vma->vm_end;
        }
        j = 0;
        for (nd = pn; nd; nd = rb_prev(nd))
                j++;
        if (i != j) {
                printk("backwards %d, forwards %d\n", j, i);
                bug = 1;
        }
        return bug ? -1 : i;
}

static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
{
        struct rb_node *nd;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                BUG_ON(vma != ignore &&
                       vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
        }
}

void validate_mm(struct mm_struct *mm)
{
        int bug = 0;
        int i = 0;
        unsigned long highest_address = 0;
        struct vm_area_struct *vma = mm->mmap;
        while (vma) {
                struct anon_vma_chain *avc;
                vma_lock_anon_vma(vma);
                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                        anon_vma_interval_tree_verify(avc);
                vma_unlock_anon_vma(vma);
                highest_address = vma->vm_end;
                vma = vma->vm_next;
                i++;
        }
        if (i != mm->map_count) {
                printk("map_count %d vm_next %d\n", mm->map_count, i);
                bug = 1;
        }
        if (highest_address != mm->highest_vm_end) {
                printk("mm->highest_vm_end %lx, found %lx\n",
                       mm->highest_vm_end, highest_address);
                bug = 1;
        }
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count) {
                printk("map_count %d rb %d\n", mm->map_count, i);
                bug = 1;
        }
        BUG_ON(bug);
}
#else
#define validate_mm_rb(root, ignore) do { } while (0)
#define validate_mm(mm) do { } while (0)
#endif

RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
                     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)

/*
 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
 * vma->vm_prev->vm_end values changed, without modifying the vma's position
 * in the rbtree.
 */
static void vma_gap_update(struct vm_area_struct *vma)
{
        /*
         * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
         * function that does exactly what we want.
         */
        vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
}

static inline void vma_rb_insert(struct vm_area_struct *vma,
                                 struct rb_root *root)
{
        /* All rb_subtree_gap values must be consistent prior to insertion */
        validate_mm_rb(root, NULL);

        rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
{
        /*
         * All rb_subtree_gap values must be consistent prior to erase,
         * with the possible exception of the vma being erased.
         */
        validate_mm_rb(root, vma);

        /*
         * Note rb_erase_augmented is a fairly large inline function,
         * so make sure we instantiate it only once with our desired
         * augmented rbtree callbacks.
         */
        rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
}

/*
 * vma has some anon_vma assigned, and is already inserted on that
 * anon_vma's interval trees.
 *
 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
 * vma must be removed from the anon_vma's interval trees using
 * anon_vma_interval_tree_pre_update_vma().
 *
 * After the update, the vma will be reinserted using
 * anon_vma_interval_tree_post_update_vma().
 *
 * The entire update must be protected by exclusive mmap_sem and by
 * the root anon_vma's mutex.
 */
static inline void
anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc;

        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
}

static inline void
anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc;

        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
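
/*
 * Illustrative sketch (not part of the original source): the pre/post
 * update protocol described above, as vma_adjust() below applies it when
 * moving a vma boundary. new_start and new_pgoff are hypothetical values.
 */
#if 0
        anon_vma_lock_write(vma->anon_vma);
        anon_vma_interval_tree_pre_update_vma(vma);
        vma->vm_start = new_start;      /* hypothetical new boundary */
        vma->vm_pgoff = new_pgoff;
        anon_vma_interval_tree_post_update_vma(vma);
        anon_vma_unlock_write(vma->anon_vma);
#endif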

static int find_vma_links(struct mm_struct *mm, unsigned long addr,
                unsigned long end, struct vm_area_struct **pprev,
                struct rb_node ***rb_link, struct rb_node **rb_parent)
{
        struct rb_node **__rb_link, *__rb_parent, *rb_prev;

        __rb_link = &mm->mm_rb.rb_node;
        rb_prev = __rb_parent = NULL;

        while (*__rb_link) {
                struct vm_area_struct *vma_tmp;

                __rb_parent = *__rb_link;
                vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

                if (vma_tmp->vm_end > addr) {
                        /* Fail if an existing vma overlaps the area */
                        if (vma_tmp->vm_start < end)
                                return -ENOMEM;
                        __rb_link = &__rb_parent->rb_left;
                } else {
                        rb_prev = __rb_parent;
                        __rb_link = &__rb_parent->rb_right;
                }
        }

        *pprev = NULL;
        if (rb_prev)
                *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
        *rb_link = __rb_link;
        *rb_parent = __rb_parent;
        return 0;
}

static unsigned long count_vma_pages_range(struct mm_struct *mm,
                unsigned long addr, unsigned long end)
{
        unsigned long nr_pages = 0;
        struct vm_area_struct *vma;

        /* Find first overlapping mapping */
        vma = find_vma_intersection(mm, addr, end);
        if (!vma)
                return 0;

        nr_pages = (min(end, vma->vm_end) -
                max(addr, vma->vm_start)) >> PAGE_SHIFT;

        /* Iterate over the rest of the overlaps */
        for (vma = vma->vm_next; vma; vma = vma->vm_next) {
                unsigned long overlap_len;

                if (vma->vm_start > end)
                        break;

                overlap_len = min(end, vma->vm_end) - vma->vm_start;
                nr_pages += overlap_len >> PAGE_SHIFT;
        }

        return nr_pages;
}
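
/*
 * Worked example (illustrative, assuming 4KB pages): with vmas at
 * [0x1000, 0x3000) and [0x5000, 0x6000), count_vma_pages_range(mm,
 * 0x2000, 0x8000) counts (0x3000 - 0x2000) from the first overlap plus
 * (0x6000 - 0x5000) from the second, i.e. two pages.
 */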

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
                struct rb_node **rb_link, struct rb_node *rb_parent)
{
        /* Update tracking information for the gap following the new vma. */
        if (vma->vm_next)
                vma_gap_update(vma->vm_next);
        else
                mm->highest_vm_end = vma->vm_end;

        /*
         * vma->vm_prev wasn't known when we followed the rbtree to find the
         * correct insertion point for that vma. As a result, we could not
         * update the vma vm_rb parents rb_subtree_gap values on the way down.
         * So, we first insert the vma with a zero rb_subtree_gap value
         * (to be consistent with what we did on the way down), and then
         * immediately update the gap to the correct value. Finally we
         * rebalance the rbtree after all augmented values have been set.
         */
        rb_link_node(&vma->vm_rb, rb_parent, rb_link);
        vma->rb_subtree_gap = 0;
        vma_gap_update(vma);
        vma_rb_insert(vma, &mm->mm_rb);
}

static void __vma_link_file(struct vm_area_struct *vma)
{
        struct file *file;

        file = vma->vm_file;
        if (file) {
                struct address_space *mapping = file->f_mapping;

                if (vma->vm_flags & VM_DENYWRITE)
                        atomic_dec(&file_inode(file)->i_writecount);
                if (vma->vm_flags & VM_SHARED)
                        mapping->i_mmap_writable++;

                flush_dcache_mmap_lock(mapping);
                if (unlikely(vma->vm_flags & VM_NONLINEAR))
                        vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                else
                        vma_interval_tree_insert(vma, &mapping->i_mmap);
                flush_dcache_mmap_unlock(mapping);
        }
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_area_struct *prev, struct rb_node **rb_link,
        struct rb_node *rb_parent)
{
        __vma_link_list(mm, vma, prev, rb_parent);
        __vma_link_rb(mm, vma, rb_link, rb_parent);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                        struct vm_area_struct *prev, struct rb_node **rb_link,
                        struct rb_node *rb_parent)
{
        struct address_space *mapping = NULL;

        if (vma->vm_file)
                mapping = vma->vm_file->f_mapping;

        if (mapping)
                mutex_lock(&mapping->i_mmap_mutex);

        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);

        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);

        mm->map_count++;
        validate_mm(mm);
}

/*
 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
 * mm's list and rbtree.  It has already been inserted into the interval tree.
 */
static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
        struct vm_area_struct *prev;
        struct rb_node **rb_link, *rb_parent;

        if (find_vma_links(mm, vma->vm_start, vma->vm_end,
                           &prev, &rb_link, &rb_parent))
                BUG();
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        mm->map_count++;
}

static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev)
{
        struct vm_area_struct *next;

        vma_rb_erase(vma, &mm->mm_rb);
        prev->vm_next = next = vma->vm_next;
        if (next)
                next->vm_prev = prev;
        if (mm->mmap_cache == vma)
                mm->mmap_cache = prev;
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *next = vma->vm_next;
        struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct rb_root *root = NULL;
        struct anon_vma *anon_vma = NULL;
        struct file *file = vma->vm_file;
        bool start_changed = false, end_changed = false;
        long adjust_next = 0;
        int remove_next = 0;

        if (next && !insert) {
                struct vm_area_struct *exporter = NULL;

                if (end >= next->vm_end) {
                        /*
                         * vma expands, overlapping all the next, and
                         * perhaps the one after too (mprotect case 6).
                         */
again:                  remove_next = 1 + (end > next->vm_end);
                        end = next->vm_end;
                        exporter = next;
                        importer = vma;
                } else if (end > next->vm_start) {
                        /*
                         * vma expands, overlapping part of the next:
                         * mprotect case 5 shifting the boundary up.
                         */
                        adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
                        exporter = next;
                        importer = vma;
                } else if (end < vma->vm_end) {
                        /*
                         * vma shrinks, and !insert tells it's not
                         * split_vma inserting another: so it must be
                         * mprotect case 4 shifting the boundary down.
                         */
                        adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
                        exporter = vma;
                        importer = next;
                }

                /*
                 * Easily overlooked: when mprotect shifts the boundary,
                 * make sure the expanding vma has anon_vma set if the
                 * shrinking vma had one, to cover any anon pages imported.
                 */
                if (exporter && exporter->anon_vma && !importer->anon_vma) {
                        if (anon_vma_clone(importer, exporter))
                                return -ENOMEM;
                        importer->anon_vma = exporter->anon_vma;
                }
        }

        if (file) {
                mapping = file->f_mapping;
                if (!(vma->vm_flags & VM_NONLINEAR)) {
                        root = &mapping->i_mmap;
                        uprobe_munmap(vma, vma->vm_start, vma->vm_end);

                        if (adjust_next)
                                uprobe_munmap(next, next->vm_start,
                                                        next->vm_end);
                }

                mutex_lock(&mapping->i_mmap_mutex);
                if (insert) {
                        /*
                         * Put into interval tree now, so instantiated pages
                         * are visible to arm/parisc __flush_dcache_page
                         * throughout; but we cannot insert into address
                         * space until vma start or end is updated.
                         */
                        __vma_link_file(insert);
                }
        }

        vma_adjust_trans_huge(vma, start, end, adjust_next);

        anon_vma = vma->anon_vma;
        if (!anon_vma && adjust_next)
                anon_vma = next->anon_vma;
        if (anon_vma) {
                VM_BUG_ON(adjust_next && next->anon_vma &&
                          anon_vma != next->anon_vma);
                anon_vma_lock_write(anon_vma);
                anon_vma_interval_tree_pre_update_vma(vma);
                if (adjust_next)
                        anon_vma_interval_tree_pre_update_vma(next);
        }

        if (root) {
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_remove(vma, root);
                if (adjust_next)
                        vma_interval_tree_remove(next, root);
        }

        if (start != vma->vm_start) {
                vma->vm_start = start;
                start_changed = true;
        }
        if (end != vma->vm_end) {
                vma->vm_end = end;
                end_changed = true;
        }
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
                next->vm_start += adjust_next << PAGE_SHIFT;
                next->vm_pgoff += adjust_next;
        }

        if (root) {
                if (adjust_next)
                        vma_interval_tree_insert(next, root);
                vma_interval_tree_insert(vma, root);
                flush_dcache_mmap_unlock(mapping);
        }

        if (remove_next) {
                /*
                 * vma_merge has merged next into vma, and needs
                 * us to remove next before dropping the locks.
                 */
                __vma_unlink(mm, next, vma);
                if (file)
                        __remove_shared_vm_struct(next, file, mapping);
        } else if (insert) {
                /*
                 * split_vma has split insert from vma, and needs
                 * us to insert it before dropping the locks
                 * (it may either follow vma or precede it).
                 */
                __insert_vm_struct(mm, insert);
        } else {
                if (start_changed)
                        vma_gap_update(vma);
                if (end_changed) {
                        if (!next)
                                mm->highest_vm_end = end;
                        else if (!adjust_next)
                                vma_gap_update(next);
                }
        }

        if (anon_vma) {
                anon_vma_interval_tree_post_update_vma(vma);
                if (adjust_next)
                        anon_vma_interval_tree_post_update_vma(next);
                anon_vma_unlock_write(anon_vma);
        }
        if (mapping)
                mutex_unlock(&mapping->i_mmap_mutex);

        if (root) {
                uprobe_mmap(vma);

                if (adjust_next)
                        uprobe_mmap(next);
        }

        if (remove_next) {
                if (file) {
                        uprobe_munmap(next, next->vm_start, next->vm_end);
                        fput(file);
                }
                if (next->anon_vma)
                        anon_vma_merge(vma, next);
                mm->map_count--;
                vma_set_policy(vma, vma_policy(next));
                kmem_cache_free(vm_area_cachep, next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we must remove another next too. It would clutter
                 * up the code too much to do both in one go.
                 */
                next = vma->vm_next;
                if (remove_next == 2)
                        goto again;
                else if (next)
                        vma_gap_update(next);
                else
                        mm->highest_vm_end = end;
        }
        if (insert && file)
                uprobe_mmap(insert);

        validate_mm(mm);

        return 0;
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
static inline int is_mergeable_vma(struct vm_area_struct *vma,
                        struct file *file, unsigned long vm_flags)
{
        if (vma->vm_flags ^ vm_flags)
                return 0;
        if (vma->vm_file != file)
                return 0;
        if (vma->vm_ops && vma->vm_ops->close)
                return 0;
        return 1;
}

static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
                                        struct anon_vma *anon_vma2,
                                        struct vm_area_struct *vma)
{
        /*
         * The list_is_singular() test is to avoid merging VMAs cloned from
         * parents; this reduces contention on the anon_vma lock and helps
         * scalability.
         */
        if ((!anon_vma1 || !anon_vma2) && (!vma ||
                list_is_singular(&vma->anon_vma_chain)))
                return 1;
        return anon_vma1 == anon_vma2;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
        if (is_mergeable_vma(vma, file, vm_flags) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
        }
        return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
        struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
        if (is_mergeable_vma(vma, file, vm_flags) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                if (vma->vm_pgoff + vm_pglen == vm_pgoff)
                        return 1;
        }
        return 0;
}
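
/*
 * Worked example (illustrative, assuming 4KB pages): a vma covering file
 * pages [10, 14) at [0x10000, 0x14000) has vm_pgoff == 10 and
 * vm_pglen == 4. A new mapping of the same file starting at 0x14000 can
 * merge after it only if its vm_pgoff is 14, i.e. the file offsets
 * continue seamlessly across the boundary.
 */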

/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
                        pgoff_t pgoff, struct mempolicy *policy)
{
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
        int err;

        /*
         * We later require that vma->vm_flags == vm_flags,
         * so this tests vma->vm_flags & VM_SPECIAL, too.
         */
        if (vm_flags & VM_SPECIAL)
                return NULL;

        if (prev)
                next = prev->vm_next;
        else
                next = mm->mmap;
        area = next;
        if (next && next->vm_end == end)                /* cases 6, 7, 8 */
                next = next->vm_next;

        /*
         * Can it merge with the predecessor?
         */
        if (prev && prev->vm_end == addr &&
                        mpol_equal(vma_policy(prev), policy) &&
                        can_vma_merge_after(prev, vm_flags,
                                                anon_vma, file, pgoff)) {
                /*
                 * OK, it can.  Can we now merge in the successor as well?
                 */
                if (next && end == next->vm_start &&
                                mpol_equal(policy, vma_policy(next)) &&
                                can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen) &&
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
                        err = vma_adjust(prev, prev->vm_start,
                                next->vm_end, prev->vm_pgoff, NULL);
                } else                                  /* cases 2, 5, 7 */
                        err = vma_adjust(prev, prev->vm_start,
                                end, prev->vm_pgoff, NULL);
                if (err)
                        return NULL;
                khugepaged_enter_vma_merge(prev);
                return prev;
        }

        /*
         * Can this new request be merged in front of next?
         */
        if (next && end == next->vm_start &&
                        mpol_equal(policy, vma_policy(next)) &&
                        can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
                        err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
                else                                    /* cases 3, 8 */
                        err = vma_adjust(area, addr, next->vm_end,
                                next->vm_pgoff - pglen, NULL);
                if (err)
                        return NULL;
                khugepaged_enter_vma_merge(area);
                return area;
        }

        return NULL;
}
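
/*
 * Illustrative sketch (userspace, not part of this file): a sequence that
 * exercises the merge logic above. Splitting an anonymous mapping with
 * mprotect() creates three vmas; restoring the middle page's protection
 * lets case 6 merge all three back into one, visible as a single line in
 * /proc/self/maps.
 */
#if 0
#include <sys/mman.h>

static void example_remerge(void)
{
        char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        mprotect(p + 4096, 4096, PROT_READ);              /* split: 3 vmas */
        mprotect(p + 4096, 4096, PROT_READ | PROT_WRITE); /* case 6: 1 vma */
}
#endif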

/*
 * Rough compatibility check to quickly see if it's even worth looking
 * at sharing an anon_vma.
 *
 * They need to have the same vm_file, and the flags can only differ
 * in things that mprotect may change.
 *
 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
 * we can merge the two vma's. For example, we refuse to merge a vma if
 * there is a vm_ops->close() function, because that indicates that the
 * driver is doing some kind of reference counting. But that doesn't
 * really matter for the anon_vma sharing case.
 */
static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
{
        return a->vm_end == b->vm_start &&
                mpol_equal(vma_policy(a), vma_policy(b)) &&
                a->vm_file == b->vm_file &&
                !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
                b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
}

/*
 * Do some basic sanity checking to see if we can re-use the anon_vma
 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
 * the same as 'old', the other will be the new one that is trying
 * to share the anon_vma.
 *
 * NOTE! This runs with mmap_sem held for reading, so it is possible that
 * the anon_vma of 'old' is concurrently in the process of being set up
 * by another page fault trying to merge _that_. But that's ok: if it
 * is being set up, that automatically means that it will be a singleton
 * acceptable for merging, so we can do all of this optimistically. But
 * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
 *
 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
 * is to return an anon_vma that is "complex" due to having gone through
 * a fork).
 *
 * We also make sure that the two vma's are compatible (adjacent,
 * and with the same memory policies). That's all stable, even with just
 * a read lock on the mmap_sem.
 */
static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
{
        if (anon_vma_compatible(a, b)) {
                struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);

                if (anon_vma && list_is_singular(&old->anon_vma_chain))
                        return anon_vma;
        }
        return NULL;
}

/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *near;

        near = vma->vm_next;
        if (!near)
                goto try_prev;

        anon_vma = reusable_anon_vma(near, vma, near);
        if (anon_vma)
                return anon_vma;
try_prev:
        near = vma->vm_prev;
        if (!near)
                goto none;

        anon_vma = reusable_anon_vma(near, near, vma);
        if (anon_vma)
                return anon_vma;
none:
        /*
         * There's no absolute need to look only at touching neighbours:
         * we could search further afield for "compatible" anon_vmas.
         * But it would probably just be a waste of time searching,
         * or lead to too many vmas hanging off the same anon_vma.
         * We're trying to allow mprotect remerging later on,
         * not trying to minimize memory used for anon_vmas.
         */
        return NULL;
}

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
                                                struct file *file, long pages)
{
        const unsigned long stack_flags
                = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

        mm->total_vm += pages;

        if (file) {
                mm->shared_vm += pages;
                if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
                        mm->exec_vm += pages;
        } else if (flags & stack_flags)
                mm->stack_vm += pages;
}
#endif /* CONFIG_PROC_FS */

/*
 * If a hint addr is less than mmap_min_addr, change the hint to be as
 * low as possible but still greater than mmap_min_addr
 */
static inline unsigned long round_hint_to_min(unsigned long hint)
{
        hint &= PAGE_MASK;
        if (((void *)hint != NULL) &&
            (hint < mmap_min_addr))
                return PAGE_ALIGN(mmap_min_addr);
        return hint;
}
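
/*
 * Worked example (illustrative): with mmap_min_addr == 65536 and a hint
 * of 0x4123, the hint is first page-masked to 0x4000, and, being non-NULL
 * but below mmap_min_addr, rounded up to PAGE_ALIGN(65536) == 0x10000.
 * A NULL hint is passed through unchanged so the kernel picks the
 * address itself.
 */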

/*
 * The caller must hold down_write(&current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long len, unsigned long prot,
                        unsigned long flags, unsigned long pgoff,
                        unsigned long *populate)
{
        struct mm_struct * mm = current->mm;
        struct inode *inode;
        vm_flags_t vm_flags;

        *populate = 0;

        /*
         * Does the application expect PROT_READ to imply PROT_EXEC?
         *
         * (the exception is when the underlying filesystem is noexec
         *  mounted, in which case we don't add PROT_EXEC.)
         */
        if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
                if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
                        prot |= PROT_EXEC;

        if (!len)
                return -EINVAL;

        if (!(flags & MAP_FIXED))
                addr = round_hint_to_min(addr);

        /* Careful about overflows.. */
        len = PAGE_ALIGN(len);
        if (!len)
                return -ENOMEM;

        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
                return -EOVERFLOW;

        /* Too many mappings? */
        if (mm->map_count > sysctl_max_map_count)
                return -ENOMEM;

        /* Obtain the address to map to. We verify (or select) it and ensure
         * that it represents a valid section of the address space.
         */
        addr = get_unmapped_area(file, addr, len, pgoff, flags);
        if (addr & ~PAGE_MASK)
                return addr;

        /* Do simple checking here so the lower-level routines won't have
         * to. We assume access permissions have been handled by the open
         * of the memory object, so we don't do any here.
         */
        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
                        mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

        if (flags & MAP_LOCKED)
                if (!can_do_mlock())
                        return -EPERM;

        /* mlock MCL_FUTURE? */
        if (vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = len >> PAGE_SHIFT;
                locked += mm->locked_vm;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                lock_limit >>= PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return -EAGAIN;
        }

        inode = file ? file_inode(file) : NULL;

        if (file) {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure we don't allow writing to an append-only
                         * file..
                         */
                        if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
                                return -EACCES;

                        /*
                         * Make sure there are no mandatory locks on the file.
                         */
                        if (locks_verify_locked(inode))
                                return -EAGAIN;

                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        if (!(file->f_mode & FMODE_WRITE))
                                vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

                        /* fall through */
                case MAP_PRIVATE:
                        if (!(file->f_mode & FMODE_READ))
                                return -EACCES;
                        if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
                                if (vm_flags & VM_EXEC)
                                        return -EPERM;
                                vm_flags &= ~VM_MAYEXEC;
                        }

                        if (!file->f_op || !file->f_op->mmap)
                                return -ENODEV;
                        break;

                default:
                        return -EINVAL;
                }
        } else {
                switch (flags & MAP_TYPE) {
                case MAP_SHARED:
                        /*
                         * Ignore pgoff.
                         */
                        pgoff = 0;
                        vm_flags |= VM_SHARED | VM_MAYSHARE;
                        break;
                case MAP_PRIVATE:
                        /*
                         * Set pgoff according to addr for anon_vma.
                         */
                        pgoff = addr >> PAGE_SHIFT;
                        break;
                default:
                        return -EINVAL;
                }
        }

        /*
         * Set 'VM_NORESERVE' if we should not account for the
         * memory use of this mapping.
         */
        if (flags & MAP_NORESERVE) {
                /* We honor MAP_NORESERVE if allowed to overcommit */
                if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        vm_flags |= VM_NORESERVE;

                /* hugetlb applies strict overcommit unless MAP_NORESERVE */
                if (file && is_file_hugepages(file))
                        vm_flags |= VM_NORESERVE;
        }

        addr = mmap_region(file, addr, len, vm_flags, pgoff);
        if (!IS_ERR_VALUE(addr) &&
            ((vm_flags & VM_LOCKED) ||
             (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
                *populate = len;
        return addr;
}
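
/*
 * Illustrative sketch (userspace, not part of this file): requesting
 * population. mmap() with MAP_POPULATE (and without MAP_NONBLOCK) makes
 * do_mmap_pgoff() report the length back through *populate, so the
 * caller, vm_mmap_pgoff(), pre-faults the range with mm_populate().
 */
#if 0
#include <sys/mman.h>

void *example_prefaulted(size_t len)
{
        return mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
}
#endif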

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
{
        struct file *file = NULL;
        unsigned long retval = -EBADF;

        if (!(flags & MAP_ANONYMOUS)) {
                audit_mmap_fd(fd, flags);
                if (unlikely(flags & MAP_HUGETLB))
                        return -EINVAL;
                file = fget(fd);
                if (!file)
                        goto out;
                if (is_file_hugepages(file))
                        len = ALIGN(len, huge_page_size(hstate_file(file)));
        } else if (flags & MAP_HUGETLB) {
                struct user_struct *user = NULL;
                struct hstate *hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
                                                   SHM_HUGE_MASK);

                if (!hs)
                        return -EINVAL;

                len = ALIGN(len, huge_page_size(hs));
                /*
                 * VM_NORESERVE is used because the reservations will be
                 * taken when vm_ops->mmap() is called
                 * A dummy user value is used because we are not locking
                 * memory so no accounting is necessary
                 */
                file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
                                VM_NORESERVE,
                                &user, HUGETLB_ANONHUGE_INODE,
                                (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
                if (IS_ERR(file))
                        return PTR_ERR(file);
        }

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        if (file)
                fput(file);
out:
        return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
        struct mmap_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        if (a.offset & ~PAGE_MASK)
                return -EINVAL;

        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
                              a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
1430int vma_wants_writenotify(struct vm_area_struct *vma)
1431{
1432        vm_flags_t vm_flags = vma->vm_flags;
1433
1434        /* If it was private or non-writable, the write bit is already clear */
1435        if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1436                return 0;
1437
1438        /* The backer wishes to know when pages are first written to? */
1439        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1440                return 1;
1441
1442        /* The open routine did something to the protections already? */
1443        if (pgprot_val(vma->vm_page_prot) !=
1444            pgprot_val(vm_get_page_prot(vm_flags)))
1445                return 0;
1446
1447        /* Specialty mapping? */
1448        if (vm_flags & VM_PFNMAP)
1449                return 0;
1450
1451        /* Can the mapping track the dirty pages? */
1452        return vma->vm_file && vma->vm_file->f_mapping &&
1453                mapping_cap_account_dirty(vma->vm_file->f_mapping);
1454}
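
/*
 * Illustrative sketch: mmap_region() below acts on a positive return
 * by rebuilding the protections without the shared bit,
 *
 *	if (vma_wants_writenotify(vma))
 *		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 *
 * so the first write to each page faults and page_mkwrite() or the
 * dirty accounting can observe it.
 */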
1455
1456/*
1457 * We account for memory if it's a private writeable mapping,
1458 * not hugepages and VM_NORESERVE wasn't set.
1459 */
1460static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1461{
1462        /*
1463         * hugetlb has its own accounting separate from the core VM
1464         * VM_HUGETLB may not be set yet so we cannot check for that flag.
1465         */
1466        if (file && is_file_hugepages(file))
1467                return 0;
1468
1469        return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1470}
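
/*
 * Worked example (assumed flags): a MAP_PRIVATE PROT_READ|PROT_WRITE
 * anonymous mapping has VM_WRITE set and VM_SHARED/VM_NORESERVE clear,
 * so the expression above equals VM_WRITE and the mapping is charged
 * against the overcommit limits. A MAP_SHARED mapping, a read-only
 * private mapping, or anything with VM_NORESERVE is not charged here.
 */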
1471
1472unsigned long mmap_region(struct file *file, unsigned long addr,
1473                unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
1474{
1475        struct mm_struct *mm = current->mm;
1476        struct vm_area_struct *vma, *prev;
1477        int correct_wcount = 0;
1478        int error;
1479        struct rb_node **rb_link, *rb_parent;
1480        unsigned long charged = 0;
1481        struct inode *inode =  file ? file_inode(file) : NULL;
1482
1483        /* Check against address space limit. */
1484        if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
1485                unsigned long nr_pages;
1486
1487                /*
1488                 * MAP_FIXED may remove pages of mappings that intersect with
1489                 * the requested mapping. Account for the pages it would unmap.
1490                 */
1491                if (!(vm_flags & MAP_FIXED))
1492                        return -ENOMEM;
1493
1494                nr_pages = count_vma_pages_range(mm, addr, addr + len);
1495
1496                if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
1497                        return -ENOMEM;
1498        }
1499
1500        /* Clear old maps */
1501        error = -ENOMEM;
1502munmap_back:
1503        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
1504                if (do_munmap(mm, addr, len))
1505                        return -ENOMEM;
1506                goto munmap_back;
1507        }
1508
1509        /*
1510         * Private writable mapping: check memory availability
1511         */
1512        if (accountable_mapping(file, vm_flags)) {
1513                charged = len >> PAGE_SHIFT;
1514                if (security_vm_enough_memory_mm(mm, charged))
1515                        return -ENOMEM;
1516                vm_flags |= VM_ACCOUNT;
1517        }
1518
1519        /*
1520         * Can we just expand an old mapping?
1521         */
1522        vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
1523        if (vma)
1524                goto out;
1525
1526        /*
1527         * Determine the object being mapped and call the appropriate
1528         * specific mapper. The address has already been validated and
1529         * any old mappings in the range have already been removed.
1530         */
1531        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1532        if (!vma) {
1533                error = -ENOMEM;
1534                goto unacct_error;
1535        }
1536
1537        vma->vm_mm = mm;
1538        vma->vm_start = addr;
1539        vma->vm_end = addr + len;
1540        vma->vm_flags = vm_flags;
1541        vma->vm_page_prot = vm_get_page_prot(vm_flags);
1542        vma->vm_pgoff = pgoff;
1543        INIT_LIST_HEAD(&vma->anon_vma_chain);
1544
1545        error = -EINVAL;        /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
1546
1547        if (file) {
1548                if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1549                        goto free_vma;
1550                if (vm_flags & VM_DENYWRITE) {
1551                        error = deny_write_access(file);
1552                        if (error)
1553                                goto free_vma;
1554                        correct_wcount = 1;
1555                }
1556                vma->vm_file = get_file(file);
1557                error = file->f_op->mmap(file, vma);
1558                if (error)
1559                        goto unmap_and_free_vma;
1560
1561                /* Can addr have changed??
1562                 *
1563                 * Answer: Yes, several device drivers can do it in their
1564                 *         f_op->mmap method. -DaveM
1565                 * Bug: If addr is changed, prev, rb_link, rb_parent should
1566                 *      be updated for vma_link()
1567                 */
1568                WARN_ON_ONCE(addr != vma->vm_start);
1569
1570                addr = vma->vm_start;
1571                pgoff = vma->vm_pgoff;
1572                vm_flags = vma->vm_flags;
1573        } else if (vm_flags & VM_SHARED) {
1574                if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
1575                        goto free_vma;
1576                error = shmem_zero_setup(vma);
1577                if (error)
1578                        goto free_vma;
1579        }
1580
1581        if (vma_wants_writenotify(vma)) {
1582                pgprot_t pprot = vma->vm_page_prot;
1583
1584                /* Can vma->vm_page_prot have changed??
1585                 *
1586                 * Answer: Yes, drivers may have changed it in their
1587                 *         f_op->mmap method.
1588                 *
1589                 * Ensures that vmas marked as uncached stay that way.
1590                 */
1591                vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
1592                if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
1593                        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1594        }
1595
1596        vma_link(mm, vma, prev, rb_link, rb_parent);
1597        file = vma->vm_file;
1598
1599        /* Once vma denies write, undo our temporary denial count */
1600        if (correct_wcount)
1601                atomic_inc(&inode->i_writecount);
1602out:
1603        perf_event_mmap(vma);
1604
1605        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1606        if (vm_flags & VM_LOCKED) {
1607                if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
1608                                        vma == get_gate_vma(current->mm)))
1609                        mm->locked_vm += (len >> PAGE_SHIFT);
1610                else
1611                        vma->vm_flags &= ~VM_LOCKED;
1612        }
1613
1614        if (file)
1615                uprobe_mmap(vma);
1616
1617        return addr;
1618
1619unmap_and_free_vma:
1620        if (correct_wcount)
1621                atomic_inc(&inode->i_writecount);
1622        vma->vm_file = NULL;
1623        fput(file);
1624
1625        /* Undo any partial mapping done by a device driver. */
1626        unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1627        charged = 0;
1628free_vma:
1629        kmem_cache_free(vm_area_cachep, vma);
1630unacct_error:
1631        if (charged)
1632                vm_unacct_memory(charged);
1633        return error;
1634}
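
/*
 * Descriptive note on the error unwinding above: each entry point
 * falls through the labels below it. unmap_and_free_vma undoes a
 * partial driver ->mmap() and drops the file reference, free_vma
 * releases the vm_area_struct, and unacct_error gives back any charge
 * taken by security_vm_enough_memory_mm() earlier in this function.
 */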
1635
1636unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1637{
1638        /*
1639         * We implement the search by looking for an rbtree node that
1640         * immediately follows a suitable gap. That is,
1641         * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
1642         * - gap_end   = vma->vm_start        >= info->low_limit  + length;
1643         * - gap_end - gap_start >= length
1644         */
1645
1646        struct mm_struct *mm = current->mm;
1647        struct vm_area_struct *vma;
1648        unsigned long length, low_limit, high_limit, gap_start, gap_end;
1649
1650        /* Adjust search length to account for worst case alignment overhead */
1651        length = info->length + info->align_mask;
1652        if (length < info->length)
1653                return -ENOMEM;
1654
1655        /* Adjust search limits by the desired length */
1656        if (info->high_limit < length)
1657                return -ENOMEM;
1658        high_limit = info->high_limit - length;
1659
1660        if (info->low_limit > high_limit)
1661                return -ENOMEM;
1662        low_limit = info->low_limit + length;
1663
1664        /* Check if rbtree root looks promising */
1665        if (RB_EMPTY_ROOT(&mm->mm_rb))
1666                goto check_highest;
1667        vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1668        if (vma->rb_subtree_gap < length)
1669                goto check_highest;
1670
1671        while (true) {
1672                /* Visit left subtree if it looks promising */
1673                gap_end = vma->vm_start;
1674                if (gap_end >= low_limit && vma->vm_rb.rb_left) {
1675                        struct vm_area_struct *left =
1676                                rb_entry(vma->vm_rb.rb_left,
1677                                         struct vm_area_struct, vm_rb);
1678                        if (left->rb_subtree_gap >= length) {
1679                                vma = left;
1680                                continue;
1681                        }
1682                }
1683
1684                gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1685check_current:
1686                /* Check if current node has a suitable gap */
1687                if (gap_start > high_limit)
1688                        return -ENOMEM;
1689                if (gap_end >= low_limit && gap_end - gap_start >= length)
1690                        goto found;
1691
1692                /* Visit right subtree if it looks promising */
1693                if (vma->vm_rb.rb_right) {
1694                        struct vm_area_struct *right =
1695                                rb_entry(vma->vm_rb.rb_right,
1696                                         struct vm_area_struct, vm_rb);
1697                        if (right->rb_subtree_gap >= length) {
1698                                vma = right;
1699                                continue;
1700                        }
1701                }
1702
1703                /* Go back up the rbtree to find next candidate node */
1704                while (true) {
1705                        struct rb_node *prev = &vma->vm_rb;
1706                        if (!rb_parent(prev))
1707                                goto check_highest;
1708                        vma = rb_entry(rb_parent(prev),
1709                                       struct vm_area_struct, vm_rb);
1710                        if (prev == vma->vm_rb.rb_left) {
1711                                gap_start = vma->vm_prev->vm_end;
1712                                gap_end = vma->vm_start;
1713                                goto check_current;
1714                        }
1715                }
1716        }
1717
1718check_highest:
1719        /* Check highest gap, which does not precede any rbtree node */
1720        gap_start = mm->highest_vm_end;
1721        gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
1722        if (gap_start > high_limit)
1723                return -ENOMEM;
1724
1725found:
1726        /* We found a suitable gap. Clip it with the original low_limit. */
1727        if (gap_start < info->low_limit)
1728                gap_start = info->low_limit;
1729
1730        /* Adjust gap address to the desired alignment */
1731        gap_start += (info->align_offset - gap_start) & info->align_mask;
1732
1733        VM_BUG_ON(gap_start + info->length > info->high_limit);
1734        VM_BUG_ON(gap_start + info->length > gap_end);
1735        return gap_start;
1736}
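
/*
 * Worked example for the alignment step above (assumed values): with
 * info->align_mask = 0xfff (4KB alignment) and info->align_offset = 0,
 * a candidate gap_start of 0x10234 is adjusted by
 *
 *	gap_start += (0 - 0x10234) & 0xfff;	-- +0xdcc -> 0x11000
 *
 * i.e. rounded up to the next aligned address; the padding of length
 * by align_mask at the top of the function guarantees the request
 * still fits after this adjustment.
 */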
1737
1738unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1739{
1740        struct mm_struct *mm = current->mm;
1741        struct vm_area_struct *vma;
1742        unsigned long length, low_limit, high_limit, gap_start, gap_end;
1743
1744        /* Adjust search length to account for worst case alignment overhead */
1745        length = info->length + info->align_mask;
1746        if (length < info->length)
1747                return -ENOMEM;
1748
1749        /*
1750         * Adjust search limits by the desired length.
1751         * See implementation comment at top of unmapped_area().
1752         */
1753        gap_end = info->high_limit;
1754        if (gap_end < length)
1755                return -ENOMEM;
1756        high_limit = gap_end - length;
1757
1758        if (info->low_limit > high_limit)
1759                return -ENOMEM;
1760        low_limit = info->low_limit + length;
1761
1762        /* Check highest gap, which does not precede any rbtree node */
1763        gap_start = mm->highest_vm_end;
1764        if (gap_start <= high_limit)
1765                goto found_highest;
1766
1767        /* Check if rbtree root looks promising */
1768        if (RB_EMPTY_ROOT(&mm->mm_rb))
1769                return -ENOMEM;
1770        vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1771        if (vma->rb_subtree_gap < length)
1772                return -ENOMEM;
1773
1774        while (true) {
1775                /* Visit right subtree if it looks promising */
1776                gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1777                if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1778                        struct vm_area_struct *right =
1779                                rb_entry(vma->vm_rb.rb_right,
1780                                         struct vm_area_struct, vm_rb);
1781                        if (right->rb_subtree_gap >= length) {
1782                                vma = right;
1783                                continue;
1784                        }
1785                }
1786
1787check_current:
1788                /* Check if current node has a suitable gap */
1789                gap_end = vma->vm_start;
1790                if (gap_end < low_limit)
1791                        return -ENOMEM;
1792                if (gap_start <= high_limit && gap_end - gap_start >= length)
1793                        goto found;
1794
1795                /* Visit left subtree if it looks promising */
1796                if (vma->vm_rb.rb_left) {
1797                        struct vm_area_struct *left =
1798                                rb_entry(vma->vm_rb.rb_left,
1799                                         struct vm_area_struct, vm_rb);
1800                        if (left->rb_subtree_gap >= length) {
1801                                vma = left;
1802                                continue;
1803                        }
1804                }
1805
1806                /* Go back up the rbtree to find next candidate node */
1807                while (true) {
1808                        struct rb_node *prev = &vma->vm_rb;
1809                        if (!rb_parent(prev))
1810                                return -ENOMEM;
1811                        vma = rb_entry(rb_parent(prev),
1812                                       struct vm_area_struct, vm_rb);
1813                        if (prev == vma->vm_rb.rb_right) {
1814                                gap_start = vma->vm_prev ?
1815                                        vma->vm_prev->vm_end : 0;
1816                                goto check_current;
1817                        }
1818                }
1819        }
1820
1821found:
1822        /* We found a suitable gap. Clip it with the original high_limit. */
1823        if (gap_end > info->high_limit)
1824                gap_end = info->high_limit;
1825
1826found_highest:
1827        /* Compute highest gap address at the desired alignment */
1828        gap_end -= info->length;
1829        gap_end -= (gap_end - info->align_offset) & info->align_mask;
1830
1831        VM_BUG_ON(gap_end < info->low_limit);
1832        VM_BUG_ON(gap_end < gap_start);
1833        return gap_end;
1834}
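
/*
 * Mirror example for the top-down case (assumed values): with
 * info->length = 0x3000, align_mask = 0xfff and align_offset = 0, a
 * gap ending at 0x25678 yields
 *
 *	gap_end -= 0x3000;			-- 0x22678
 *	gap_end -= (0x22678 - 0) & 0xfff;	-- 0x22000
 *
 * the highest aligned address at which the request still fits below
 * the original gap end.
 */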
1835
1836/* Get an address range which is currently unmapped.
1837 * For shmat() with addr=0.
1838 *
1839 * Ugly calling convention alert:
1840 * A return value with the low bits set means an error value,
1841 * i.e.
1842 *      if (ret & ~PAGE_MASK)
1843 *              error = ret;
1844 *
1845 * This function "knows" that -ENOMEM has the bits set.
1846 */
1847#ifndef HAVE_ARCH_UNMAPPED_AREA
1848unsigned long
1849arch_get_unmapped_area(struct file *filp, unsigned long addr,
1850                unsigned long len, unsigned long pgoff, unsigned long flags)
1851{
1852        struct mm_struct *mm = current->mm;
1853        struct vm_area_struct *vma;
1854        struct vm_unmapped_area_info info;
1855
1856        if (len > TASK_SIZE)
1857                return -ENOMEM;
1858
1859        if (flags & MAP_FIXED)
1860                return addr;
1861
1862        if (addr) {
1863                addr = PAGE_ALIGN(addr);
1864                vma = find_vma(mm, addr);
1865                if (TASK_SIZE - len >= addr &&
1866                    (!vma || addr + len <= vma->vm_start))
1867                        return addr;
1868        }
1869
1870        info.flags = 0;
1871        info.length = len;
1872        info.low_limit = TASK_UNMAPPED_BASE;
1873        info.high_limit = TASK_SIZE;
1874        info.align_mask = 0;
1875        return vm_unmapped_area(&info);
1876}
1877#endif
1878
1879void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1880{
1881        /*
1882         * Is this a new hole at the lowest possible address?
1883         */
1884        if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
1885                mm->free_area_cache = addr;
1886}
1887
1888/*
1889 * This mmap-allocator allocates new areas top-down from below the
1890 * stack's low limit (the base):
1891 */
1892#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1893unsigned long
1894arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1895                          const unsigned long len, const unsigned long pgoff,
1896                          const unsigned long flags)
1897{
1898        struct vm_area_struct *vma;
1899        struct mm_struct *mm = current->mm;
1900        unsigned long addr = addr0;
1901        struct vm_unmapped_area_info info;
1902
1903        /* requested length too big for entire address space */
1904        if (len > TASK_SIZE)
1905                return -ENOMEM;
1906
1907        if (flags & MAP_FIXED)
1908                return addr;
1909
1910        /* requesting a specific address */
1911        if (addr) {
1912                addr = PAGE_ALIGN(addr);
1913                vma = find_vma(mm, addr);
1914                if (TASK_SIZE - len >= addr &&
1915                                (!vma || addr + len <= vma->vm_start))
1916                        return addr;
1917        }
1918
1919        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1920        info.length = len;
1921        info.low_limit = PAGE_SIZE;
1922        info.high_limit = mm->mmap_base;
1923        info.align_mask = 0;
1924        addr = vm_unmapped_area(&info);
1925
1926        /*
1927         * A failed mmap() very likely causes application failure,
1928         * so fall back to the bottom-up function here. This scenario
1929         * can happen with large stack limits and large mmap()
1930         * allocations.
1931         */
1932        if (addr & ~PAGE_MASK) {
1933                VM_BUG_ON(addr != -ENOMEM);
1934                info.flags = 0;
1935                info.low_limit = TASK_UNMAPPED_BASE;
1936                info.high_limit = TASK_SIZE;
1937                addr = vm_unmapped_area(&info);
1938        }
1939
1940        return addr;
1941}
1942#endif
1943
1944void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
1945{
1946        /*
1947         * Is this a new hole at the highest possible address?
1948         */
1949        if (addr > mm->free_area_cache)
1950                mm->free_area_cache = addr;
1951
1952        /* don't allow allocations above the current base */
1953        if (mm->free_area_cache > mm->mmap_base)
1954                mm->free_area_cache = mm->mmap_base;
1955}
1956
1957unsigned long
1958get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1959                unsigned long pgoff, unsigned long flags)
1960{
1961        unsigned long (*get_area)(struct file *, unsigned long,
1962                                  unsigned long, unsigned long, unsigned long);
1963
1964        unsigned long error = arch_mmap_check(addr, len, flags);
1965        if (error)
1966                return error;
1967
1968        /* Careful about overflows.. */
1969        if (len > TASK_SIZE)
1970                return -ENOMEM;
1971
1972        get_area = current->mm->get_unmapped_area;
1973        if (file && file->f_op && file->f_op->get_unmapped_area)
1974                get_area = file->f_op->get_unmapped_area;
1975        addr = get_area(file, addr, len, pgoff, flags);
1976        if (IS_ERR_VALUE(addr))
1977                return addr;
1978
1979        if (addr > TASK_SIZE - len)
1980                return -ENOMEM;
1981        if (addr & ~PAGE_MASK)
1982                return -EINVAL;
1983
1984        addr = arch_rebalance_pgtables(addr, len);
1985        error = security_mmap_addr(addr);
1986        return error ? error : addr;
1987}
1988
1989EXPORT_SYMBOL(get_unmapped_area);
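
/*
 * Caller-side sketch (assumed): per the "ugly calling convention" note
 * above, success is a page-aligned address and failure is a negative
 * errno, so callers test the low bits rather than comparing against
 * NULL:
 *
 *	addr = get_unmapped_area(file, addr, len, pgoff, flags);
 *	if (addr & ~PAGE_MASK)
 *		return addr;	-- error, e.g. -ENOMEM
 */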
1990
1991/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1992struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1993{
1994        struct vm_area_struct *vma = NULL;
1995
1996        /* Check the cache first. */
1997        /* (Cache hit rate is typically around 35%.) */
1998        vma = ACCESS_ONCE(mm->mmap_cache);
1999        if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
2000                struct rb_node *rb_node;
2001
2002                rb_node = mm->mm_rb.rb_node;
2003                vma = NULL;
2004
2005                while (rb_node) {
2006                        struct vm_area_struct *vma_tmp;
2007
2008                        vma_tmp = rb_entry(rb_node,
2009                                           struct vm_area_struct, vm_rb);
2010
2011                        if (vma_tmp->vm_end > addr) {
2012                                vma = vma_tmp;
2013                                if (vma_tmp->vm_start <= addr)
2014                                        break;
2015                                rb_node = rb_node->rb_left;
2016                        } else
2017                                rb_node = rb_node->rb_right;
2018                }
2019                if (vma)
2020                        mm->mmap_cache = vma;
2021        }
2022        return vma;
2023}
2024
2025EXPORT_SYMBOL(find_vma);
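
/*
 * Usage sketch (assumed): find_vma() returns the first vma with
 * addr < vm_end, which may lie entirely above addr. Callers that need
 * "addr is actually mapped" must also check vm_start:
 *
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		-- addr falls inside vma
 *	else
 *		-- addr is in a hole, or above the last vma
 */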
2026
2027/*
2028 * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
2029 */
2030struct vm_area_struct *
2031find_vma_prev(struct mm_struct *mm, unsigned long addr,
2032                        struct vm_area_struct **pprev)
2033{
2034        struct vm_area_struct *vma;
2035
2036        vma = find_vma(mm, addr);
2037        if (vma) {
2038                *pprev = vma->vm_prev;
2039        } else {
2040                struct rb_node *rb_node = mm->mm_rb.rb_node;
2041                *pprev = NULL;
2042                while (rb_node) {
2043                        *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2044                        rb_node = rb_node->rb_right;
2045                }
2046        }
2047        return vma;
2048}
2049
2050/*
2051 * Verify that the stack growth is acceptable and
2052 * update accounting. This is shared with both the
2053 * grow-up and grow-down cases.
2054 */
2055static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
2056{
2057        struct mm_struct *mm = vma->vm_mm;
2058        struct rlimit *rlim = current->signal->rlim;
2059        unsigned long new_start;
2060
2061        /* address space limit tests */
2062        if (!may_expand_vm(mm, grow))
2063                return -ENOMEM;
2064
2065        /* Stack limit test */
2066        if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
2067                return -ENOMEM;
2068
2069        /* mlock limit tests */
2070        if (vma->vm_flags & VM_LOCKED) {
2071                unsigned long locked;
2072                unsigned long limit;
2073                locked = mm->locked_vm + grow;
2074                limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
2075                limit >>= PAGE_SHIFT;
2076                if (locked > limit && !capable(CAP_IPC_LOCK))
2077                        return -ENOMEM;
2078        }
2079
2080        /* Check to ensure the stack will not grow into a hugetlb-only region */
2081        new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2082                        vma->vm_end - size;
2083        if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2084                return -EFAULT;
2085
2086        /*
2087         * Overcommit..  This must be the final test, as it will
2088         * update security statistics.
2089         */
2090        if (security_vm_enough_memory_mm(mm, grow))
2091                return -ENOMEM;
2092
2093        /* Ok, everything looks good - let it rip */
2094        if (vma->vm_flags & VM_LOCKED)
2095                mm->locked_vm += grow;
2096        vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
2097        return 0;
2098}
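
/*
 * Worked example for the mlock test above (assumed values): with
 * RLIMIT_MEMLOCK at 64KB and 4KB pages, limit becomes 16 pages after
 * the shift. A VM_LOCKED stack that already accounts 15 locked pages
 * may grow by one page, but a two-page growth fails with -ENOMEM
 * unless the task has CAP_IPC_LOCK.
 */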
2099
2100#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
2101/*
2102 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2103 * vma is the last one with address > vma->vm_end.  Have to extend vma.
2104 */
2105int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2106{
2107        int error;
2108
2109        if (!(vma->vm_flags & VM_GROWSUP))
2110                return -EFAULT;
2111
2112        /*
2113         * We must make sure the anon_vma is allocated
2114         * so that the anon_vma locking is not a noop.
2115         */
2116        if (unlikely(anon_vma_prepare(vma)))
2117                return -ENOMEM;
2118        vma_lock_anon_vma(vma);
2119
2120        /*
2121         * vma->vm_start/vm_end cannot change under us because the caller
2122         * is required to hold the mmap_sem in read mode.  We need the
2123         * anon_vma lock to serialize against concurrent expand_stacks.
2124         * Also guard against wrapping around to address 0.
2125         */
2126        if (address < PAGE_ALIGN(address+4))
2127                address = PAGE_ALIGN(address+4);
2128        else {
2129                vma_unlock_anon_vma(vma);
2130                return -ENOMEM;
2131        }
2132        error = 0;
2133
2134        /* Somebody else might have raced and expanded it already */
2135        if (address > vma->vm_end) {
2136                unsigned long size, grow;
2137
2138                size = address - vma->vm_start;
2139                grow = (address - vma->vm_end) >> PAGE_SHIFT;
2140
2141                error = -ENOMEM;
2142                if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2143                        error = acct_stack_growth(vma, size, grow);
2144                        if (!error) {
2145                                /*
2146                                 * vma_gap_update() doesn't support concurrent
2147                                 * updates, but we only hold a shared mmap_sem
2148                                 * lock here, so we need to protect against
2149                                 * concurrent vma expansions.
2150                                 * vma_lock_anon_vma() doesn't help here, as
2151                                 * we don't guarantee that all growable vmas
2152                                 * in a mm share the same root anon vma.
2153                                 * So, we reuse mm->page_table_lock to guard
2154                                 * against concurrent vma expansions.
2155                                 */
2156                                spin_lock(&vma->vm_mm->page_table_lock);
2157                                anon_vma_interval_tree_pre_update_vma(vma);
2158                                vma->vm_end = address;
2159                                anon_vma_interval_tree_post_update_vma(vma);
2160                                if (vma->vm_next)
2161                                        vma_gap_update(vma->vm_next);
2162                                else
2163                                        vma->vm_mm->highest_vm_end = address;
2164                                spin_unlock(&vma->vm_mm->page_table_lock);
2165
2166                                perf_event_mmap(vma);
2167                        }
2168                }
2169        }
2170        vma_unlock_anon_vma(vma);
2171        khugepaged_enter_vma_merge(vma);
2172        validate_mm(vma->vm_mm);
2173        return error;
2174}
2175#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
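
/*
 * Note on the wrap check in expand_upwards() above: PAGE_ALIGN(address
 * + 4) rounds up to the next page boundary while leaving room for at
 * least one word above the faulting address. If that addition wraps
 * past the top of the address space the result compares as smaller,
 * the else branch is taken, and the expansion is refused with -ENOMEM.
 */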
2176
2177/*
2178 * vma is the first one with address < vma->vm_start.  Have to extend vma.
2179 */
2180int expand_downwards(struct vm_area_struct *vma,
2181                                   unsigned long address)
2182{
2183        int error;
2184
2185        /*
2186         * We must make sure the anon_vma is allocated
2187         * so that the anon_vma locking is not a noop.
2188         */
2189        if (unlikely(anon_vma_prepare(vma)))
2190                return -ENOMEM;
2191
2192        address &= PAGE_MASK;
2193        error = security_mmap_addr(address);
2194        if (error)
2195                return error;
2196
2197        vma_lock_anon_vma(vma);
2198
2199        /*
2200         * vma->vm_start/vm_end cannot change under us because the caller
2201         * is required to hold the mmap_sem in read mode.  We need the
2202         * anon_vma lock to serialize against concurrent expand_stacks.
2203         */
2204
2205        /* Somebody else might have raced and expanded it already */
2206        if (address < vma->vm_start) {
2207                unsigned long size, grow;
2208
2209                size = vma->vm_end - address;
2210                grow = (vma->vm_start - address) >> PAGE_SHIFT;
2211
2212                error = -ENOMEM;
2213                if (grow <= vma->vm_pgoff) {
2214                        error = acct_stack_growth(vma, size, grow);
2215                        if (!error) {
2216                                /*
2217                                 * vma_gap_update() doesn't support concurrent
2218                                 * updates, but we only hold a shared mmap_sem
2219                                 * lock here, so we need to protect against
2220                                 * concurrent vma expansions.
2221                                 * vma_lock_anon_vma() doesn't help here, as
2222                                 * we don't guarantee that all growable vmas
2223                                 * in a mm share the same root anon vma.
2224                                 * So, we reuse mm->page_table_lock to guard
2225                                 * against concurrent vma expansions.
2226                                 */
2227                                spin_lock(&vma->vm_mm->page_table_lock);
2228                                anon_vma_interval_tree_pre_update_vma(vma);
2229                                vma->vm_start = address;
2230                                vma->vm_pgoff -= grow;
2231                                anon_vma_interval_tree_post_update_vma(vma);
2232                                vma_gap_update(vma);
2233                                spin_unlock(&vma->vm_mm->page_table_lock);
2234
2235                                perf_event_mmap(vma);
2236                        }
2237                }
2238        }
2239        vma_unlock_anon_vma(vma);
2240        khugepaged_enter_vma_merge(vma);
2241        validate_mm(vma->vm_mm);
2242        return error;
2243}
2244
2245/*
2246 * Note how expand_stack() refuses to expand the stack all the way to
2247 * abut the next virtual mapping, *unless* that mapping itself is also
2248 * a stack mapping. We want to leave room for a guard page, after all
2249 * (the guard page itself is not added here, that is done by the
2250 * actual page faulting logic)
2251 *
2252 * This matches the behavior of the guard page logic (see mm/memory.c:
2253 * check_stack_guard_page()), which only allows the guard page to be
2254 * removed under these circumstances.
2255 */
2256#ifdef CONFIG_STACK_GROWSUP
2257int expand_stack(struct vm_area_struct *vma, unsigned long address)
2258{
2259        struct vm_area_struct *next;
2260
2261        address &= PAGE_MASK;
2262        next = vma->vm_next;
2263        if (next && next->vm_start == address + PAGE_SIZE) {
2264                if (!(next->vm_flags & VM_GROWSUP))
2265                        return -ENOMEM;
2266        }
2267        return expand_upwards(vma, address);
2268}
2269
2270struct vm_area_struct *
2271find_extend_vma(struct mm_struct *mm, unsigned long addr)
2272{
2273        struct vm_area_struct *vma, *prev;
2274
2275        addr &= PAGE_MASK;
2276        vma = find_vma_prev(mm, addr, &prev);
2277        if (vma && (vma->vm_start <= addr))
2278                return vma;
2279        if (!prev || expand_stack(prev, addr))
2280                return NULL;
2281        if (prev->vm_flags & VM_LOCKED)
2282                __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
2283        return prev;
2284}
2285#else
2286int expand_stack(struct vm_area_struct *vma, unsigned long address)
2287{
2288        struct vm_area_struct *prev;
2289
2290        address &= PAGE_MASK;
2291        prev = vma->vm_prev;
2292        if (prev && prev->vm_end == address) {
2293                if (!(prev->vm_flags & VM_GROWSDOWN))
2294                        return -ENOMEM;
2295        }
2296        return expand_downwards(vma, address);
2297}
2298
2299struct vm_area_struct *
2300find_extend_vma(struct mm_struct *mm, unsigned long addr)
2301{
2302        struct vm_area_struct *vma;
2303        unsigned long start;
2304
2305        addr &= PAGE_MASK;
2306        vma = find_vma(mm, addr);
2307        if (!vma)
2308                return NULL;
2309        if (vma->vm_start <= addr)
2310                return vma;
2311        if (!(vma->vm_flags & VM_GROWSDOWN))
2312                return NULL;
2313        start = vma->vm_start;
2314        if (expand_stack(vma, addr))
2315                return NULL;
2316        if (vma->vm_flags & VM_LOCKED)
2317                __mlock_vma_pages_range(vma, addr, start, NULL);
2318        return vma;
2319}
2320#endif
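
/*
 * Caller sketch (assumed): get_user_pages() and similar paths use
 * find_extend_vma() so that pinning pages just below the current
 * bottom of a stack transparently grows the stack first, roughly:
 *
 *	vma = find_extend_vma(mm, start);
 *	if (!vma)
 *		-- fault: start is neither mapped nor growable
 */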
2321
2322/*
2323 * Ok - we have the memory areas we should free on the vma list,
2324 * so release them, and do the vma updates.
2325 *
2326 * Called with the mm semaphore held.
2327 */
2328static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2329{
2330        unsigned long nr_accounted = 0;
2331
2332        /* Update high watermark before we lower total_vm */
2333        update_hiwater_vm(mm);
2334        do {
2335                long nrpages = vma_pages(vma);
2336
2337                if (vma->vm_flags & VM_ACCOUNT)
2338                        nr_accounted += nrpages;
2339                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
2340                vma = remove_vma(vma);
2341        } while (vma);
2342        vm_unacct_memory(nr_accounted);
2343        validate_mm(mm);
2344}
2345
2346/*
2347 * Get rid of page table information in the indicated region.
2348 *
2349 * Called with the mm semaphore held.
2350 */
2351static void unmap_region(struct mm_struct *mm,
2352                struct vm_area_struct *vma, struct vm_area_struct *prev,
2353                unsigned long start, unsigned long end)
2354{
2355        struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
2356        struct mmu_gather tlb;
2357
2358        lru_add_drain();
2359        tlb_gather_mmu(&tlb, mm, 0);
2360        update_hiwater_rss(mm);
2361        unmap_vmas(&tlb, vma, start, end);
2362        free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2363                                 next ? next->vm_start : USER_PGTABLES_CEILING);
2364        tlb_finish_mmu(&tlb, start, end);
2365}
2366
2367/*
2368 * Create a list of vma's touched by the unmap, removing them from the mm's
2369 * vma list as we go.
2370 */
2371static void
2372detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2373        struct vm_area_struct *prev, unsigned long end)
2374{
2375        struct vm_area_struct **insertion_point;
2376        struct vm_area_struct *tail_vma = NULL;
2377        unsigned long addr;
2378
2379        insertion_point = (prev ? &prev->vm_next : &mm->mmap);
2380        vma->vm_prev = NULL;
2381        do {
2382                vma_rb_erase(vma, &mm->mm_rb);
2383                mm->map_count--;
2384                tail_vma = vma;
2385                vma = vma->vm_next;
2386        } while (vma && vma->vm_start < end);
2387        *insertion_point = vma;
2388        if (vma) {
2389                vma->vm_prev = prev;
2390                vma_gap_update(vma);
2391        } else
2392                mm->highest_vm_end = prev ? prev->vm_end : 0;
2393        tail_vma->vm_next = NULL;
2394        if (mm->unmap_area == arch_unmap_area)
2395                addr = prev ? prev->vm_end : mm->mmap_base;
2396        else
2397                addr = vma ? vma->vm_start : mm->mmap_base;
2398        mm->unmap_area(mm, addr);
2399        mm->mmap_cache = NULL;          /* Kill the cache. */
2400}
2401
2402/*
2403 * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
2404 * munmap path where it doesn't make sense to fail.
2405 */
2406static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2407              unsigned long addr, int new_below)
2408{
2409        struct mempolicy *pol;
2410        struct vm_area_struct *new;
2411        int err = -ENOMEM;
2412
2413        if (is_vm_hugetlb_page(vma) && (addr &
2414                                        ~(huge_page_mask(hstate_vma(vma)))))
2415                return -EINVAL;
2416
2417        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2418        if (!new)
2419                goto out_err;
2420
2421        /* most fields are the same, copy all, and then fixup */
2422        *new = *vma;
2423
2424        INIT_LIST_HEAD(&new->anon_vma_chain);
2425
2426        if (new_below)
2427                new->vm_end = addr;
2428        else {
2429                new->vm_start = addr;
2430                new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2431        }
2432
2433        pol = mpol_dup(vma_policy(vma));
2434        if (IS_ERR(pol)) {
2435                err = PTR_ERR(pol);
2436                goto out_free_vma;
2437        }
2438        vma_set_policy(new, pol);
2439
2440        if (anon_vma_clone(new, vma))
2441                goto out_free_mpol;
2442
2443        if (new->vm_file)
2444                get_file(new->vm_file);
2445
2446        if (new->vm_ops && new->vm_ops->open)
2447                new->vm_ops->open(new);
2448
2449        if (new_below)
2450                err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
2451                        ((addr - new->vm_start) >> PAGE_SHIFT), new);
2452        else
2453                err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
2454
2455        /* Success. */
2456        if (!err)
2457                return 0;
2458
2459        /* Clean everything up if vma_adjust failed. */
2460        if (new->vm_ops && new->vm_ops->close)
2461                new->vm_ops->close(new);
2462        if (new->vm_file)
2463                fput(new->vm_file);
2464        unlink_anon_vmas(new);
2465 out_free_mpol:
2466        mpol_put(pol);
2467 out_free_vma:
2468        kmem_cache_free(vm_area_cachep, new);
2469 out_err:
2470        return err;
2471}
2472
2473/*
2474 * Split a vma into two pieces at address 'addr', a new vma is allocated
2475 * either for the first part or the tail.
2476 */
2477int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2478              unsigned long addr, int new_below)
2479{
2480        if (mm->map_count >= sysctl_max_map_count)
2481                return -ENOMEM;
2482
2483        return __split_vma(mm, vma, addr, new_below);
2484}
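
/*
 * Semantics sketch (descriptive): split_vma(mm, vma, addr, new_below)
 * cuts [vm_start, vm_end) at addr. With new_below == 1 the new vma
 * covers [vm_start, addr) and the old one keeps [addr, vm_end); with
 * new_below == 0 the roles are swapped. addr must be page aligned and,
 * for hugetlb vmas, huge-page aligned (enforced in __split_vma()).
 */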
2485
2486/* Munmap is split into two main parts: this part, which finds
2487 * what needs doing, and unmap_region()/remove_vma_list() above,
2488 * which do the work.  This now handles partial unmappings.
2489 * Jeremy Fitzhardinge <jeremy@goop.org>
2490 */
2491int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2492{
2493        unsigned long end;
2494        struct vm_area_struct *vma, *prev, *last;
2495
2496        if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE - start)
2497                return -EINVAL;
2498
2499        if ((len = PAGE_ALIGN(len)) == 0)
2500                return -EINVAL;
2501
2502        /* Find the first overlapping VMA */
2503        vma = find_vma(mm, start);
2504        if (!vma)
2505                return 0;
2506        prev = vma->vm_prev;
2507        /* we have  start < vma->vm_end  */
2508
2509        /* if it doesn't overlap, we have nothing.. */
2510        end = start + len;
2511        if (vma->vm_start >= end)
2512                return 0;
2513
2514        /*
2515         * If we need to split any vma, do it now to save pain later.
2516         *
2517         * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2518         * unmapped vm_area_struct will remain in use: so lower split_vma
2519         * places tmp vma above, and higher split_vma places tmp vma below.
2520         */
2521        if (start > vma->vm_start) {
2522                int error;
2523
2524                /*
2525                 * Make sure that map_count on return from munmap() will
2526                 * not exceed its limit; but let map_count go just above
2527                 * its limit temporarily, to help free resources as expected.
2528                 */
2529                if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2530                        return -ENOMEM;
2531
2532                error = __split_vma(mm, vma, start, 0);
2533                if (error)
2534                        return error;
2535                prev = vma;
2536        }
2537
2538        /* Does it split the last one? */
2539        last = find_vma(mm, end);
2540        if (last && end > last->vm_start) {
2541                int error = __split_vma(mm, last, end, 1);
2542                if (error)
2543                        return error;
2544        }
2545        vma = prev ? prev->vm_next : mm->mmap;
2546
2547        /*
2548         * unlock any mlock()ed ranges before detaching vmas
2549         */
2550        if (mm->locked_vm) {
2551                struct vm_area_struct *tmp = vma;
2552                while (tmp && tmp->vm_start < end) {
2553                        if (tmp->vm_flags & VM_LOCKED) {
2554                                mm->locked_vm -= vma_pages(tmp);
2555                                munlock_vma_pages_all(tmp);
2556                        }
2557                        tmp = tmp->vm_next;
2558                }
2559        }
2560
2561        /*
2562         * Remove the vma's, and unmap the actual pages
2563         */
2564        detach_vmas_to_be_unmapped(mm, vma, prev, end);
2565        unmap_region(mm, vma, prev, start, end);
2566
2567        /* Fix up all other VM information */
2568        remove_vma_list(mm, vma);
2569
2570        return 0;
2571}
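
/*
 * Userspace sketch (assumed, not part of this file): unmapping the
 * middle of a mapping exercises both __split_vma() calls above and
 * leaves two vmas behind:
 *
 *	char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	munmap(p + 4096, 4096);	-- pages 0 and 2 remain mapped
 */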
2572
2573int vm_munmap(unsigned long start, size_t len)
2574{
2575        int ret;
2576        struct mm_struct *mm = current->mm;
2577
2578        down_write(&mm->mmap_sem);
2579        ret = do_munmap(mm, start, len);
2580        up_write(&mm->mmap_sem);
2581        return ret;
2582}
2583EXPORT_SYMBOL(vm_munmap);
2584
2585SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2586{
2587        profile_munmap(addr);
2588        return vm_munmap(addr, len);
2589}
2590
2591static inline void verify_mm_writelocked(struct mm_struct *mm)
2592{
2593#ifdef CONFIG_DEBUG_VM
2594        if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2595                WARN_ON(1);
2596                up_read(&mm->mmap_sem);
2597        }
2598#endif
2599}
2600
2601/*
2602 *  This is really a simplified "do_mmap".  It only handles
2603 *  anonymous maps.  Eventually we may be able to do some
2604 *  brk-specific accounting here.
2605 */
2606static unsigned long do_brk(unsigned long addr, unsigned long len)
2607{
2608        struct mm_struct *mm = current->mm;
2609        struct vm_area_struct *vma, *prev;
2610        unsigned long flags;
2611        struct rb_node **rb_link, *rb_parent;
2612        pgoff_t pgoff = addr >> PAGE_SHIFT;
2613        int error;
2614
2615        len = PAGE_ALIGN(len);
2616        if (!len)
2617                return addr;
2618
2619        flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2620
2621        error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2622        if (error & ~PAGE_MASK)
2623                return error;
2624
2625        /*
2626         * mlock MCL_FUTURE?
2627         */
2628        if (mm->def_flags & VM_LOCKED) {
2629                unsigned long locked, lock_limit;
2630                locked = len >> PAGE_SHIFT;
2631                locked += mm->locked_vm;
2632                lock_limit = rlimit(RLIMIT_MEMLOCK);
2633                lock_limit >>= PAGE_SHIFT;
2634                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2635                        return -EAGAIN;
2636        }
2637
2638        /*
2639         * mm->mmap_sem is required to protect against another thread
2640         * changing the mappings in case we sleep.
2641         */
2642        verify_mm_writelocked(mm);
2643
2644        /*
2645         * Clear old maps.  This also does some error checking for us.
2646         */
2647 munmap_back:
2648        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
2649                if (do_munmap(mm, addr, len))
2650                        return -ENOMEM;
2651                goto munmap_back;
2652        }
2653
2654        /* Check against address space limits *after* clearing old maps... */
2655        if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2656                return -ENOMEM;
2657
2658        if (mm->map_count > sysctl_max_map_count)
2659                return -ENOMEM;
2660
2661        if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2662                return -ENOMEM;
2663
2664        /* Can we just expand an old private anonymous mapping? */
2665        vma = vma_merge(mm, prev, addr, addr + len, flags,
2666                                        NULL, NULL, pgoff, NULL);
2667        if (vma)
2668                goto out;
2669
2670        /*
2671         * create a vma struct for an anonymous mapping
2672         */
2673        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2674        if (!vma) {
2675                vm_unacct_memory(len >> PAGE_SHIFT);
2676                return -ENOMEM;
2677        }
2678
2679        INIT_LIST_HEAD(&vma->anon_vma_chain);
2680        vma->vm_mm = mm;
2681        vma->vm_start = addr;
2682        vma->vm_end = addr + len;
2683        vma->vm_pgoff = pgoff;
2684        vma->vm_flags = flags;
2685        vma->vm_page_prot = vm_get_page_prot(flags);
2686        vma_link(mm, vma, prev, rb_link, rb_parent);
2687out:
2688        perf_event_mmap(vma);
2689        mm->total_vm += len >> PAGE_SHIFT;
2690        if (flags & VM_LOCKED)
2691                mm->locked_vm += (len >> PAGE_SHIFT);
2692        return addr;
2693}
2694
2695unsigned long vm_brk(unsigned long addr, unsigned long len)
2696{
2697        struct mm_struct *mm = current->mm;
2698        unsigned long ret;
2699        bool populate;
2700
2701        down_write(&mm->mmap_sem);
2702        ret = do_brk(addr, len);
2703        populate = ((mm->def_flags & VM_LOCKED) != 0);
2704        up_write(&mm->mmap_sem);
2705        if (populate)
2706                mm_populate(addr, len);
2707        return ret;
2708}
2709EXPORT_SYMBOL(vm_brk);
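
/*
 * Usage sketch (assumed, names illustrative): in-kernel users such as
 * the binfmt loaders call vm_brk() to set up zero-filled anonymous
 * regions like the BSS:
 *
 *	addr = vm_brk(bss_start, bss_end - bss_start);
 *	if (BAD_ADDR(addr))
 *		-- propagate the error
 *
 * See fs/binfmt_elf.c for a real caller.
 */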
2710
2711/* Release all mmaps. */
2712void exit_mmap(struct mm_struct *mm)
2713{
2714        struct mmu_gather tlb;
2715        struct vm_area_struct *vma;
2716        unsigned long nr_accounted = 0;
2717
2718        /* mm's last user has gone, and it's about to be pulled down */
2719        mmu_notifier_release(mm);
2720
2721        if (mm->locked_vm) {
2722                vma = mm->mmap;
2723                while (vma) {
2724                        if (vma->vm_flags & VM_LOCKED)
2725                                munlock_vma_pages_all(vma);
2726                        vma = vma->vm_next;
2727                }
2728        }
2729
2730        arch_exit_mmap(mm);
2731
2732        vma = mm->mmap;
2733        if (!vma)       /* Can happen if dup_mmap() received an OOM */
2734                return;
2735
2736        lru_add_drain();
2737        flush_cache_mm(mm);
2738        tlb_gather_mmu(&tlb, mm, 1);
2739        /* update_hiwater_rss(mm) here? but nobody should be looking */
2740        /* Use -1 here to ensure all VMAs in the mm are unmapped */
2741        unmap_vmas(&tlb, vma, 0, -1);
2742
2743        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
2744        tlb_finish_mmu(&tlb, 0, -1);
2745
2746        /*
2747         * Walk the list again, actually closing and freeing it,
2748         * with preemption enabled, without holding any MM locks.
2749         */
2750        while (vma) {
2751                if (vma->vm_flags & VM_ACCOUNT)
2752                        nr_accounted += vma_pages(vma);
2753                vma = remove_vma(vma);
2754        }
2755        vm_unacct_memory(nr_accounted);
2756
2757        WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS + PMD_SIZE - 1) >> PMD_SHIFT);
2758}
2759
2760/* Insert vm structure into process list sorted by address
2761 * and into the inode's i_mmap tree.  If vm_file is non-NULL
2762 * then i_mmap_mutex is taken here.
2763 */
2764int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
2765{
2766        struct vm_area_struct *prev;
2767        struct rb_node **rb_link, *rb_parent;
2768
2769        /*
2770         * The vm_pgoff of a purely anonymous vma should be irrelevant
2771         * until its first write fault, when page's anon_vma and index
2772         * are set.  But now set the vm_pgoff it will almost certainly
2773         * end up with (unless mremap moves it elsewhere before that
2774         * first write fault), so /proc/pid/maps tells a consistent story.
2775         *
2776         * By setting it to reflect the virtual start address of the
2777         * vma, merges and splits can happen in a seamless way, just
2778         * using the existing file pgoff checks and manipulations.
2779         * Similarly in do_mmap_pgoff and in do_brk.
2780         */
2781        if (!vma->vm_file) {
2782                BUG_ON(vma->anon_vma);
2783                vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2784        }
2785        if (find_vma_links(mm, vma->vm_start, vma->vm_end,
2786                           &prev, &rb_link, &rb_parent))
2787                return -ENOMEM;
2788        if ((vma->vm_flags & VM_ACCOUNT) &&
2789             security_vm_enough_memory_mm(mm, vma_pages(vma)))
2790                return -ENOMEM;
2791
2792        vma_link(mm, vma, prev, rb_link, rb_parent);
2793        return 0;
2794}
2795
2796/*
2797 * Copy the vma structure to a new location in the same mm,
2798 * prior to moving page table entries, to effect an mremap move.
2799 */
2800struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2801        unsigned long addr, unsigned long len, pgoff_t pgoff,
2802        bool *need_rmap_locks)
2803{
2804        struct vm_area_struct *vma = *vmap;
2805        unsigned long vma_start = vma->vm_start;
2806        struct mm_struct *mm = vma->vm_mm;
2807        struct vm_area_struct *new_vma, *prev;
2808        struct rb_node **rb_link, *rb_parent;
2809        struct mempolicy *pol;
2810        bool faulted_in_anon_vma = true;
2811
2812        /*
2813         * If anonymous vma has not yet been faulted, update new pgoff
2814         * to match new location, to increase its chance of merging.
2815         */
2816        if (unlikely(!vma->vm_file && !vma->anon_vma)) {
2817                pgoff = addr >> PAGE_SHIFT;
2818                faulted_in_anon_vma = false;
2819        }
2820
2821        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
2822                return NULL;    /* should never get here */
2823        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2824                        vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2825        if (new_vma) {
2826                /*
2827                 * Source vma may have been merged into new_vma
2828                 */
2829                if (unlikely(vma_start >= new_vma->vm_start &&
2830                             vma_start < new_vma->vm_end)) {
2831                        /*
2832                         * The only way we can get a vma_merge with
2833                         * self during an mremap is if the vma hasn't
2834                         * been faulted in yet and we were allowed to
2835                         * reset the dst vma->vm_pgoff to the
2836                         * destination address of the mremap to allow
2837                         * the merge to happen. mremap must change the
2838                         * vm_pgoff linearity between src and dst vmas
2839                         * (in turn preventing a vma_merge) to be
2840                         * safe. It is only safe to keep the vm_pgoff
2841                         * linear if there are no pages mapped yet.
2842                         */
2843                        VM_BUG_ON(faulted_in_anon_vma);
2844                        *vmap = vma = new_vma;
2845                }
2846                *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
2847        } else {
2848                new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2849                if (new_vma) {
2850                        *new_vma = *vma;
2851                        new_vma->vm_start = addr;
2852                        new_vma->vm_end = addr + len;
2853                        new_vma->vm_pgoff = pgoff;
2854                        pol = mpol_dup(vma_policy(vma));
2855                        if (IS_ERR(pol))
2856                                goto out_free_vma;
2857                        vma_set_policy(new_vma, pol);
2858                        INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2859                        if (anon_vma_clone(new_vma, vma))
2860                                goto out_free_mempol;
2861                        if (new_vma->vm_file)
2862                                get_file(new_vma->vm_file);
2863                        if (new_vma->vm_ops && new_vma->vm_ops->open)
2864                                new_vma->vm_ops->open(new_vma);
2865                        vma_link(mm, new_vma, prev, rb_link, rb_parent);
2866                        *need_rmap_locks = false;
2867                }
2868        }
2869        return new_vma;
2870
2871 out_free_mempol:
2872        mpol_put(pol);
2873 out_free_vma:
2874        kmem_cache_free(vm_area_cachep, new_vma);
2875        return NULL;
2876}
2877
2878/*
2879 * Return true if the calling process may expand its vm space by the passed
2880 * number of pages
2881 */
2882int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2883{
2884        unsigned long cur = mm->total_vm;       /* pages */
2885        unsigned long lim;
2886
2887        lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
2888
2889        if (cur + npages > lim)
2890                return 0;
2891        return 1;
2892}
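
/*
 * Worked example (assumed values): with RLIMIT_AS at 1GB and 4KB
 * pages, lim is (1 << 30) >> 12 = 262144 pages. A process whose
 * total_vm is already 262000 pages may still expand by 144 pages, but
 * a 145-page expansion is refused.
 */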
2893
2894
2895static int special_mapping_fault(struct vm_area_struct *vma,
2896                                struct vm_fault *vmf)
2897{
2898        pgoff_t pgoff;
2899        struct page **pages;
2900
2901        /*
2902         * special mappings have no vm_file, and in that case, the mm
2903         * uses vm_pgoff internally. So we have to subtract it here.
2904         * We are allowed to do this because we are the mm; do not copy
2905         * this code into drivers!
2906         */
2907        pgoff = vmf->pgoff - vma->vm_pgoff;
2908
2909        for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2910                pgoff--;
2911
2912        if (*pages) {
2913                struct page *page = *pages;
2914                get_page(page);
2915                vmf->page = page;
2916                return 0;
2917        }
2918
2919        return VM_FAULT_SIGBUS;
2920}
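
/*
 * Editor's note: a worked example of the pgoff arithmetic above, with
 * invented numbers.  For a fault at vma->vm_start + 2 pages, the fault
 * path computes vmf->pgoff = ((address - vma->vm_start) >> PAGE_SHIFT)
 * + vma->vm_pgoff.  Subtracting vma->vm_pgoff leaves 2, so the loop
 * walks two entries into the null-terminated vm_private_data array;
 * if fewer than three pages were supplied, *pages is NULL and the
 * fault returns VM_FAULT_SIGBUS, as install_special_mapping()'s
 * comment promises.
 */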
2921
2922/*
2923 * Having a close hook prevents vma merging regardless of flags.
2924 */
2925static void special_mapping_close(struct vm_area_struct *vma)
2926{
2927}
2928
2929static const struct vm_operations_struct special_mapping_vmops = {
2930        .close = special_mapping_close,
2931        .fault = special_mapping_fault,
2932};
2933
2934/*
2935 * Called with mm->mmap_sem held for writing.
2936 * Insert a new vma covering the given region, with the given flags.
2937 * Its pages are supplied by the given array of struct page *.
2938 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2939 * The region past the last page supplied will always produce SIGBUS.
2940 * The array pointer and the pages it points to are assumed to stay alive
2941 * for as long as this mapping might exist.
2942 */
2943int install_special_mapping(struct mm_struct *mm,
2944                            unsigned long addr, unsigned long len,
2945                            unsigned long vm_flags, struct page **pages)
2946{
2947        int ret;
2948        struct vm_area_struct *vma;
2949
2950        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2951        if (unlikely(vma == NULL))
2952                return -ENOMEM;
2953
2954        INIT_LIST_HEAD(&vma->anon_vma_chain);
2955        vma->vm_mm = mm;
2956        vma->vm_start = addr;
2957        vma->vm_end = addr + len;
2958
2959        vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
2960        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2961
2962        vma->vm_ops = &special_mapping_vmops;
2963        vma->vm_private_data = pages;
2964
2965        ret = insert_vm_struct(mm, vma);
2966        if (ret)
2967                goto out;
2968
2969        mm->total_vm += len >> PAGE_SHIFT;
2970
2971        perf_event_mmap(vma);
2972
2973        return 0;
2974
2975out:
2976        kmem_cache_free(vm_area_cachep, vma);
2977        return ret;
2978}
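
/*
 * A hedged usage sketch (names invented; real callers are the per-arch
 * vDSO setup paths): publish one kernel page at a fixed user address
 * as a read/exec special mapping.  The page array must be
 * NULL-terminated and must outlive the mapping, and the caller must
 * hold mm->mmap_sem for writing, per the comment above.
 */
static struct page *example_pages[2];   /* [0] = the page, [1] = NULL */

static int example_install(struct mm_struct *mm, struct page *page,
                           unsigned long addr)
{
        example_pages[0] = page;
        example_pages[1] = NULL;
        return install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       example_pages);
}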
2979
2980static DEFINE_MUTEX(mm_all_locks_mutex);
2981
2982static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2983{
2984        if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
2985                /*
2986                 * The LSB of rb_root.rb_node can't change from under us
2987                 * because we hold the mm_all_locks_mutex.
2988                 */
2989                down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
2990                /*
2991                 * We can safely modify rb_root.rb_node after taking the
2992                 * anon_vma->root->rwsem. If some other vma in this mm shares
2993                 * the same anon_vma we won't take it again.
2994                 *
2995                 * No need for atomic instructions here: rb_root.rb_node
2996                 * can't change from under us thanks to the
2997                 * anon_vma->root->rwsem.
2998                 */
2999                if (__test_and_set_bit(0, (unsigned long *)
3000                                       &anon_vma->root->rb_root.rb_node))
3001                        BUG();
3002        }
3003}
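
/*
 * Editor's note: why bit 0 is free for this trick.  The node pointer
 * stored in rb_root.rb_node is the address of a slab-allocated
 * structure, which is at least word-aligned in practice, so the low
 * bit of any valid pointer value is always zero.  That bit can
 * therefore be borrowed as a "locked by mm_take_all_locks" marker,
 * and is cleared again in vm_unlock_anon_vma() below before anyone
 * else can observe the pointer.
 */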
3004
3005static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3006{
3007        if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3008                /*
3009                 * AS_MM_ALL_LOCKS can't change from under us because
3010                 * we hold the mm_all_locks_mutex.
3011                 *
3012                 * Operations on ->flags have to be atomic because
3013                 * even if AS_MM_ALL_LOCKS is stable thanks to the
3014                 * mm_all_locks_mutex, there may be other cpus
3015                 * changing other bitflags in parallel to us.
3016                 */
3017                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3018                        BUG();
3019                mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
3020        }
3021}
3022
3023/*
3024 * This operation locks against the VM for all pte/vma/mm related
3025 * operations that could ever happen on a certain mm. This includes
3026 * vmtruncate, try_to_unmap, and all page faults.
3027 *
3028 * The caller must take the mmap_sem in write mode before calling
3029 * mm_take_all_locks(). The caller isn't allowed to release the
3030 * mmap_sem until mm_drop_all_locks() returns.
3031 *
3032 * mmap_sem in write mode is required in order to block all operations
3033 * that could modify pagetables and free pages without needing to
3034 * alter the vma layout (for example populate_range() with
3035 * nonlinear vmas). It's also needed in write mode to prevent new
3036 * anon_vmas from being associated with existing vmas.
3037 *
3038 * A single task can't take more than one mm_take_all_locks() in a row
3039 * or it would deadlock.
3040 *
3041 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3042 * mapping->flags avoid taking the same lock twice if more than one
3043 * vma in this mm is backed by the same anon_vma or address_space.
3044 *
3045 * We can take all the locks in any order because the VM code
3046 * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
3047 * takes more than one of them in a row. Second, we're protected
3048 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
3049 *
3050 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
3051 * that may have to take thousands of locks.
3052 *
3053 * mm_take_all_locks() can fail if it's interrupted by signals.
3054 */
3055int mm_take_all_locks(struct mm_struct *mm)
3056{
3057        struct vm_area_struct *vma;
3058        struct anon_vma_chain *avc;
3059
3060        BUG_ON(down_read_trylock(&mm->mmap_sem));
3061
3062        mutex_lock(&mm_all_locks_mutex);
3063
3064        for (vma = mm->mmap; vma; vma = vma->vm_next) {
3065                if (signal_pending(current))
3066                        goto out_unlock;
3067                if (vma->vm_file && vma->vm_file->f_mapping)
3068                        vm_lock_mapping(mm, vma->vm_file->f_mapping);
3069        }
3070
3071        for (vma = mm->mmap; vma; vma = vma->vm_next) {
3072                if (signal_pending(current))
3073                        goto out_unlock;
3074                if (vma->anon_vma)
3075                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3076                                vm_lock_anon_vma(mm, avc->anon_vma);
3077        }
3078
3079        return 0;
3080
3081out_unlock:
3082        mm_drop_all_locks(mm);
3083        return -EINTR;
3084}
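
/*
 * A minimal calling-pattern sketch (function name invented; the
 * in-tree user is mmu notifier registration): mmap_sem must be held
 * for writing across the whole take/drop window.
 */
static int example_freeze_mm(struct mm_struct *mm)
{
        int ret;

        down_write(&mm->mmap_sem);
        ret = mm_take_all_locks(mm);    /* -EINTR if a signal is pending */
        if (!ret) {
                /* all pte/vma operations on mm are now excluded */
                mm_drop_all_locks(mm);
        }
        up_write(&mm->mmap_sem);
        return ret;
}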
3085
3086static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3087{
3088        if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
3089                /*
3090                 * The LSB of rb_root.rb_node can't change to 0 from under
3091                 * us because we hold the mm_all_locks_mutex.
3092                 *
3093                 * We must however clear the bitflag before unlocking
3094                 * the vma so that users of the anon_vma->rb_root will
3095                 * never see our bitflag.
3096                 *
3097                 * No need for atomic instructions here: rb_root.rb_node
3098                 * can't change from under us until we release the
3099                 * anon_vma->root->rwsem.
3100                 */
3101                if (!__test_and_clear_bit(0, (unsigned long *)
3102                                          &anon_vma->root->rb_root.rb_node))
3103                        BUG();
3104                anon_vma_unlock_write(anon_vma);
3105        }
3106}
3107
3108static void vm_unlock_mapping(struct address_space *mapping)
3109{
3110        if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3111                /*
3112                 * AS_MM_ALL_LOCKS can't change to 0 from under us
3113                 * because we hold the mm_all_locks_mutex.
3114                 */
3115                mutex_unlock(&mapping->i_mmap_mutex);
3116                if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3117                                        &mapping->flags))
3118                        BUG();
3119        }
3120}
3121
3122/*
3123 * The mmap_sem cannot be released by the caller until
3124 * mm_drop_all_locks() returns.
3125 */
3126void mm_drop_all_locks(struct mm_struct *mm)
3127{
3128        struct vm_area_struct *vma;
3129        struct anon_vma_chain *avc;
3130
3131        BUG_ON(down_read_trylock(&mm->mmap_sem));
3132        BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3133
3134        for (vma = mm->mmap; vma; vma = vma->vm_next) {
3135                if (vma->anon_vma)
3136                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3137                                vm_unlock_anon_vma(avc->anon_vma);
3138                if (vma->vm_file && vma->vm_file->f_mapping)
3139                        vm_unlock_mapping(vma->vm_file->f_mapping);
3140        }
3141
3142        mutex_unlock(&mm_all_locks_mutex);
3143}
3144
3145/*
3146 * initialise the percpu counter used for VM accounting
3147 */
3148void __init mmap_init(void)
3149{
3150        int ret;
3151
3152        ret = percpu_counter_init(&vm_committed_as, 0);
3153        VM_BUG_ON(ret);
3154}
3155
3156/*
3157 * Initialise sysctl_user_reserve_kbytes.
3158 *
3159 * This is intended to prevent a user from starting a single memory hogging
3160 * process so large that they cannot recover (kill the hog) in
3161 * OVERCOMMIT_NEVER mode.
3162 *
3163 * The default value is min(3% of free memory, 128MB)
3164 * 128MB is enough to recover with sshd/login, bash, and top/kill.
3165 */
3166static int init_user_reserve(void)
3167{
3168        unsigned long free_kbytes;
3169
3170        free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3171
3172        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3173        return 0;
3174}
3175module_init(init_user_reserve)
3176
3177/*
3178 * Initialise sysctl_admin_reserve_kbytes.
3179 *
3180 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3181 * to log in and kill a memory hogging process.
3182 *
3183 * Systems with more than 256MB will reserve 8MB, enough to recover
3184 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3185 * only reserve 3% of free pages by default.
3186 */
3187static int init_admin_reserve(void)
3188{
3189        unsigned long free_kbytes;
3190
3191        free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3192
3193        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3194        return 0;
3195}
3196module_init(init_admin_reserve)
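
/*
 * Editor's note: worked numbers for the two reserves above.
 * free_kbytes / 32 is roughly 3.1% of free memory.  With 4GB free,
 * 4194304 / 32 = 131072kB = 1UL << 17 = 128MB, so the user reserve
 * saturates at its cap; with 256MB free, 262144 / 32 = 8192kB =
 * 1UL << 13 = 8MB, the admin reserve's cap, matching the
 * "more than 256MB will reserve 8MB" comment above.
 */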
3197
3198/*
3199 * Reinitialise user and admin reserves if memory is added or removed.
3200 *
3201 * The default user reserve max is 128MB, and the default max for the
3202 * admin reserve is 8MB. These are usually, but not always, enough to
3203 * enable recovery from a memory hogging process using login/sshd, a shell,
3204 * and tools like top. It may make sense to increase or even disable the
3205 * reserve depending on the existence of swap or variations in the recovery
3206 * tools. So, the admin may have changed them.
3207 *
3208 * If memory is added and the reserves have been eliminated or increased above
3209 * the default max, then we'll trust the admin.
3210 *
3211 * If memory is removed and there isn't enough free memory, then we
3212 * need to reset the reserves.
3213 *
3214 * Otherwise keep the reserve set by the admin.
3215 */
3216static int reserve_mem_notifier(struct notifier_block *nb,
3217                             unsigned long action, void *data)
3218{
3219        unsigned long tmp, free_kbytes;
3220
3221        switch (action) {
3222        case MEM_ONLINE:
3223                /* Default max is 128MB. Leave alone if modified by operator. */
3224                tmp = sysctl_user_reserve_kbytes;
3225                if (0 < tmp && tmp < (1UL << 17))
3226                        init_user_reserve();
3227
3228                /* Default max is 8MB.  Leave alone if modified by operator. */
3229                tmp = sysctl_admin_reserve_kbytes;
3230                if (0 < tmp && tmp < (1UL << 13))
3231                        init_admin_reserve();
3232
3233                break;
3234        case MEM_OFFLINE:
3235                free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3236
3237                if (sysctl_user_reserve_kbytes > free_kbytes) {
3238                        init_user_reserve();
3239                        pr_info("vm.user_reserve_kbytes reset to %lu\n",
3240                                sysctl_user_reserve_kbytes);
3241                }
3242
3243                if (sysctl_admin_reserve_kbytes > free_kbytes) {
3244                        init_admin_reserve();
3245                        pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3246                                sysctl_admin_reserve_kbytes);
3247                }
3248                break;
3249        default:
3250                break;
3251        }
3252        return NOTIFY_OK;
3253}
3254
3255static struct notifier_block reserve_mem_nb = {
3256        .notifier_call = reserve_mem_notifier,
3257};
3258
3259static int __meminit init_reserve_notifier(void)
3260{
3261        if (register_hotmemory_notifier(&reserve_mem_nb))
3262                pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3263
3264        return 0;
3265}
3266module_init(init_reserve_notifier)
3267