linux/mm/nommu.c
   1/*
   2 *  linux/mm/nommu.c
   3 *
   4 *  Replacement code for mm functions to support CPUs that don't
   5 *  have any form of memory management unit (thus no virtual memory).
   6 *
   7 *  See Documentation/nommu-mmap.txt
   8 *
   9 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
  10 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  11 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  12 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
  13 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  14 */
  15
  16#include <linux/export.h>
  17#include <linux/mm.h>
  18#include <linux/mman.h>
  19#include <linux/swap.h>
  20#include <linux/file.h>
  21#include <linux/highmem.h>
  22#include <linux/pagemap.h>
  23#include <linux/slab.h>
  24#include <linux/vmalloc.h>
  25#include <linux/blkdev.h>
  26#include <linux/backing-dev.h>
  27#include <linux/mount.h>
  28#include <linux/personality.h>
  29#include <linux/security.h>
  30#include <linux/syscalls.h>
  31#include <linux/audit.h>
  32
  33#include <asm/uaccess.h>
  34#include <asm/tlb.h>
  35#include <asm/tlbflush.h>
  36#include <asm/mmu_context.h>
  37#include "internal.h"
  38
  39#if 0
  40#define kenter(FMT, ...) \
  41        printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
  42#define kleave(FMT, ...) \
  43        printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
  44#define kdebug(FMT, ...) \
  45        printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
  46#else
  47#define kenter(FMT, ...) \
  48        no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
  49#define kleave(FMT, ...) \
  50        no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
  51#define kdebug(FMT, ...) \
  52        no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
  53#endif
  54
  55void *high_memory;
  56struct page *mem_map;
  57unsigned long max_mapnr;
  58unsigned long num_physpages;
  59unsigned long highest_memmap_pfn;
  60struct percpu_counter vm_committed_as;
  61int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
  62int sysctl_overcommit_ratio = 50; /* default is 50% */
  63int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
  64int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
  65int heap_stack_gap = 0;
  66
  67atomic_long_t mmap_pages_allocated;
  68
  69/*
  70 * The global memory commitment made in the system can be a metric
  71 * that can be used to drive ballooning decisions when Linux is hosted
  72 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
  73 * balancing memory across competing virtual machines that are hosted.
  74 * Several metrics drive this policy engine including the guest reported
  75 * memory commitment.
  76 */
  77unsigned long vm_memory_committed(void)
  78{
  79        return percpu_counter_read_positive(&vm_committed_as);
  80}
  81
  82EXPORT_SYMBOL_GPL(vm_memory_committed);
  83
  84EXPORT_SYMBOL(mem_map);
  85EXPORT_SYMBOL(num_physpages);
  86
  87/* list of mapped, potentially shareable regions */
  88static struct kmem_cache *vm_region_jar;
  89struct rb_root nommu_region_tree = RB_ROOT;
  90DECLARE_RWSEM(nommu_region_sem);
  91
  92const struct vm_operations_struct generic_file_vm_ops = {
  93};
  94
  95/*
  96 * Return the total memory allocated for this pointer, not
  97 * just what the caller asked for.
  98 *
  99 * Doesn't have to be accurate, i.e. may have races.
 100 */
 101unsigned int kobjsize(const void *objp)
 102{
 103        struct page *page;
 104
 105        /*
 106         * If the object we have should not have ksize performed on it,
 107         * return size of 0
 108         */
 109        if (!objp || !virt_addr_valid(objp))
 110                return 0;
 111
 112        page = virt_to_head_page(objp);
 113
 114        /*
 115         * If the allocator sets PageSlab, we know the pointer came from
 116         * kmalloc().
 117         */
 118        if (PageSlab(page))
 119                return ksize(objp);
 120
 121        /*
 122         * If it's not a compound page, see if we have a matching VMA
 123         * region. This test is intentionally done in reverse order,
 124         * so if there's no VMA, we still fall through and hand back
 125         * PAGE_SIZE for 0-order pages.
 126         */
 127        if (!PageCompound(page)) {
 128                struct vm_area_struct *vma;
 129
 130                vma = find_vma(current->mm, (unsigned long)objp);
 131                if (vma)
 132                        return vma->vm_end - vma->vm_start;
 133        }
 134
 135        /*
 136         * The ksize() function is only guaranteed to work for pointers
 137         * returned by kmalloc(). So handle arbitrary pointers here.
 138         */
 139        return PAGE_SIZE << compound_order(page);
 140}
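
/*
 * Minimal usage sketch (hypothetical helper, disabled): kobjsize() reports
 * the storage actually backing a pointer, so for a kmalloc() object it may
 * exceed the length that was requested.
 */
#if 0
static void kobjsize_example(void)
{
        char *p = kmalloc(100, GFP_KERNEL);

        if (p) {
                /* likely reports 128 on a slab with 128-byte buckets */
                printk(KERN_DEBUG "kobjsize = %u\n", kobjsize(p));
                kfree(p);
        }
}
#endif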
 141
 142int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 143                     unsigned long start, int nr_pages, unsigned int foll_flags,
 144                     struct page **pages, struct vm_area_struct **vmas,
 145                     int *retry)
 146{
 147        struct vm_area_struct *vma;
 148        unsigned long vm_flags;
 149        int i;
 150
 151        /* calculate required read or write permissions.
 152         * If FOLL_FORCE is set, we only require the "MAY" flags.
 153         */
 154        vm_flags  = (foll_flags & FOLL_WRITE) ?
 155                        (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
 156        vm_flags &= (foll_flags & FOLL_FORCE) ?
 157                        (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 158
 159        for (i = 0; i < nr_pages; i++) {
 160                vma = find_vma(mm, start);
 161                if (!vma)
 162                        goto finish_or_fault;
 163
 164                /* protect what we can, including chardevs */
 165                if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
 166                    !(vm_flags & vma->vm_flags))
 167                        goto finish_or_fault;
 168
 169                if (pages) {
 170                        pages[i] = virt_to_page(start);
 171                        if (pages[i])
 172                                page_cache_get(pages[i]);
 173                }
 174                if (vmas)
 175                        vmas[i] = vma;
 176                start = (start + PAGE_SIZE) & PAGE_MASK;
 177        }
 178
 179        return i;
 180
 181finish_or_fault:
 182        return i ? : -EFAULT;
 183}
 184
 185/*
 186 * get a list of pages in an address range belonging to the specified process
 187 * and indicate the VMA that covers each page
 188 * - this is potentially dodgy as we may end up incrementing the page count of a
 189 *   slab page or a secondary page from a compound page
 190 * - don't permit access to VMAs that don't support it, such as I/O mappings
 191 */
 192int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 193        unsigned long start, int nr_pages, int write, int force,
 194        struct page **pages, struct vm_area_struct **vmas)
 195{
 196        int flags = 0;
 197
 198        if (write)
 199                flags |= FOLL_WRITE;
 200        if (force)
 201                flags |= FOLL_FORCE;
 202
 203        return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
 204                                NULL);
 205}
 206EXPORT_SYMBOL(get_user_pages);
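
/*
 * Minimal usage sketch (hypothetical helper, disabled): pinning a single
 * page of the current process with the signature above.  The caller takes
 * mmap_sem for reading and later drops the page reference with
 * page_cache_release() when it has finished with the page.
 */
#if 0
static int pin_one_page_example(unsigned long addr, struct page **page)
{
        int ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, addr, 1,
                             1 /* write */, 0 /* force */, page, NULL);
        up_read(&current->mm->mmap_sem);

        return ret;     /* 1 on success, negative error otherwise */
}
#endif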
 207
 208/**
 209 * follow_pfn - look up PFN at a user virtual address
 210 * @vma: memory mapping
 211 * @address: user virtual address
 212 * @pfn: location to store found PFN
 213 *
 214 * Only IO mappings and raw PFN mappings are allowed.
 215 *
 216 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 217 */
 218int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 219        unsigned long *pfn)
 220{
 221        if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
 222                return -EINVAL;
 223
 224        *pfn = address >> PAGE_SHIFT;
 225        return 0;
 226}
 227EXPORT_SYMBOL(follow_pfn);
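
/*
 * Minimal usage sketch (hypothetical helper, disabled): resolving the PFN
 * behind an IO or raw PFN mapping.  The caller is assumed to hold mmap_sem
 * for reading so that the VMA cannot go away underneath it.
 */
#if 0
static int lookup_pfn_example(struct mm_struct *mm, unsigned long addr,
                              unsigned long *pfn)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (!vma)
                return -EFAULT;

        return follow_pfn(vma, addr, pfn);
}
#endif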
 228
 229DEFINE_RWLOCK(vmlist_lock);
 230struct vm_struct *vmlist;
 231
 232void vfree(const void *addr)
 233{
 234        kfree(addr);
 235}
 236EXPORT_SYMBOL(vfree);
 237
 238void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 239{
 240        /*
 241         *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
 242         * returns only a logical address.
 243         */
 244        return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 245}
 246EXPORT_SYMBOL(__vmalloc);
 247
 248void *vmalloc_user(unsigned long size)
 249{
 250        void *ret;
 251
 252        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 253                        PAGE_KERNEL);
 254        if (ret) {
 255                struct vm_area_struct *vma;
 256
 257                down_write(&current->mm->mmap_sem);
 258                vma = find_vma(current->mm, (unsigned long)ret);
 259                if (vma)
 260                        vma->vm_flags |= VM_USERMAP;
 261                up_write(&current->mm->mmap_sem);
 262        }
 263
 264        return ret;
 265}
 266EXPORT_SYMBOL(vmalloc_user);
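
/*
 * Minimal usage sketch (hypothetical driver fragment, disabled): a buffer
 * allocated with vmalloc_user() has VM_USERMAP set on its covering VMA, so
 * a driver's ->mmap() handler may later hand it to userspace with
 * remap_vmalloc_range().
 */
#if 0
static void *example_buf;       /* assumed to be sized in whole pages */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
#endif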
 267
 268struct page *vmalloc_to_page(const void *addr)
 269{
 270        return virt_to_page(addr);
 271}
 272EXPORT_SYMBOL(vmalloc_to_page);
 273
 274unsigned long vmalloc_to_pfn(const void *addr)
 275{
 276        return page_to_pfn(virt_to_page(addr));
 277}
 278EXPORT_SYMBOL(vmalloc_to_pfn);
 279
 280long vread(char *buf, char *addr, unsigned long count)
 281{
 282        memcpy(buf, addr, count);
 283        return count;
 284}
 285
 286long vwrite(char *buf, char *addr, unsigned long count)
 287{
 288        /* Don't allow overflow */
 289        if ((unsigned long) addr + count < count)
 290                count = -(unsigned long) addr;
 291
 292        memcpy(addr, buf, count);
 293        return count;
 294}
 295
 296/*
 297 *      vmalloc  -  allocate virtually contiguous memory
 298 *
 299 *      @size:          allocation size
 300 *
 301 *      Allocate enough pages to cover @size from the page level
 302 *      allocator and map them into contiguous kernel virtual space.
 303 *
 304 *      For tight control over page level allocator and protection flags
 305 *      use __vmalloc() instead.
 306 */
 307void *vmalloc(unsigned long size)
 308{
 309       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 310}
 311EXPORT_SYMBOL(vmalloc);
 312
 313/*
 314 *      vzalloc - allocate virtually contiguous memory with zero fill
 315 *
 316 *      @size:          allocation size
 317 *
 318 *      Allocate enough pages to cover @size from the page level
 319 *      allocator and map them into contiguous kernel virtual space.
 320 *      The memory allocated is set to zero.
 321 *
 322 *      For tight control over page level allocator and protection flags
 323 *      use __vmalloc() instead.
 324 */
 325void *vzalloc(unsigned long size)
 326{
 327        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 328                        PAGE_KERNEL);
 329}
 330EXPORT_SYMBOL(vzalloc);
 331
 332/**
 333 * vmalloc_node - allocate memory on a specific node
 334 * @size:       allocation size
 335 * @node:       numa node
 336 *
 337 * Allocate enough pages to cover @size from the page level
 338 * allocator and map them into contiguous kernel virtual space.
 339 *
 340 * For tight control over page level allocator and protection flags
 341 * use __vmalloc() instead.
 342 */
 343void *vmalloc_node(unsigned long size, int node)
 344{
 345        return vmalloc(size);
 346}
 347EXPORT_SYMBOL(vmalloc_node);
 348
 349/**
 350 * vzalloc_node - allocate memory on a specific node with zero fill
 351 * @size:       allocation size
 352 * @node:       numa node
 353 *
 354 * Allocate enough pages to cover @size from the page level
 355 * allocator and map them into contiguous kernel virtual space.
 356 * The memory allocated is set to zero.
 357 *
 358 * For tight control over page level allocator and protection flags
 359 * use __vmalloc() instead.
 360 */
 361void *vzalloc_node(unsigned long size, int node)
 362{
 363        return vzalloc(size);
 364}
 365EXPORT_SYMBOL(vzalloc_node);
 366
 367#ifndef PAGE_KERNEL_EXEC
 368# define PAGE_KERNEL_EXEC PAGE_KERNEL
 369#endif
 370
 371/**
 372 *      vmalloc_exec  -  allocate virtually contiguous, executable memory
 373 *      @size:          allocation size
 374 *
 375 *      Kernel-internal function to allocate enough pages to cover @size
 376 *      from the page level allocator and map them into contiguous and
 377 *      executable kernel virtual space.
 378 *
 379 *      For tight control over page level allocator and protection flags
 380 *      use __vmalloc() instead.
 381 */
 382
 383void *vmalloc_exec(unsigned long size)
 384{
 385        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
 386}
 387
 388/**
 389 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 390 *      @size:          allocation size
 391 *
 392 *      Allocate enough 32bit PA addressable pages to cover @size from the
 393 *      page level allocator and map them into contiguous kernel virtual space.
 394 */
 395void *vmalloc_32(unsigned long size)
 396{
 397        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
 398}
 399EXPORT_SYMBOL(vmalloc_32);
 400
 401/**
 402 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 403 *      @size:          allocation size
 404 *
 405 * The resulting memory area is 32bit addressable and zeroed so it can be
 406 * mapped to userspace without leaking data.
 407 *
 408 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 409 * remap_vmalloc_range() are permissible.
 410 */
 411void *vmalloc_32_user(unsigned long size)
 412{
 413        /*
 414         * We'll have to sort out the ZONE_DMA bits for 64-bit,
 415         * but for now this can simply use vmalloc_user() directly.
 416         */
 417        return vmalloc_user(size);
 418}
 419EXPORT_SYMBOL(vmalloc_32_user);
 420
 421void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
 422{
 423        BUG();
 424        return NULL;
 425}
 426EXPORT_SYMBOL(vmap);
 427
 428void vunmap(const void *addr)
 429{
 430        BUG();
 431}
 432EXPORT_SYMBOL(vunmap);
 433
 434void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 435{
 436        BUG();
 437        return NULL;
 438}
 439EXPORT_SYMBOL(vm_map_ram);
 440
 441void vm_unmap_ram(const void *mem, unsigned int count)
 442{
 443        BUG();
 444}
 445EXPORT_SYMBOL(vm_unmap_ram);
 446
 447void vm_unmap_aliases(void)
 448{
 449}
 450EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 451
 452/*
 453 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 454 * have one.
 455 */
 456void  __attribute__((weak)) vmalloc_sync_all(void)
 457{
 458}
 459
 460/**
 461 *      alloc_vm_area - allocate a range of kernel address space
 462 *      @size:          size of the area
 463 *
 464 *      Returns:        NULL on failure, vm_struct on success
 465 *
 466 *      This function reserves a range of kernel address space, and
 467 *      allocates pagetables to map that range.  No actual mappings
 468 *      are created.  If the kernel address space is not shared
 469 *      between processes, it syncs the pagetable across all
 470 *      processes.
 471 */
 472struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 473{
 474        BUG();
 475        return NULL;
 476}
 477EXPORT_SYMBOL_GPL(alloc_vm_area);
 478
 479void free_vm_area(struct vm_struct *area)
 480{
 481        BUG();
 482}
 483EXPORT_SYMBOL_GPL(free_vm_area);
 484
 485int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 486                   struct page *page)
 487{
 488        return -EINVAL;
 489}
 490EXPORT_SYMBOL(vm_insert_page);
 491
 492/*
 493 *  sys_brk() for the most part doesn't need the global kernel
 494 *  lock, except when an application is doing something nasty
 495 *  like trying to un-brk an area that has already been mapped
 496 *  to a regular file.  In this case, the unmapping will need
 497 *  to invoke file system routines that need the global lock.
 498 */
 499SYSCALL_DEFINE1(brk, unsigned long, brk)
 500{
 501        struct mm_struct *mm = current->mm;
 502
 503        if (brk < mm->start_brk || brk > mm->context.end_brk)
 504                return mm->brk;
 505
 506        if (mm->brk == brk)
 507                return mm->brk;
 508
 509        /*
 510         * Always allow shrinking brk
 511         */
 512        if (brk <= mm->brk) {
 513                mm->brk = brk;
 514                return brk;
 515        }
 516
 517        /*
 518         * Ok, looks good - let it rip.
 519         */
 520        flush_icache_range(mm->brk, brk);
 521        return mm->brk = brk;
 522}
 523
 524/*
 525 * initialise the VMA and region record slabs
 526 */
 527void __init mmap_init(void)
 528{
 529        int ret;
 530
 531        ret = percpu_counter_init(&vm_committed_as, 0);
 532        VM_BUG_ON(ret);
 533        vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
 534}
 535
 536/*
 537 * validate the region tree
 538 * - the caller must hold the region lock
 539 */
 540#ifdef CONFIG_DEBUG_NOMMU_REGIONS
 541static noinline void validate_nommu_regions(void)
 542{
 543        struct vm_region *region, *last;
 544        struct rb_node *p, *lastp;
 545
 546        lastp = rb_first(&nommu_region_tree);
 547        if (!lastp)
 548                return;
 549
 550        last = rb_entry(lastp, struct vm_region, vm_rb);
 551        BUG_ON(unlikely(last->vm_end <= last->vm_start));
 552        BUG_ON(unlikely(last->vm_top < last->vm_end));
 553
 554        while ((p = rb_next(lastp))) {
 555                region = rb_entry(p, struct vm_region, vm_rb);
 556                last = rb_entry(lastp, struct vm_region, vm_rb);
 557
 558                BUG_ON(unlikely(region->vm_end <= region->vm_start));
 559                BUG_ON(unlikely(region->vm_top < region->vm_end));
 560                BUG_ON(unlikely(region->vm_start < last->vm_top));
 561
 562                lastp = p;
 563        }
 564}
 565#else
 566static void validate_nommu_regions(void)
 567{
 568}
 569#endif
 570
 571/*
 572 * add a region into the global tree
 573 */
 574static void add_nommu_region(struct vm_region *region)
 575{
 576        struct vm_region *pregion;
 577        struct rb_node **p, *parent;
 578
 579        validate_nommu_regions();
 580
 581        parent = NULL;
 582        p = &nommu_region_tree.rb_node;
 583        while (*p) {
 584                parent = *p;
 585                pregion = rb_entry(parent, struct vm_region, vm_rb);
 586                if (region->vm_start < pregion->vm_start)
 587                        p = &(*p)->rb_left;
 588                else if (region->vm_start > pregion->vm_start)
 589                        p = &(*p)->rb_right;
 590                else if (pregion == region)
 591                        return;
 592                else
 593                        BUG();
 594        }
 595
 596        rb_link_node(&region->vm_rb, parent, p);
 597        rb_insert_color(&region->vm_rb, &nommu_region_tree);
 598
 599        validate_nommu_regions();
 600}
 601
 602/*
 603 * delete a region from the global tree
 604 */
 605static void delete_nommu_region(struct vm_region *region)
 606{
 607        BUG_ON(!nommu_region_tree.rb_node);
 608
 609        validate_nommu_regions();
 610        rb_erase(&region->vm_rb, &nommu_region_tree);
 611        validate_nommu_regions();
 612}
 613
 614/*
 615 * free a contiguous series of pages
 616 */
 617static void free_page_series(unsigned long from, unsigned long to)
 618{
 619        for (; from < to; from += PAGE_SIZE) {
 620                struct page *page = virt_to_page(from);
 621
 622                kdebug("- free %lx", from);
 623                atomic_long_dec(&mmap_pages_allocated);
 624                if (page_count(page) != 1)
 625                        kdebug("free page %p: refcount not one: %d",
 626                               page, page_count(page));
 627                put_page(page);
 628        }
 629}
 630
 631/*
 632 * release a reference to a region
 633 * - the caller must hold the region semaphore for writing, which this releases
 634 * - the region may not have been added to the tree yet, in which case vm_top
 635 *   will equal vm_start
 636 */
 637static void __put_nommu_region(struct vm_region *region)
 638        __releases(nommu_region_sem)
 639{
 640        kenter("%p{%d}", region, region->vm_usage);
 641
 642        BUG_ON(!nommu_region_tree.rb_node);
 643
 644        if (--region->vm_usage == 0) {
 645                if (region->vm_top > region->vm_start)
 646                        delete_nommu_region(region);
 647                up_write(&nommu_region_sem);
 648
 649                if (region->vm_file)
 650                        fput(region->vm_file);
 651
 652                /* IO memory and memory shared directly out of the pagecache
 653                 * from ramfs/tmpfs mustn't be released here */
 654                if (region->vm_flags & VM_MAPPED_COPY) {
 655                        kdebug("free series");
 656                        free_page_series(region->vm_start, region->vm_top);
 657                }
 658                kmem_cache_free(vm_region_jar, region);
 659        } else {
 660                up_write(&nommu_region_sem);
 661        }
 662}
 663
 664/*
 665 * release a reference to a region
 666 */
 667static void put_nommu_region(struct vm_region *region)
 668{
 669        down_write(&nommu_region_sem);
 670        __put_nommu_region(region);
 671}
 672
 673/*
 674 * update protection on a vma
 675 */
 676static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
 677{
 678#ifdef CONFIG_MPU
 679        struct mm_struct *mm = vma->vm_mm;
 680        long start = vma->vm_start & PAGE_MASK;
 681        while (start < vma->vm_end) {
 682                protect_page(mm, start, flags);
 683                start += PAGE_SIZE;
 684        }
 685        update_protections(mm);
 686#endif
 687}
 688
 689/*
 690 * add a VMA into a process's mm_struct in the appropriate place in the list
 691 * and tree, and also add it to the address space's interval tree if it is not
 692 * an anonymous mapping
 693 * - should be called with mm->mmap_sem held writelocked
 694 */
 695static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 696{
 697        struct vm_area_struct *pvma, *prev;
 698        struct address_space *mapping;
 699        struct rb_node **p, *parent, *rb_prev;
 700
 701        kenter(",%p", vma);
 702
 703        BUG_ON(!vma->vm_region);
 704
 705        mm->map_count++;
 706        vma->vm_mm = mm;
 707
 708        protect_vma(vma, vma->vm_flags);
 709
 710        /* add the VMA to the mapping */
 711        if (vma->vm_file) {
 712                mapping = vma->vm_file->f_mapping;
 713
 714                mutex_lock(&mapping->i_mmap_mutex);
 715                flush_dcache_mmap_lock(mapping);
 716                vma_interval_tree_insert(vma, &mapping->i_mmap);
 717                flush_dcache_mmap_unlock(mapping);
 718                mutex_unlock(&mapping->i_mmap_mutex);
 719        }
 720
 721        /* add the VMA to the tree */
 722        parent = rb_prev = NULL;
 723        p = &mm->mm_rb.rb_node;
 724        while (*p) {
 725                parent = *p;
 726                pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
 727
 728                /* sort by: start addr, end addr, VMA struct addr in that order
 729                 * (the latter is necessary as we may get identical VMAs) */
 730                if (vma->vm_start < pvma->vm_start)
 731                        p = &(*p)->rb_left;
 732                else if (vma->vm_start > pvma->vm_start) {
 733                        rb_prev = parent;
 734                        p = &(*p)->rb_right;
 735                } else if (vma->vm_end < pvma->vm_end)
 736                        p = &(*p)->rb_left;
 737                else if (vma->vm_end > pvma->vm_end) {
 738                        rb_prev = parent;
 739                        p = &(*p)->rb_right;
 740                } else if (vma < pvma)
 741                        p = &(*p)->rb_left;
 742                else if (vma > pvma) {
 743                        rb_prev = parent;
 744                        p = &(*p)->rb_right;
 745                } else
 746                        BUG();
 747        }
 748
 749        rb_link_node(&vma->vm_rb, parent, p);
 750        rb_insert_color(&vma->vm_rb, &mm->mm_rb);
 751
 752        /* add VMA to the VMA list also */
 753        prev = NULL;
 754        if (rb_prev)
 755                prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
 756
 757        __vma_link_list(mm, vma, prev, parent);
 758}
 759
 760/*
 761 * delete a VMA from its owning mm_struct and address space
 762 */
 763static void delete_vma_from_mm(struct vm_area_struct *vma)
 764{
 765        struct address_space *mapping;
 766        struct mm_struct *mm = vma->vm_mm;
 767
 768        kenter("%p", vma);
 769
 770        protect_vma(vma, 0);
 771
 772        mm->map_count--;
 773        if (mm->mmap_cache == vma)
 774                mm->mmap_cache = NULL;
 775
 776        /* remove the VMA from the mapping */
 777        if (vma->vm_file) {
 778                mapping = vma->vm_file->f_mapping;
 779
 780                mutex_lock(&mapping->i_mmap_mutex);
 781                flush_dcache_mmap_lock(mapping);
 782                vma_interval_tree_remove(vma, &mapping->i_mmap);
 783                flush_dcache_mmap_unlock(mapping);
 784                mutex_unlock(&mapping->i_mmap_mutex);
 785        }
 786
 787        /* remove from the MM's tree and list */
 788        rb_erase(&vma->vm_rb, &mm->mm_rb);
 789
 790        if (vma->vm_prev)
 791                vma->vm_prev->vm_next = vma->vm_next;
 792        else
 793                mm->mmap = vma->vm_next;
 794
 795        if (vma->vm_next)
 796                vma->vm_next->vm_prev = vma->vm_prev;
 797}
 798
 799/*
 800 * destroy a VMA record
 801 */
 802static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 803{
 804        kenter("%p", vma);
 805        if (vma->vm_ops && vma->vm_ops->close)
 806                vma->vm_ops->close(vma);
 807        if (vma->vm_file)
 808                fput(vma->vm_file);
 809        put_nommu_region(vma->vm_region);
 810        kmem_cache_free(vm_area_cachep, vma);
 811}
 812
 813/*
 814 * look up the first VMA in which addr resides, NULL if none
 815 * - should be called with mm->mmap_sem at least held readlocked
 816 */
 817struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 818{
 819        struct vm_area_struct *vma;
 820
 821        /* check the cache first */
 822        vma = mm->mmap_cache;
 823        if (vma && vma->vm_start <= addr && vma->vm_end > addr)
 824                return vma;
 825
 826        /* trawl the list (there may be multiple mappings in which addr
 827         * resides) */
 828        for (vma = mm->mmap; vma; vma = vma->vm_next) {
 829                if (vma->vm_start > addr)
 830                        return NULL;
 831                if (vma->vm_end > addr) {
 832                        mm->mmap_cache = vma;
 833                        return vma;
 834                }
 835        }
 836
 837        return NULL;
 838}
 839EXPORT_SYMBOL(find_vma);
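
/*
 * Minimal usage sketch (hypothetical helper, disabled): the generic
 * find_vma() contract only guarantees addr < vm_end, so portable callers
 * also check vm_start; the !MMU variant above already returns NULL unless
 * addr lies inside the VMA, which makes the extra test harmless here.
 */
#if 0
static bool addr_is_mapped_example(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;
        bool mapped;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, addr);
        mapped = vma && vma->vm_start <= addr;
        up_read(&mm->mmap_sem);

        return mapped;
}
#endif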
 840
 841/*
 842 * find a VMA
 843 * - we don't extend stack VMAs under NOMMU conditions
 844 */
 845struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
 846{
 847        return find_vma(mm, addr);
 848}
 849
 850/*
 851 * expand a stack to a given address
 852 * - not supported under NOMMU conditions
 853 */
 854int expand_stack(struct vm_area_struct *vma, unsigned long address)
 855{
 856        return -ENOMEM;
 857}
 858
 859/*
 860 * look up the first VMA that exactly matches addr
 861 * - should be called with mm->mmap_sem at least held readlocked
 862 */
 863static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
 864                                             unsigned long addr,
 865                                             unsigned long len)
 866{
 867        struct vm_area_struct *vma;
 868        unsigned long end = addr + len;
 869
 870        /* check the cache first */
 871        vma = mm->mmap_cache;
 872        if (vma && vma->vm_start == addr && vma->vm_end == end)
 873                return vma;
 874
 875        /* trawl the list (there may be multiple mappings in which addr
 876         * resides) */
 877        for (vma = mm->mmap; vma; vma = vma->vm_next) {
 878                if (vma->vm_start < addr)
 879                        continue;
 880                if (vma->vm_start > addr)
 881                        return NULL;
 882                if (vma->vm_end == end) {
 883                        mm->mmap_cache = vma;
 884                        return vma;
 885                }
 886        }
 887
 888        return NULL;
 889}
 890
 891/*
 892 * determine whether a mapping should be permitted and, if so, what sort of
 893 * mapping we're capable of supporting
 894 */
 895static int validate_mmap_request(struct file *file,
 896                                 unsigned long addr,
 897                                 unsigned long len,
 898                                 unsigned long prot,
 899                                 unsigned long flags,
 900                                 unsigned long pgoff,
 901                                 unsigned long *_capabilities)
 902{
 903        unsigned long capabilities, rlen;
 904        int ret;
 905
 906        /* do the simple checks first */
 907        if (flags & MAP_FIXED) {
 908                printk(KERN_DEBUG
 909                       "%d: Can't do fixed-address/overlay mmap of RAM\n",
 910                       current->pid);
 911                return -EINVAL;
 912        }
 913
 914        if ((flags & MAP_TYPE) != MAP_PRIVATE &&
 915            (flags & MAP_TYPE) != MAP_SHARED)
 916                return -EINVAL;
 917
 918        if (!len)
 919                return -EINVAL;
 920
 921        /* Careful about overflows.. */
 922        rlen = PAGE_ALIGN(len);
 923        if (!rlen || rlen > TASK_SIZE)
 924                return -ENOMEM;
 925
 926        /* offset overflow? */
 927        if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
 928                return -EOVERFLOW;
 929
 930        if (file) {
 931                /* validate file mapping requests */
 932                struct address_space *mapping;
 933
 934                /* files must support mmap */
 935                if (!file->f_op || !file->f_op->mmap)
 936                        return -ENODEV;
 937
 938                /* work out if what we've got could possibly be shared
 939                 * - we support chardevs that provide their own "memory"
 940                 * - we support files/blockdevs that are memory backed
 941                 */
 942                mapping = file->f_mapping;
 943                if (!mapping)
 944                        mapping = file->f_path.dentry->d_inode->i_mapping;
 945
 946                capabilities = 0;
 947                if (mapping && mapping->backing_dev_info)
 948                        capabilities = mapping->backing_dev_info->capabilities;
 949
 950                if (!capabilities) {
 951                        /* no explicit capabilities set, so assume some
 952                         * defaults */
 953                        switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
 954                        case S_IFREG:
 955                        case S_IFBLK:
 956                                capabilities = BDI_CAP_MAP_COPY;
 957                                break;
 958
 959                        case S_IFCHR:
 960                                capabilities =
 961                                        BDI_CAP_MAP_DIRECT |
 962                                        BDI_CAP_READ_MAP |
 963                                        BDI_CAP_WRITE_MAP;
 964                                break;
 965
 966                        default:
 967                                return -EINVAL;
 968                        }
 969                }
 970
 971                /* eliminate any capabilities that we can't support on this
 972                 * device */
 973                if (!file->f_op->get_unmapped_area)
 974                        capabilities &= ~BDI_CAP_MAP_DIRECT;
 975                if (!file->f_op->read)
 976                        capabilities &= ~BDI_CAP_MAP_COPY;
 977
 978                /* The file shall have been opened with read permission. */
 979                if (!(file->f_mode & FMODE_READ))
 980                        return -EACCES;
 981
 982                if (flags & MAP_SHARED) {
 983                        /* do checks for writing, appending and locking */
 984                        if ((prot & PROT_WRITE) &&
 985                            !(file->f_mode & FMODE_WRITE))
 986                                return -EACCES;
 987
 988                        if (IS_APPEND(file->f_path.dentry->d_inode) &&
 989                            (file->f_mode & FMODE_WRITE))
 990                                return -EACCES;
 991
 992                        if (locks_verify_locked(file->f_path.dentry->d_inode))
 993                                return -EAGAIN;
 994
 995                        if (!(capabilities & BDI_CAP_MAP_DIRECT))
 996                                return -ENODEV;
 997
 998                        /* we mustn't privatise shared mappings */
 999                        capabilities &= ~BDI_CAP_MAP_COPY;
1000                }
1001                else {
1002                        /* we're going to read the file into private memory we
1003                         * allocate */
1004                        if (!(capabilities & BDI_CAP_MAP_COPY))
1005                                return -ENODEV;
1006
1007                        /* we don't permit a private writable mapping to be
1008                         * shared with the backing device */
1009                        if (prot & PROT_WRITE)
1010                                capabilities &= ~BDI_CAP_MAP_DIRECT;
1011                }
1012
1013                if (capabilities & BDI_CAP_MAP_DIRECT) {
1014                        if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
1015                            ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
1016                            ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
1017                            ) {
1018                                capabilities &= ~BDI_CAP_MAP_DIRECT;
1019                                if (flags & MAP_SHARED) {
1020                                        printk(KERN_WARNING
1021                                               "MAP_SHARED not completely supported on !MMU\n");
1022                                        return -EINVAL;
1023                                }
1024                        }
1025                }
1026
1027                /* handle executable mappings and implied executable
1028                 * mappings */
1029                if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1030                        if (prot & PROT_EXEC)
1031                                return -EPERM;
1032                }
1033                else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
1034                        /* handle implication of PROT_EXEC by PROT_READ */
1035                        if (current->personality & READ_IMPLIES_EXEC) {
1036                                if (capabilities & BDI_CAP_EXEC_MAP)
1037                                        prot |= PROT_EXEC;
1038                        }
1039                }
1040                else if ((prot & PROT_READ) &&
1041                         (prot & PROT_EXEC) &&
1042                         !(capabilities & BDI_CAP_EXEC_MAP)
1043                         ) {
1044                        /* backing file is not executable, try to copy */
1045                        capabilities &= ~BDI_CAP_MAP_DIRECT;
1046                }
1047        }
1048        else {
1049                /* anonymous mappings are always memory backed and can be
1050                 * privately mapped
1051                 */
1052                capabilities = BDI_CAP_MAP_COPY;
1053
1054                /* handle PROT_EXEC implication by PROT_READ */
1055                if ((prot & PROT_READ) &&
1056                    (current->personality & READ_IMPLIES_EXEC))
1057                        prot |= PROT_EXEC;
1058        }
1059
1060        /* allow the security API to have its say */
1061        ret = security_mmap_addr(addr);
1062        if (ret < 0)
1063                return ret;
1064
1065        /* looks okay */
1066        *_capabilities = capabilities;
1067        return 0;
1068}
1069
1070/*
1071 * we've determined that we can make the mapping, now translate what we
1072 * now know into VMA flags
1073 */
1074static unsigned long determine_vm_flags(struct file *file,
1075                                        unsigned long prot,
1076                                        unsigned long flags,
1077                                        unsigned long capabilities)
1078{
1079        unsigned long vm_flags;
1080
1081        vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
1082        /* vm_flags |= mm->def_flags; */
1083
1084        if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
1085                /* attempt to share read-only copies of mapped file chunks */
1086                vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1087                if (file && !(prot & PROT_WRITE))
1088                        vm_flags |= VM_MAYSHARE;
1089        } else {
1090                /* overlay a shareable mapping on the backing device or inode
1091                 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1092                 * romfs/cramfs */
1093                vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
1094                if (flags & MAP_SHARED)
1095                        vm_flags |= VM_SHARED;
1096        }
1097
1098        /* refuse to let anyone share private mappings with this process if
1099         * it's being traced - otherwise breakpoints set in it may interfere
1100         * with another untraced process
1101         */
1102        if ((flags & MAP_PRIVATE) && current->ptrace)
1103                vm_flags &= ~VM_MAYSHARE;
1104
1105        return vm_flags;
1106}
1107
1108/*
1109 * set up a shared mapping on a file (the driver or filesystem provides and
1110 * pins the storage)
1111 */
1112static int do_mmap_shared_file(struct vm_area_struct *vma)
1113{
1114        int ret;
1115
1116        ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1117        if (ret == 0) {
1118                vma->vm_region->vm_top = vma->vm_region->vm_end;
1119                return 0;
1120        }
1121        if (ret != -ENOSYS)
1122                return ret;
1123
1124        /* getting -ENOSYS indicates that direct mmap isn't possible (as
1125         * opposed to tried but failed) so we can only give a suitable error as
1126         * it's not possible to make a private copy if MAP_SHARED was given */
1127        return -ENODEV;
1128}
1129
1130/*
1131 * set up a private mapping or an anonymous shared mapping
1132 */
1133static int do_mmap_private(struct vm_area_struct *vma,
1134                           struct vm_region *region,
1135                           unsigned long len,
1136                           unsigned long capabilities)
1137{
1138        struct page *pages;
1139        unsigned long total, point, n;
1140        void *base;
1141        int ret, order;
1142
1143        /* invoke the file's mapping function so that it can keep track of
1144         * shared mappings on devices or memory
1145         * - VM_MAYSHARE will be set if it may attempt to share
1146         */
1147        if (capabilities & BDI_CAP_MAP_DIRECT) {
1148                ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1149                if (ret == 0) {
1150                        /* shouldn't return success if we're not sharing */
1151                        BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
1152                        vma->vm_region->vm_top = vma->vm_region->vm_end;
1153                        return 0;
1154                }
1155                if (ret != -ENOSYS)
1156                        return ret;
1157
1158                /* getting an ENOSYS error indicates that direct mmap isn't
1159                 * possible (as opposed to tried but failed) so we'll try to
1160                 * make a private copy of the data and map that instead */
1161        }
1162
1163
1164        /* allocate some memory to hold the mapping
1165         * - note that this may not return a page-aligned address if the object
1166         *   we're allocating is smaller than a page
1167         */
1168        order = get_order(len);
1169        kdebug("alloc order %d for %lx", order, len);
1170
1171        pages = alloc_pages(GFP_KERNEL, order);
1172        if (!pages)
1173                goto enomem;
1174
1175        total = 1 << order;
1176        atomic_long_add(total, &mmap_pages_allocated);
1177
1178        point = len >> PAGE_SHIFT;
1179
1180        /* we allocated a power-of-2 sized page set, so we may want to trim off
1181         * the excess */
1182        if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
1183                while (total > point) {
1184                        order = ilog2(total - point);
1185                        n = 1 << order;
1186                        kdebug("shave %lu/%lu @%lu", n, total - point, total);
1187                        atomic_long_sub(n, &mmap_pages_allocated);
1188                        total -= n;
1189                        set_page_refcounted(pages + total);
1190                        __free_pages(pages + total, order);
1191                }
1192        }
1193
1194        for (point = 1; point < total; point++)
1195                set_page_refcounted(&pages[point]);
1196
1197        base = page_address(pages);
1198        region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1199        region->vm_start = (unsigned long) base;
1200        region->vm_end   = region->vm_start + len;
1201        region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1202
1203        vma->vm_start = region->vm_start;
1204        vma->vm_end   = region->vm_start + len;
1205
1206        if (vma->vm_file) {
1207                /* read the contents of a file into the copy */
1208                mm_segment_t old_fs;
1209                loff_t fpos;
1210
1211                fpos = vma->vm_pgoff;
1212                fpos <<= PAGE_SHIFT;
1213
1214                old_fs = get_fs();
1215                set_fs(KERNEL_DS);
1216                ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
1217                set_fs(old_fs);
1218
1219                if (ret < 0)
1220                        goto error_free;
1221
1222                /* clear the last little bit */
1223                if (ret < len)
1224                        memset(base + ret, 0, len - ret);
1225
1226        }
1227
1228        return 0;
1229
1230error_free:
1231        free_page_series(region->vm_start, region->vm_top);
1232        region->vm_start = vma->vm_start = 0;
1233        region->vm_end   = vma->vm_end = 0;
1234        region->vm_top   = 0;
1235        return ret;
1236
1237enomem:
1238        printk("Allocation of length %lu from process %d (%s) failed\n",
1239               len, current->pid, current->comm);
1240        show_free_areas(0);
1241        return -ENOMEM;
1242}
1243
1244/*
1245 * handle mapping creation for uClinux
1246 */
1247unsigned long do_mmap_pgoff(struct file *file,
1248                            unsigned long addr,
1249                            unsigned long len,
1250                            unsigned long prot,
1251                            unsigned long flags,
1252                            unsigned long pgoff)
1253{
1254        struct vm_area_struct *vma;
1255        struct vm_region *region;
1256        struct rb_node *rb;
1257        unsigned long capabilities, vm_flags, result;
1258        int ret;
1259
1260        kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
1261
1262        /* decide whether we should attempt the mapping, and if so what sort of
1263         * mapping */
1264        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1265                                    &capabilities);
1266        if (ret < 0) {
1267                kleave(" = %d [val]", ret);
1268                return ret;
1269        }
1270
1271        /* we ignore the address hint */
1272        addr = 0;
1273        len = PAGE_ALIGN(len);
1274
1275        /* we've determined that we can make the mapping, now translate what we
1276         * now know into VMA flags */
1277        vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1278
1279        /* we're going to need to record the mapping */
1280        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1281        if (!region)
1282                goto error_getting_region;
1283
1284        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1285        if (!vma)
1286                goto error_getting_vma;
1287
1288        region->vm_usage = 1;
1289        region->vm_flags = vm_flags;
1290        region->vm_pgoff = pgoff;
1291
1292        INIT_LIST_HEAD(&vma->anon_vma_chain);
1293        vma->vm_flags = vm_flags;
1294        vma->vm_pgoff = pgoff;
1295
1296        if (file) {
1297                region->vm_file = get_file(file);
1298                vma->vm_file = get_file(file);
1299        }
1300
1301        down_write(&nommu_region_sem);
1302
1303        /* if we want to share, we need to check for regions created by other
1304         * mmap() calls that overlap with our proposed mapping
1305         * - we can only share with a superset match on most regular files
1306         * - shared mappings on character devices and memory backed files are
1307         *   permitted to overlap inexactly as far as we are concerned, for in
1308         *   these cases sharing is handled in the driver or filesystem rather
1309         *   than here
1310         */
1311        if (vm_flags & VM_MAYSHARE) {
1312                struct vm_region *pregion;
1313                unsigned long pglen, rpglen, pgend, rpgend, start;
1314
1315                pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1316                pgend = pgoff + pglen;
1317
1318                for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1319                        pregion = rb_entry(rb, struct vm_region, vm_rb);
1320
1321                        if (!(pregion->vm_flags & VM_MAYSHARE))
1322                                continue;
1323
1324                        /* search for overlapping mappings on the same file */
1325                        if (pregion->vm_file->f_path.dentry->d_inode !=
1326                            file->f_path.dentry->d_inode)
1327                                continue;
1328
1329                        if (pregion->vm_pgoff >= pgend)
1330                                continue;
1331
1332                        rpglen = pregion->vm_end - pregion->vm_start;
1333                        rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1334                        rpgend = pregion->vm_pgoff + rpglen;
1335                        if (pgoff >= rpgend)
1336                                continue;
1337
1338                        /* handle inexactly overlapping matches between
1339                         * mappings */
1340                        if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1341                            !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1342                                /* new mapping is not a subset of the region */
1343                                if (!(capabilities & BDI_CAP_MAP_DIRECT))
1344                                        goto sharing_violation;
1345                                continue;
1346                        }
1347
1348                        /* we've found a region we can share */
1349                        pregion->vm_usage++;
1350                        vma->vm_region = pregion;
1351                        start = pregion->vm_start;
1352                        start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1353                        vma->vm_start = start;
1354                        vma->vm_end = start + len;
1355
1356                        if (pregion->vm_flags & VM_MAPPED_COPY) {
1357                                kdebug("share copy");
1358                                vma->vm_flags |= VM_MAPPED_COPY;
1359                        } else {
1360                                kdebug("share mmap");
1361                                ret = do_mmap_shared_file(vma);
1362                                if (ret < 0) {
1363                                        vma->vm_region = NULL;
1364                                        vma->vm_start = 0;
1365                                        vma->vm_end = 0;
1366                                        pregion->vm_usage--;
1367                                        pregion = NULL;
1368                                        goto error_just_free;
1369                                }
1370                        }
1371                        fput(region->vm_file);
1372                        kmem_cache_free(vm_region_jar, region);
1373                        region = pregion;
1374                        result = start;
1375                        goto share;
1376                }
1377
1378                /* obtain the address at which to make a shared mapping
1379                 * - this is the hook for quasi-memory character devices to
1380                 *   tell us the location of a shared mapping
1381                 */
1382                if (capabilities & BDI_CAP_MAP_DIRECT) {
1383                        addr = file->f_op->get_unmapped_area(file, addr, len,
1384                                                             pgoff, flags);
1385                        if (IS_ERR_VALUE(addr)) {
1386                                ret = addr;
1387                                if (ret != -ENOSYS)
1388                                        goto error_just_free;
1389
1390                                /* the driver refused to tell us where to site
1391                                 * the mapping so we'll have to attempt to copy
1392                                 * it */
1393                                ret = -ENODEV;
1394                                if (!(capabilities & BDI_CAP_MAP_COPY))
1395                                        goto error_just_free;
1396
1397                                capabilities &= ~BDI_CAP_MAP_DIRECT;
1398                        } else {
1399                                vma->vm_start = region->vm_start = addr;
1400                                vma->vm_end = region->vm_end = addr + len;
1401                        }
1402                }
1403        }
1404
1405        vma->vm_region = region;
1406
1407        /* set up the mapping
1408         * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
1409         */
1410        if (file && vma->vm_flags & VM_SHARED)
1411                ret = do_mmap_shared_file(vma);
1412        else
1413                ret = do_mmap_private(vma, region, len, capabilities);
1414        if (ret < 0)
1415                goto error_just_free;
1416        add_nommu_region(region);
1417
1418        /* clear anonymous mappings that don't ask for uninitialized data */
1419        if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
1420                memset((void *)region->vm_start, 0,
1421                       region->vm_end - region->vm_start);
1422
1423        /* okay... we have a mapping; now we have to register it */
1424        result = vma->vm_start;
1425
1426        current->mm->total_vm += len >> PAGE_SHIFT;
1427
1428share:
1429        add_vma_to_mm(current->mm, vma);
1430
1431        /* we flush the region from the icache only when the first executable
1432         * mapping of it is made  */
1433        if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1434                flush_icache_range(region->vm_start, region->vm_end);
1435                region->vm_icache_flushed = true;
1436        }
1437
1438        up_write(&nommu_region_sem);
1439
1440        kleave(" = %lx", result);
1441        return result;
1442
1443error_just_free:
1444        up_write(&nommu_region_sem);
1445error:
1446        if (region->vm_file)
1447                fput(region->vm_file);
1448        kmem_cache_free(vm_region_jar, region);
1449        if (vma->vm_file)
1450                fput(vma->vm_file);
1451        kmem_cache_free(vm_area_cachep, vma);
1452        kleave(" = %d", ret);
1453        return ret;
1454
1455sharing_violation:
1456        up_write(&nommu_region_sem);
1457        printk(KERN_WARNING "Attempt to share mismatched mappings\n");
1458        ret = -EINVAL;
1459        goto error;
1460
1461error_getting_vma:
1462        kmem_cache_free(vm_region_jar, region);
1463        printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
1464               " from process %d failed\n",
1465               len, current->pid);
1466        show_free_areas(0);
1467        return -ENOMEM;
1468
1469error_getting_region:
1470        printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
1471               " from process %d failed\n",
1472               len, current->pid);
1473        show_free_areas(0);
1474        return -ENOMEM;
1475}
1476
1477SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1478                unsigned long, prot, unsigned long, flags,
1479                unsigned long, fd, unsigned long, pgoff)
1480{
1481        struct file *file = NULL;
1482        unsigned long retval = -EBADF;
1483
1484        audit_mmap_fd(fd, flags);
1485        if (!(flags & MAP_ANONYMOUS)) {
1486                file = fget(fd);
1487                if (!file)
1488                        goto out;
1489        }
1490
1491        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1492
1493        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1494
1495        if (file)
1496                fput(file);
1497out:
1498        return retval;
1499}
1500
1501#ifdef __ARCH_WANT_SYS_OLD_MMAP
1502struct mmap_arg_struct {
1503        unsigned long addr;
1504        unsigned long len;
1505        unsigned long prot;
1506        unsigned long flags;
1507        unsigned long fd;
1508        unsigned long offset;
1509};
1510
1511SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1512{
1513        struct mmap_arg_struct a;
1514
1515        if (copy_from_user(&a, arg, sizeof(a)))
1516                return -EFAULT;
1517        if (a.offset & ~PAGE_MASK)
1518                return -EINVAL;
1519
1520        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1521                              a.offset >> PAGE_SHIFT);
1522}
1523#endif /* __ARCH_WANT_SYS_OLD_MMAP */
1524
1525/*
1526 * split a VMA into two pieces at address 'addr'; a new VMA is allocated either
1527 * for the first part or for the tail.
1528 */
1529int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1530              unsigned long addr, int new_below)
1531{
1532        struct vm_area_struct *new;
1533        struct vm_region *region;
1534        unsigned long npages;
1535
1536        kenter("");
1537
1538        /* we're only permitted to split anonymous regions (these should have
1539         * only a single usage on the region) */
1540        if (vma->vm_file)
1541                return -ENOMEM;
1542
1543        if (mm->map_count >= sysctl_max_map_count)
1544                return -ENOMEM;
1545
1546        region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1547        if (!region)
1548                return -ENOMEM;
1549
1550        new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1551        if (!new) {
1552                kmem_cache_free(vm_region_jar, region);
1553                return -ENOMEM;
1554        }
1555
1556        /* most fields are the same, copy all, and then fixup */
1557        *new = *vma;
1558        *region = *vma->vm_region;
1559        new->vm_region = region;
1560
1561        npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1562
1563        if (new_below) {
1564                region->vm_top = region->vm_end = new->vm_end = addr;
1565        } else {
1566                region->vm_start = new->vm_start = addr;
1567                region->vm_pgoff = new->vm_pgoff += npages;
1568        }
1569
1570        if (new->vm_ops && new->vm_ops->open)
1571                new->vm_ops->open(new);
1572
1573        delete_vma_from_mm(vma);
1574        down_write(&nommu_region_sem);
1575        delete_nommu_region(vma->vm_region);
1576        if (new_below) {
1577                vma->vm_region->vm_start = vma->vm_start = addr;
1578                vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1579        } else {
1580                vma->vm_region->vm_end = vma->vm_end = addr;
1581                vma->vm_region->vm_top = addr;
1582        }
1583        add_nommu_region(vma->vm_region);
1584        add_nommu_region(new->vm_region);
1585        up_write(&nommu_region_sem);
1586        add_vma_to_mm(mm, vma);
1587        add_vma_to_mm(mm, new);
1588        return 0;
1589}
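
/*
 * Worked example (illustrative, assuming 4KiB pages): splitting an
 * anonymous VMA covering 0x10000-0x18000 at addr 0x14000 with
 * new_below = 1 leaves
 *
 *	new: vm_start = 0x10000, vm_end = 0x14000
 *	vma: vm_start = 0x14000, vm_end = 0x18000, vm_pgoff advanced by 4
 *
 * with the backing regions adjusted to match; new_below = 0 hands the
 * pieces out the other way around.
 */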
1590
1591/*
1592 * shrink a VMA by removing the specified chunk from either the beginning or
1593 * the end
1594 */
1595static int shrink_vma(struct mm_struct *mm,
1596                      struct vm_area_struct *vma,
1597                      unsigned long from, unsigned long to)
1598{
1599        struct vm_region *region;
1600
1601        kenter("");
1602
1603        /* adjust the VMA's pointers, which may reposition it in the MM's tree
1604         * and list */
1605        delete_vma_from_mm(vma);
1606        if (from > vma->vm_start)
1607                vma->vm_end = from;
1608        else
1609                vma->vm_start = to;
1610        add_vma_to_mm(mm, vma);
1611
1612        /* cut the backing region down to size */
1613        region = vma->vm_region;
1614        BUG_ON(region->vm_usage != 1);
1615
1616        down_write(&nommu_region_sem);
1617        delete_nommu_region(region);
1618        if (from > region->vm_start) {
1619                to = region->vm_top;
1620                region->vm_top = region->vm_end = from;
1621        } else {
1622                region->vm_start = to;
1623        }
1624        add_nommu_region(region);
1625        up_write(&nommu_region_sem);
1626
1627        free_page_series(from, to);
1628        return 0;
1629}
1630
1631/*
1632 * release a mapping
1633 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1634 *   VMA, though it need not cover the whole VMA
1635 */
1636int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1637{
1638        struct vm_area_struct *vma;
1639        unsigned long end;
1640        int ret;
1641
1642        kenter(",%lx,%zx", start, len);
1643
1644        len = PAGE_ALIGN(len);
1645        if (len == 0)
1646                return -EINVAL;
1647
1648        end = start + len;
1649
1650        /* find the first potentially overlapping VMA */
1651        vma = find_vma(mm, start);
1652        if (!vma) {
1653                static int limit = 0;
1654                if (limit < 5) {
1655                        printk(KERN_WARNING
1656                               "munmap of memory not mmapped by process %d"
1657                               " (%s): 0x%lx-0x%lx\n",
1658                               current->pid, current->comm,
1659                               start, start + len - 1);
1660                        limit++;
1661                }
1662                return -EINVAL;
1663        }
1664
1665        /* we're allowed to split an anonymous VMA but not a file-backed one */
1666        if (vma->vm_file) {
1667                do {
1668                        if (start > vma->vm_start) {
1669                                kleave(" = -EINVAL [miss]");
1670                                return -EINVAL;
1671                        }
1672                        if (end == vma->vm_end)
1673                                goto erase_whole_vma;
1674                        vma = vma->vm_next;
1675                } while (vma);
1676                kleave(" = -EINVAL [split file]");
1677                return -EINVAL;
1678        } else {
1679                /* the chunk must be a subset of the VMA found */
1680                if (start == vma->vm_start && end == vma->vm_end)
1681                        goto erase_whole_vma;
1682                if (start < vma->vm_start || end > vma->vm_end) {
1683                        kleave(" = -EINVAL [superset]");
1684                        return -EINVAL;
1685                }
1686                if (start & ~PAGE_MASK) {
1687                        kleave(" = -EINVAL [unaligned start]");
1688                        return -EINVAL;
1689                }
1690                if (end != vma->vm_end && end & ~PAGE_MASK) {
1691                        kleave(" = -EINVAL [unaligned split]");
1692                        return -EINVAL;
1693                }
1694                if (start != vma->vm_start && end != vma->vm_end) {
1695                        ret = split_vma(mm, vma, start, 1);
1696                        if (ret < 0) {
1697                                kleave(" = %d [split]", ret);
1698                                return ret;
1699                        }
1700                }
1701                return shrink_vma(mm, vma, start, end);
1702        }
1703
1704erase_whole_vma:
1705        delete_vma_from_mm(vma);
1706        delete_vma(mm, vma);
1707        kleave(" = 0");
1708        return 0;
1709}
1710EXPORT_SYMBOL(do_munmap);
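
/*
 * Illustrative sketch (assumption, not part of this file): punching a
 * hole in the middle of an anonymous mapping is the one case that
 * needs both split_vma() and shrink_vma() above, e.g. from user space:
 *
 *	char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	munmap(p + 4096, 4096);
 *
 * which splits off the first page and then trims the freed page from
 * the front of the remaining VMA.  File-backed mappings, by contrast,
 * may only be unmapped whole.
 */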
1711
1712int vm_munmap(unsigned long addr, size_t len)
1713{
1714        struct mm_struct *mm = current->mm;
1715        int ret;
1716
1717        down_write(&mm->mmap_sem);
1718        ret = do_munmap(mm, addr, len);
1719        up_write(&mm->mmap_sem);
1720        return ret;
1721}
1722EXPORT_SYMBOL(vm_munmap);
1723
1724SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1725{
1726        return vm_munmap(addr, len);
1727}
1728
1729/*
1730 * release all the mappings made in a process's VM space
1731 */
1732void exit_mmap(struct mm_struct *mm)
1733{
1734        struct vm_area_struct *vma;
1735
1736        if (!mm)
1737                return;
1738
1739        kenter("");
1740
1741        mm->total_vm = 0;
1742
1743        while ((vma = mm->mmap)) {
1744                mm->mmap = vma->vm_next;
1745                delete_vma_from_mm(vma);
1746                delete_vma(mm, vma);
1747                cond_resched();
1748        }
1749
1750        kleave("");
1751}
1752
1753unsigned long vm_brk(unsigned long addr, unsigned long len)
1754{
1755        return -ENOMEM;
1756}
1757
1758/*
1759 * expand (or shrink) an existing mapping, potentially moving it at the same
1760 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1761 *
1762 * under NOMMU conditions, we only permit changing a mapping's size, and only
1763 * as long as it stays within the region allocated by do_mmap_private() and the
1764 * block is not shareable
1765 *
1766 * MREMAP_FIXED is not supported under NOMMU conditions
1767 */
1768unsigned long do_mremap(unsigned long addr,
1769                        unsigned long old_len, unsigned long new_len,
1770                        unsigned long flags, unsigned long new_addr)
1771{
1772        struct vm_area_struct *vma;
1773
1774        /* insanity checks first */
1775        old_len = PAGE_ALIGN(old_len);
1776        new_len = PAGE_ALIGN(new_len);
1777        if (old_len == 0 || new_len == 0)
1778                return (unsigned long) -EINVAL;
1779
1780        if (addr & ~PAGE_MASK)
1781                return -EINVAL;
1782
1783        if (flags & MREMAP_FIXED && new_addr != addr)
1784                return (unsigned long) -EINVAL;
1785
1786        vma = find_vma_exact(current->mm, addr, old_len);
1787        if (!vma)
1788                return (unsigned long) -EINVAL;
1789
1790        if (vma->vm_end != vma->vm_start + old_len)
1791                return (unsigned long) -EFAULT;
1792
1793        if (vma->vm_flags & VM_MAYSHARE)
1794                return (unsigned long) -EPERM;
1795
1796        if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1797                return (unsigned long) -ENOMEM;
1798
1799        /* all checks complete - do it */
1800        vma->vm_end = vma->vm_start + new_len;
1801        return vma->vm_start;
1802}
1803EXPORT_SYMBOL(do_mremap);
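
/*
 * Illustrative example (assumption, not from the original file): with
 * 4KiB pages, a private 8KiB mapping can be shrunk to 4KiB and later
 * grown back to 8KiB in place, but growing it to 16KiB fails with
 * -ENOMEM because new_len would exceed the backing region set up by
 * do_mmap_private(), and MREMAP_MAYMOVE cannot help since the block is
 * never relocated.
 */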
1804
1805SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1806                unsigned long, new_len, unsigned long, flags,
1807                unsigned long, new_addr)
1808{
1809        unsigned long ret;
1810
1811        down_write(&current->mm->mmap_sem);
1812        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1813        up_write(&current->mm->mmap_sem);
1814        return ret;
1815}
1816
1817struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1818                        unsigned int foll_flags)
1819{
1820        return NULL;
1821}
1822
1823int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1824                unsigned long pfn, unsigned long size, pgprot_t prot)
1825{
1826        if (addr != (pfn << PAGE_SHIFT))
1827                return -EINVAL;
1828
1829        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
1830        return 0;
1831}
1832EXPORT_SYMBOL(remap_pfn_range);
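
/*
 * Worked example (illustrative, assuming 4KiB pages): a driver asking
 * to map pfn 0x20000 at address 0x20000000 passes the check above
 * (0x20000 << 12 == 0x20000000), so the call merely marks the VMA
 * VM_IO | VM_PFNMAP; any other combination is rejected with -EINVAL
 * because without an MMU physical memory cannot be mapped anywhere
 * other than where it already sits.
 */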
1833
1834int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1835                        unsigned long pgoff)
1836{
1837        unsigned int size = vma->vm_end - vma->vm_start;
1838
1839        if (!(vma->vm_flags & VM_USERMAP))
1840                return -EINVAL;
1841
1842        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1843        vma->vm_end = vma->vm_start + size;
1844
1845        return 0;
1846}
1847EXPORT_SYMBOL(remap_vmalloc_range);
1848
1849unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1850        unsigned long len, unsigned long pgoff, unsigned long flags)
1851{
1852        return -ENOMEM;
1853}
1854
1855void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1856{
1857}
1858
1859void unmap_mapping_range(struct address_space *mapping,
1860                         loff_t const holebegin, loff_t const holelen,
1861                         int even_cows)
1862{
1863}
1864EXPORT_SYMBOL(unmap_mapping_range);
1865
1866/*
1867 * Check that a process has enough memory to allocate a new virtual
1868 * mapping. 0 means there is enough memory for the allocation to
1869 * succeed and -ENOMEM implies there is not.
1870 *
1871 * We currently support three overcommit policies, which are set via the
1872 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
1873 *
1874 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
1875 * Additional code 2002 Jul 20 by Robert Love.
1876 *
1877 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
1878 *
1879 * Note this is a helper function intended to be used by LSMs which
1880 * wish to use this logic.
1881 */
1882int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1883{
1884        unsigned long free, allowed;
1885
1886        vm_acct_memory(pages);
1887
1888        /*
1889         * Sometimes we want to use more memory than we have
1890         */
1891        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
1892                return 0;
1893
1894        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1895                free = global_page_state(NR_FREE_PAGES);
1896                free += global_page_state(NR_FILE_PAGES);
1897
1898                /*
1899                 * shmem pages shouldn't be counted as free in this
1900                 * case, they can't be purged, only swapped out, and
1901                 * that won't affect the overall amount of available
1902                 * memory in the system.
1903                 */
1904                free -= global_page_state(NR_SHMEM);
1905
1906                free += nr_swap_pages;
1907
1908                /*
1909                 * Any slabs which are created with the
1910                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
1911                 * which are reclaimable, under pressure.  The dentry
1912                 * cache and most inode caches should fall into this category.
1913                 */
1914                free += global_page_state(NR_SLAB_RECLAIMABLE);
1915
1916                /*
1917                 * Leave out the reserved pages: they are not available for anonymous mappings.
1918                 */
1919                if (free <= totalreserve_pages)
1920                        goto error;
1921                else
1922                        free -= totalreserve_pages;
1923
1924                /*
1925                 * Leave the last 3% for root
1926                 */
1927                if (!cap_sys_admin)
1928                        free -= free / 32;
1929
1930                if (free > pages)
1931                        return 0;
1932
1933                goto error;
1934        }
1935
1936        allowed = totalram_pages * sysctl_overcommit_ratio / 100;
1937        /*
1938         * Leave the last 3% for root
1939         */
1940        if (!cap_sys_admin)
1941                allowed -= allowed / 32;
1942        allowed += total_swap_pages;
1943
1944        /* Don't let a single process grow too big:
1945           leave 3% of the size of this process for other processes */
1946        if (mm)
1947                allowed -= mm->total_vm / 32;
1948
1949        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
1950                return 0;
1951
1952error:
1953        vm_unacct_memory(pages);
1954
1955        return -ENOMEM;
1956}
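
/*
 * Worked example (illustrative figures, not from the original file):
 * in OVERCOMMIT_NEVER mode with 16384 pages of RAM (64MiB of 4KiB
 * pages), no swap and sysctl_overcommit_ratio = 50, a non-root caller
 * is limited to
 *
 *	allowed  = 16384 * 50 / 100	= 8192
 *	allowed -= 8192 / 32		= 7936	(3% kept back for root)
 *
 * less mm->total_vm / 32 for its own process, so new commitments are
 * refused once vm_committed_as reaches that threshold.
 */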
1957
1958int in_gate_area_no_mm(unsigned long addr)
1959{
1960        return 0;
1961}
1962
1963int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1964{
1965        BUG();
1966        return 0;
1967}
1968EXPORT_SYMBOL(filemap_fault);
1969
1970int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
1971                             unsigned long size, pgoff_t pgoff)
1972{
1973        BUG();
1974        return 0;
1975}
1976EXPORT_SYMBOL(generic_file_remap_pages);
1977
1978static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1979                unsigned long addr, void *buf, int len, int write)
1980{
1981        struct vm_area_struct *vma;
1982
1983        down_read(&mm->mmap_sem);
1984
1985        /* the access must start within one of the target process's mappings */
1986        vma = find_vma(mm, addr);
1987        if (vma) {
1988                /* don't overrun this mapping */
1989                if (addr + len >= vma->vm_end)
1990                        len = vma->vm_end - addr;
1991
1992                /* only read or write mappings where it is permitted */
1993                if (write && vma->vm_flags & VM_MAYWRITE)
1994                        copy_to_user_page(vma, NULL, addr,
1995                                         (void *) addr, buf, len);
1996                else if (!write && vma->vm_flags & VM_MAYREAD)
1997                        copy_from_user_page(vma, NULL, addr,
1998                                            buf, (void *) addr, len);
1999                else
2000                        len = 0;
2001        } else {
2002                len = 0;
2003        }
2004
2005        up_read(&mm->mmap_sem);
2006
2007        return len;
2008}
2009
2010/**
2011 * access_remote_vm - access another process' address space
2012 * @mm:         the mm_struct of the target address space
2013 * @addr:       start address to access
2014 * @buf:        source or destination buffer
2015 * @len:        number of bytes to transfer
2016 * @write:      whether the access is a write
2017 *
2018 * The caller must hold a reference on @mm.
2019 */
2020int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2021                void *buf, int len, int write)
2022{
2023        return __access_remote_vm(NULL, mm, addr, buf, len, write);
2024}
2025
2026/*
2027 * Access another process' address space.
2028 * - source/target buffer must be kernel space
2029 */
2030int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
2031{
2032        struct mm_struct *mm;
2033
2034        if (addr + len < addr)
2035                return 0;
2036
2037        mm = get_task_mm(tsk);
2038        if (!mm)
2039                return 0;
2040
2041        len = __access_remote_vm(tsk, mm, addr, buf, len, write);
2042
2043        mmput(mm);
2044        return len;
2045}
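
/*
 * Illustrative sketch (assumption, not part of the original file):
 * this is the helper that ptrace-style word reads reduce to, roughly
 *
 *	unsigned long val;
 *	int copied = access_process_vm(child, addr, &val, sizeof(val), 0);
 *	if (copied != sizeof(val))
 *		return -EIO;
 *
 * where a non-zero final argument would request a write instead.
 */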
2046
2047/**
2048 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
2049 * @inode: The inode to check
2050 * @size: The current filesize of the inode
2051 * @newsize: The proposed filesize of the inode
2052 *
2053 * Check the shared mappings on an inode on behalf of a shrinking truncate to
2054 * make sure that that any outstanding VMAs aren't broken and then shrink the
2055 * vm_regions that extend that beyond so that do_mmap_pgoff() doesn't
2056 * automatically grant mappings that are too large.
2057 */
2058int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
2059                                size_t newsize)
2060{
2061        struct vm_area_struct *vma;
2062        struct vm_region *region;
2063        pgoff_t low, high;
2064        size_t r_size, r_top;
2065
2066        low = newsize >> PAGE_SHIFT;
2067        high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2068
2069        down_write(&nommu_region_sem);
2070        mutex_lock(&inode->i_mapping->i_mmap_mutex);
2071
2072        /* search for VMAs that fall within the dead zone */
2073        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
2074                /* found one - only interested if it's shared out of the page
2075                 * cache */
2076                if (vma->vm_flags & VM_SHARED) {
2077                        mutex_unlock(&inode->i_mapping->i_mmap_mutex);
2078                        up_write(&nommu_region_sem);
2079                        return -ETXTBSY; /* not quite true, but near enough */
2080                }
2081        }
2082
2083        /* reduce any regions that overlap the dead zone - if in existence,
2084         * these will be pointed to by VMAs that don't overlap the dead zone
2085         *
2086         * we don't check for any regions that start beyond the EOF as there
2087         * shouldn't be any
2088         */
2089        vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
2090                                  0, ULONG_MAX) {
2091                if (!(vma->vm_flags & VM_SHARED))
2092                        continue;
2093
2094                region = vma->vm_region;
2095                r_size = region->vm_top - region->vm_start;
2096                r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
2097
2098                if (r_top > newsize) {
2099                        region->vm_top -= r_top - newsize;
2100                        if (region->vm_end > region->vm_top)
2101                                region->vm_end = region->vm_top;
2102                }
2103        }
2104
2105        mutex_unlock(&inode->i_mapping->i_mmap_mutex);
2106        up_write(&nommu_region_sem);
2107        return 0;
2108}
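
/*
 * Illustrative sketch (assumption, not part of the original file): a
 * NOMMU filesystem would call this from its truncate path before the
 * file is actually shrunk, along the lines of
 *
 *	ret = nommu_shrink_inode_mappings(inode, i_size_read(inode),
 *					  newsize);
 *	if (ret == 0)
 *		truncate_setsize(inode, newsize);
 *
 * so that an outstanding shared mapping can veto the truncate with
 * -ETXTBSY.
 */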
2109