linux/include/linux/mm.h
   1#ifndef _LINUX_MM_H
   2#define _LINUX_MM_H
   3
   4#include <linux/errno.h>
   5
   6#ifdef __KERNEL__
   7
   8#include <linux/gfp.h>
   9#include <linux/list.h>
  10#include <linux/mmdebug.h>
  11#include <linux/mmzone.h>
  12#include <linux/rbtree.h>
  13#include <linux/prio_tree.h>
  14#include <linux/debug_locks.h>
  15#include <linux/mm_types.h>
  16
  17struct mempolicy;
  18struct anon_vma;
  19struct file_ra_state;
  20struct user_struct;
  21struct writeback_control;
  22
  23#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
  24extern unsigned long max_mapnr;
  25#endif
  26
  27extern unsigned long num_physpages;
  28extern void * high_memory;
  29extern int page_cluster;
  30
  31#ifdef CONFIG_SYSCTL
  32extern int sysctl_legacy_va_layout;
  33#else
  34#define sysctl_legacy_va_layout 0
  35#endif
  36
  37extern unsigned long mmap_min_addr;
  38
  39#include <asm/page.h>
  40#include <asm/pgtable.h>
  41#include <asm/processor.h>
  42
  43#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
  44
  45/* to align the pointer to the (next) page boundary */
  46#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
  47
  48/*
  49 * Linux kernel virtual memory manager primitives.
  50 * The idea being to have a "virtual" mm in the same way
  51 * we have a virtual fs - giving a cleaner interface to the
  52 * mm details, and allowing different kinds of memory mappings
  53 * (from shared memory to executable loading to arbitrary
  54 * mmap() functions).
  55 */
  56
  57extern struct kmem_cache *vm_area_cachep;
  58
  59/*
  60 * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
  61 * disabled, then there's a single shared list of VMAs maintained by the
  62 * system, and mm's subscribe to these individually
  63 */
  64struct vm_list_struct {
  65        struct vm_list_struct   *next;
  66        struct vm_area_struct   *vma;
  67};
  68
  69#ifndef CONFIG_MMU
  70extern struct rb_root nommu_vma_tree;
  71extern struct rw_semaphore nommu_vma_sem;
  72
  73extern unsigned int kobjsize(const void *objp);
  74#endif
  75
  76/*
  77 * vm_flags in vm_area_struct, see mm_types.h.
  78 */
  79#define VM_READ         0x00000001      /* currently active flags */
  80#define VM_WRITE        0x00000002
  81#define VM_EXEC         0x00000004
  82#define VM_SHARED       0x00000008
  83
  84/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
  85#define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
  86#define VM_MAYWRITE     0x00000020
  87#define VM_MAYEXEC      0x00000040
  88#define VM_MAYSHARE     0x00000080
  89
  90#define VM_GROWSDOWN    0x00000100      /* general info on the segment */
  91#define VM_GROWSUP      0x00000200
  92#define VM_PFNMAP       0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
  93#define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */
  94
  95#define VM_EXECUTABLE   0x00001000
  96#define VM_LOCKED       0x00002000
  97#define VM_IO           0x00004000      /* Memory mapped I/O or similar */
  98
  99                                        /* Used by sys_madvise() */
 100#define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
 101#define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */
 102
 103#define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
 104#define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
 105#define VM_RESERVED     0x00080000      /* Count as reserved_vm like IO */
 106#define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
 107#define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
 108#define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
 109#define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
 110#define VM_MAPPED_COPY  0x01000000      /* T if mapped copy of data (nommu mmap) */
 111#define VM_INSERTPAGE   0x02000000      /* The vma has had "vm_insert_page()" done on it */
 112#define VM_ALWAYSDUMP   0x04000000      /* Always include in core dumps */
 113
 114#define VM_CAN_NONLINEAR 0x08000000     /* Has ->fault & does nonlinear pages */
 115#define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
 116#define VM_SAO          0x20000000      /* Strong Access Ordering (powerpc) */
 117
 118#ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
 119#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 120#endif
 121
 122#ifdef CONFIG_STACK_GROWSUP
 123#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 124#else
 125#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 126#endif
 127
 128#define VM_READHINTMASK                 (VM_SEQ_READ | VM_RAND_READ)
 129#define VM_ClearReadHint(v)             (v)->vm_flags &= ~VM_READHINTMASK
 130#define VM_NormalReadHint(v)            (!((v)->vm_flags & VM_READHINTMASK))
 131#define VM_SequentialReadHint(v)        ((v)->vm_flags & VM_SEQ_READ)
 132#define VM_RandomReadHint(v)            ((v)->vm_flags & VM_RAND_READ)
 133
 134/*
  135 * special vmas that are non-mergeable, non-mlock()able
 136 */
 137#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
 138
 139/*
 140 * mapping from the currently active vm_flags protection bits (the
 141 * low four bits) to a page protection mask..
 142 */
 143extern pgprot_t protection_map[16];
 144
 145#define FAULT_FLAG_WRITE        0x01    /* Fault was a write access */
 146#define FAULT_FLAG_NONLINEAR    0x02    /* Fault was via a nonlinear mapping */
 147
 148
 149/*
  150 * vm_fault is filled by the pagefault handler and passed to the vma's
 151 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 152 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 153 *
 154 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 155 * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
 156 * mapping support.
 157 */
 158struct vm_fault {
 159        unsigned int flags;             /* FAULT_FLAG_xxx flags */
 160        pgoff_t pgoff;                  /* Logical page offset based on vma */
 161        void __user *virtual_address;   /* Faulting virtual address */
 162
 163        struct page *page;              /* ->fault handlers should return a
 164                                         * page here, unless VM_FAULT_NOPAGE
 165                                         * is set (which is also implied by
 166                                         * VM_FAULT_ERROR).
 167                                         */
 168};
 169
 170/*
  171 * These are the virtual MM functions - opening of an area, closing and
  172 * unmapping it (needed to keep files on disk up-to-date etc), and pointers
  173 * to the functions called when a no-page or a wp-page exception occurs.
 174 */
 175struct vm_operations_struct {
 176        void (*open)(struct vm_area_struct * area);
 177        void (*close)(struct vm_area_struct * area);
 178        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 179
 180        /* notification that a previously read-only page is about to become
 181         * writable, if an error is returned it will cause a SIGBUS */
 182        int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
 183
 184        /* called by access_process_vm when get_user_pages() fails, typically
 185         * for use by special VMAs that can switch between memory and hardware
 186         */
 187        int (*access)(struct vm_area_struct *vma, unsigned long addr,
 188                      void *buf, int len, int write);
 189#ifdef CONFIG_NUMA
 190        /*
 191         * set_policy() op must add a reference to any non-NULL @new mempolicy
 192         * to hold the policy upon return.  Caller should pass NULL @new to
 193         * remove a policy and fall back to surrounding context--i.e. do not
 194         * install a MPOL_DEFAULT policy, nor the task or system default
 195         * mempolicy.
 196         */
 197        int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 198
 199        /*
 200         * get_policy() op must add reference [mpol_get()] to any policy at
 201         * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
 202         * in mm/mempolicy.c will do this automatically.
 203         * get_policy() must NOT add a ref if the policy at (vma,addr) is not
 204         * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
 205         * If no [shared/vma] mempolicy exists at the addr, get_policy() op
 206         * must return NULL--i.e., do not "fallback" to task or system default
 207         * policy.
 208         */
 209        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 210                                        unsigned long addr);
 211        int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
 212                const nodemask_t *to, unsigned long flags);
 213#endif
 214};
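/*
 * Illustrative sketch (not part of mm.h): a minimal driver-style ->fault
 * handler and vm_operations_struct.  my_dev_pages/my_dev_npages are
 * hypothetical names standing in for a driver's preallocated page array.
 * The handler takes a reference on the page and stores it in vmf->page;
 * the fault core maps it and later releases that reference.
 */
static struct page **my_dev_pages;	/* hypothetical: preallocated pages */
static unsigned long my_dev_npages;	/* hypothetical: number of pages */

static int my_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	if (vmf->pgoff >= my_dev_npages)
		return VM_FAULT_SIGBUS;

	vmf->page = my_dev_pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static struct vm_operations_struct my_vm_ops = {
	.fault = my_vma_fault,
};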
 215
 216struct mmu_gather;
 217struct inode;
 218
 219#define page_private(page)              ((page)->private)
 220#define set_page_private(page, v)       ((page)->private = (v))
 221
 222/*
 223 * FIXME: take this include out, include page-flags.h in
 224 * files which need it (119 of them)
 225 */
 226#include <linux/page-flags.h>
 227
 228/*
 229 * Methods to modify the page usage count.
 230 *
 231 * What counts for a page usage:
 232 * - cache mapping   (page->mapping)
 233 * - private data    (page->private)
 234 * - page mapped in a task's page tables, each mapping
 235 *   is counted separately
 236 *
 237 * Also, many kernel routines increase the page count before a critical
 238 * routine so they can be sure the page doesn't go away from under them.
 239 */
 240
 241/*
 242 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 243 */
 244static inline int put_page_testzero(struct page *page)
 245{
 246        VM_BUG_ON(atomic_read(&page->_count) == 0);
 247        return atomic_dec_and_test(&page->_count);
 248}
 249
 250/*
 251 * Try to grab a ref unless the page has a refcount of zero, return false if
 252 * that is the case.
 253 */
 254static inline int get_page_unless_zero(struct page *page)
 255{
 256        VM_BUG_ON(PageTail(page));
 257        return atomic_inc_not_zero(&page->_count);
 258}
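/*
 * Illustrative sketch (not part of mm.h): the usual lockless-lookup pattern
 * built on get_page_unless_zero().  A speculative reference is only taken if
 * the page still has users; if the attempt fails, the page may already be on
 * its way back to the allocator and must simply be skipped.
 */
static int try_pin_page(struct page *page)
{
	if (!get_page_unless_zero(page))
		return 0;		/* lost the race, page was being freed */
	/* ... safe to use the page here ... */
	put_page(page);
	return 1;
}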
 259
 260/* Support for virtually mapped pages */
 261struct page *vmalloc_to_page(const void *addr);
 262unsigned long vmalloc_to_pfn(const void *addr);
 263
 264/*
 265 * Determine if an address is within the vmalloc range
 266 *
 267 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 268 * is no special casing required.
 269 */
 270static inline int is_vmalloc_addr(const void *x)
 271{
 272#ifdef CONFIG_MMU
 273        unsigned long addr = (unsigned long)x;
 274
 275        return addr >= VMALLOC_START && addr < VMALLOC_END;
 276#else
 277        return 0;
 278#endif
 279}
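/*
 * Illustrative sketch (not part of mm.h): resolving the struct page behind a
 * kernel buffer that may live either in the vmalloc area or in the linear
 * mapping.  Assumes the buffer came from vmalloc() or kmalloc() in the caller.
 */
static struct page *buf_to_page(const void *buf)
{
	if (is_vmalloc_addr(buf))
		return vmalloc_to_page(buf);	/* walks the kernel page tables */
	return virt_to_page(buf);		/* linear-mapped (kmalloc) memory */
}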
 280
 281static inline struct page *compound_head(struct page *page)
 282{
 283        if (unlikely(PageTail(page)))
 284                return page->first_page;
 285        return page;
 286}
 287
 288static inline int page_count(struct page *page)
 289{
 290        return atomic_read(&compound_head(page)->_count);
 291}
 292
 293static inline void get_page(struct page *page)
 294{
 295        page = compound_head(page);
 296        VM_BUG_ON(atomic_read(&page->_count) == 0);
 297        atomic_inc(&page->_count);
 298}
 299
 300static inline struct page *virt_to_head_page(const void *x)
 301{
 302        struct page *page = virt_to_page(x);
 303        return compound_head(page);
 304}
 305
 306/*
 307 * Setup the page count before being freed into the page allocator for
 308 * the first time (boot or memory hotplug)
 309 */
 310static inline void init_page_count(struct page *page)
 311{
 312        atomic_set(&page->_count, 1);
 313}
 314
 315void put_page(struct page *page);
 316void put_pages_list(struct list_head *pages);
 317
 318void split_page(struct page *page, unsigned int order);
 319
 320/*
 321 * Compound pages have a destructor function.  Provide a
 322 * prototype for that function and accessor functions.
 323 * These are _only_ valid on the head of a PG_compound page.
 324 */
 325typedef void compound_page_dtor(struct page *);
 326
 327static inline void set_compound_page_dtor(struct page *page,
 328                                                compound_page_dtor *dtor)
 329{
 330        page[1].lru.next = (void *)dtor;
 331}
 332
 333static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 334{
 335        return (compound_page_dtor *)page[1].lru.next;
 336}
 337
 338static inline int compound_order(struct page *page)
 339{
 340        if (!PageHead(page))
 341                return 0;
 342        return (unsigned long)page[1].lru.prev;
 343}
 344
 345static inline void set_compound_order(struct page *page, unsigned long order)
 346{
 347        page[1].lru.prev = (void *)order;
 348}
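/*
 * Illustrative sketch (not part of mm.h): the accessors above operate on
 * pages allocated as a compound unit, e.g. with __GFP_COMP.  Tail pages
 * point back at the head, so compound_head() and compound_order() work from
 * any constituent page of the allocation.
 */
static void compound_demo(void)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);

	if (!page)
		return;
	BUG_ON(compound_order(page) != 2);		/* order recorded on page[1] */
	BUG_ON(compound_head(page + 1) != page);	/* tail points at head */
	__free_pages(page, 2);
}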
 349
 350/*
 351 * Multiple processes may "see" the same page. E.g. for untouched
 352 * mappings of /dev/null, all processes see the same page full of
 353 * zeroes, and text pages of executables and shared libraries have
 354 * only one copy in memory, at most, normally.
 355 *
 356 * For the non-reserved pages, page_count(page) denotes a reference count.
 357 *   page_count() == 0 means the page is free. page->lru is then used for
 358 *   freelist management in the buddy allocator.
 359 *   page_count() > 0  means the page has been allocated.
 360 *
 361 * Pages are allocated by the slab allocator in order to provide memory
 362 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 363 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 364 * unless a particular usage is carefully commented. (the responsibility of
 365 * freeing the kmalloc memory is the caller's, of course).
 366 *
 367 * A page may be used by anyone else who does a __get_free_page().
 368 * In this case, page_count still tracks the references, and should only
 369 * be used through the normal accessor functions. The top bits of page->flags
 370 * and page->virtual store page management information, but all other fields
 371 * are unused and could be used privately, carefully. The management of this
 372 * page is the responsibility of the one who allocated it, and those who have
 373 * subsequently been given references to it.
 374 *
 375 * The other pages (we may call them "pagecache pages") are completely
 376 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 377 * The following discussion applies only to them.
 378 *
 379 * A pagecache page contains an opaque `private' member, which belongs to the
 380 * page's address_space. Usually, this is the address of a circular list of
 381 * the page's disk buffers. PG_private must be set to tell the VM to call
 382 * into the filesystem to release these pages.
 383 *
 384 * A page may belong to an inode's memory mapping. In this case, page->mapping
 385 * is the pointer to the inode, and page->index is the file offset of the page,
 386 * in units of PAGE_CACHE_SIZE.
 387 *
 388 * If pagecache pages are not associated with an inode, they are said to be
 389 * anonymous pages. These may become associated with the swapcache, and in that
 390 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 391 *
 392 * In either case (swapcache or inode backed), the pagecache itself holds one
 393 * reference to the page. Setting PG_private should also increment the
  394 * refcount. Each user mapping also has a reference to the page.
 395 *
 396 * The pagecache pages are stored in a per-mapping radix tree, which is
 397 * rooted at mapping->page_tree, and indexed by offset.
 398 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 399 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 400 *
 401 * All pagecache pages may be subject to I/O:
 402 * - inode pages may need to be read from disk,
 403 * - inode pages which have been modified and are MAP_SHARED may need
 404 *   to be written back to the inode on disk,
 405 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 406 *   modified may need to be swapped out to swap space and (later) to be read
 407 *   back into memory.
 408 */
 409
 410/*
 411 * The zone field is never updated after free_area_init_core()
 412 * sets it, so none of the operations on it need to be atomic.
 413 */
 414
 415
 416/*
 417 * page->flags layout:
 418 *
 419 * There are three possibilities for how page->flags get
 420 * laid out.  The first is for the normal case, without
 421 * sparsemem.  The second is for sparsemem when there is
 422 * plenty of space for node and section.  The last is when
 423 * we have run out of space and have to fall back to an
 424 * alternate (slower) way of determining the node.
 425 *
 426 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
 427 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
 428 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 429 */
 430#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 431#define SECTIONS_WIDTH          SECTIONS_SHIFT
 432#else
 433#define SECTIONS_WIDTH          0
 434#endif
 435
 436#define ZONES_WIDTH             ZONES_SHIFT
 437
 438#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
 439#define NODES_WIDTH             NODES_SHIFT
 440#else
 441#ifdef CONFIG_SPARSEMEM_VMEMMAP
 442#error "Vmemmap: No space for nodes field in page flags"
 443#endif
 444#define NODES_WIDTH             0
 445#endif
 446
 447/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
 448#define SECTIONS_PGOFF          ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 449#define NODES_PGOFF             (SECTIONS_PGOFF - NODES_WIDTH)
 450#define ZONES_PGOFF             (NODES_PGOFF - ZONES_WIDTH)
 451
 452/*
  453 * We are going to use the flags for the page to node mapping if it's in
 454 * there.  This includes the case where there is no node, so it is implicit.
 455 */
 456#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
 457#define NODE_NOT_IN_PAGE_FLAGS
 458#endif
 459
 460#ifndef PFN_SECTION_SHIFT
 461#define PFN_SECTION_SHIFT 0
 462#endif
 463
 464/*
  465 * Define the bit shifts to access each section.  For non-existent
 466 * sections we define the shift as 0; that plus a 0 mask ensures
 467 * the compiler will optimise away reference to them.
 468 */
 469#define SECTIONS_PGSHIFT        (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 470#define NODES_PGSHIFT           (NODES_PGOFF * (NODES_WIDTH != 0))
 471#define ZONES_PGSHIFT           (ZONES_PGOFF * (ZONES_WIDTH != 0))
 472
  473/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
  474#ifdef NODE_NOT_IN_PAGE_FLAGS
 475#define ZONEID_SHIFT            (SECTIONS_SHIFT + ZONES_SHIFT)
 476#define ZONEID_PGOFF            ((SECTIONS_PGOFF < ZONES_PGOFF)? \
 477                                                SECTIONS_PGOFF : ZONES_PGOFF)
 478#else
 479#define ZONEID_SHIFT            (NODES_SHIFT + ZONES_SHIFT)
 480#define ZONEID_PGOFF            ((NODES_PGOFF < ZONES_PGOFF)? \
 481                                                NODES_PGOFF : ZONES_PGOFF)
 482#endif
 483
 484#define ZONEID_PGSHIFT          (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 485
 486#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 487#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 488#endif
 489
 490#define ZONES_MASK              ((1UL << ZONES_WIDTH) - 1)
 491#define NODES_MASK              ((1UL << NODES_WIDTH) - 1)
 492#define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
 493#define ZONEID_MASK             ((1UL << ZONEID_SHIFT) - 1)
 494
 495static inline enum zone_type page_zonenum(struct page *page)
 496{
 497        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 498}
 499
 500/*
 501 * The identification function is only used by the buddy allocator for
 502 * determining if two pages could be buddies. We are not really
  503 * identifying a zone since we could be using the section number
  504 * id if no node id is available in page flags.
 505 * We guarantee only that it will return the same value for two
 506 * combinable pages in a zone.
 507 */
 508static inline int page_zone_id(struct page *page)
 509{
 510        return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 511}
 512
 513static inline int zone_to_nid(struct zone *zone)
 514{
 515#ifdef CONFIG_NUMA
 516        return zone->node;
 517#else
 518        return 0;
 519#endif
 520}
 521
 522#ifdef NODE_NOT_IN_PAGE_FLAGS
 523extern int page_to_nid(struct page *page);
 524#else
 525static inline int page_to_nid(struct page *page)
 526{
 527        return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 528}
 529#endif
 530
 531static inline struct zone *page_zone(struct page *page)
 532{
 533        return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 534}
 535
 536#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 537static inline unsigned long page_to_section(struct page *page)
 538{
 539        return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 540}
 541#endif
 542
 543static inline void set_page_zone(struct page *page, enum zone_type zone)
 544{
 545        page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 546        page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 547}
 548
 549static inline void set_page_node(struct page *page, unsigned long node)
 550{
 551        page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
 552        page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
 553}
 554
 555static inline void set_page_section(struct page *page, unsigned long section)
 556{
 557        page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
 558        page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 559}
 560
 561static inline void set_page_links(struct page *page, enum zone_type zone,
 562        unsigned long node, unsigned long pfn)
 563{
 564        set_page_zone(page, zone);
 565        set_page_node(page, node);
 566        set_page_section(page, pfn_to_section_nr(pfn));
 567}
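/*
 * Illustrative sketch (not part of mm.h): the setters above pack node, zone
 * and (with classic sparsemem) section into page->flags at boot; the
 * accessors later unpack the very same bits.  page_zone() is built exactly
 * from the node and zone fields, as checked below.
 */
static void page_location_demo(struct page *page)
{
	int nid = page_to_nid(page);			/* NODE bits    */
	enum zone_type zt = page_zonenum(page);		/* ZONE bits    */
	struct zone *zone = page_zone(page);		/* combines both */

	BUG_ON(zone != &NODE_DATA(nid)->node_zones[zt]);
}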
 568
 569/*
  570 * If a hint addr is less than mmap_min_addr, change the hint to be as
 571 * low as possible but still greater than mmap_min_addr
 572 */
 573static inline unsigned long round_hint_to_min(unsigned long hint)
 574{
 575#ifdef CONFIG_SECURITY
 576        hint &= PAGE_MASK;
 577        if (((void *)hint != NULL) &&
 578            (hint < mmap_min_addr))
 579                return PAGE_ALIGN(mmap_min_addr);
 580#endif
 581        return hint;
 582}
 583
 584/*
 585 * Some inline functions in vmstat.h depend on page_zone()
 586 */
 587#include <linux/vmstat.h>
 588
 589static __always_inline void *lowmem_page_address(struct page *page)
 590{
 591        return __va(page_to_pfn(page) << PAGE_SHIFT);
 592}
 593
 594#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
 595#define HASHED_PAGE_VIRTUAL
 596#endif
 597
 598#if defined(WANT_PAGE_VIRTUAL)
 599#define page_address(page) ((page)->virtual)
 600#define set_page_address(page, address)                 \
 601        do {                                            \
 602                (page)->virtual = (address);            \
 603        } while(0)
 604#define page_address_init()  do { } while(0)
 605#endif
 606
 607#if defined(HASHED_PAGE_VIRTUAL)
 608void *page_address(struct page *page);
 609void set_page_address(struct page *page, void *virtual);
 610void page_address_init(void);
 611#endif
 612
 613#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
 614#define page_address(page) lowmem_page_address(page)
 615#define set_page_address(page, address)  do { } while(0)
 616#define page_address_init()  do { } while(0)
 617#endif
 618
 619/*
 620 * On an anonymous page mapped into a user virtual memory area,
 621 * page->mapping points to its anon_vma, not to a struct address_space;
 622 * with the PAGE_MAPPING_ANON bit set to distinguish it.
 623 *
 624 * Please note that, confusingly, "page_mapping" refers to the inode
 625 * address_space which maps the page from disk; whereas "page_mapped"
 626 * refers to user virtual address space into which the page is mapped.
 627 */
 628#define PAGE_MAPPING_ANON       1
 629
 630extern struct address_space swapper_space;
 631static inline struct address_space *page_mapping(struct page *page)
 632{
 633        struct address_space *mapping = page->mapping;
 634
 635        VM_BUG_ON(PageSlab(page));
 636#ifdef CONFIG_SWAP
 637        if (unlikely(PageSwapCache(page)))
 638                mapping = &swapper_space;
 639        else
 640#endif
 641        if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 642                mapping = NULL;
 643        return mapping;
 644}
 645
 646static inline int PageAnon(struct page *page)
 647{
 648        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
 649}
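/*
 * Illustrative sketch (not part of mm.h): page->mapping is overloaded, so
 * callers are expected to go through the helpers above rather than
 * dereferencing it directly.
 */
static void classify_page(struct page *page)
{
	if (PageAnon(page)) {
		/* page->mapping really points at an anon_vma (low bit set) */
	} else if (page_mapping(page)) {
		/* file-backed or swapcache page: a real struct address_space */
	} else {
		/* no mapping: e.g. a free page or one owned by a driver */
	}
}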
 650
 651/*
 652 * Return the pagecache index of the passed page.  Regular pagecache pages
 653 * use ->index whereas swapcache pages use ->private
 654 */
 655static inline pgoff_t page_index(struct page *page)
 656{
 657        if (unlikely(PageSwapCache(page)))
 658                return page_private(page);
 659        return page->index;
 660}
 661
 662/*
 663 * The atomic page->_mapcount, like _count, starts from -1:
 664 * so that transitions both from it and to it can be tracked,
 665 * using atomic_inc_and_test and atomic_add_negative(-1).
 666 */
 667static inline void reset_page_mapcount(struct page *page)
 668{
 669        atomic_set(&(page)->_mapcount, -1);
 670}
 671
 672static inline int page_mapcount(struct page *page)
 673{
 674        return atomic_read(&(page)->_mapcount) + 1;
 675}
 676
 677/*
 678 * Return true if this page is mapped into pagetables.
 679 */
 680static inline int page_mapped(struct page *page)
 681{
 682        return atomic_read(&(page)->_mapcount) >= 0;
 683}
 684
 685/*
 686 * Different kinds of faults, as returned by handle_mm_fault().
 687 * Used to decide whether a process gets delivered SIGBUS or
 688 * just gets major/minor fault counters bumped up.
 689 */
 690
 691#define VM_FAULT_MINOR  0 /* For backwards compat. Remove me quickly. */
 692
 693#define VM_FAULT_OOM    0x0001
 694#define VM_FAULT_SIGBUS 0x0002
 695#define VM_FAULT_MAJOR  0x0004
 696#define VM_FAULT_WRITE  0x0008  /* Special case for get_user_pages */
 697
 698#define VM_FAULT_NOPAGE 0x0100  /* ->fault installed the pte, not return page */
 699#define VM_FAULT_LOCKED 0x0200  /* ->fault locked the returned page */
 700
 701#define VM_FAULT_ERROR  (VM_FAULT_OOM | VM_FAULT_SIGBUS)
 702
 703#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
 704
 705extern void show_free_areas(void);
 706
 707#ifdef CONFIG_SHMEM
 708extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
 709#else
 710static inline int shmem_lock(struct file *file, int lock,
 711                            struct user_struct *user)
 712{
 713        return 0;
 714}
 715#endif
 716struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
 717
 718int shmem_zero_setup(struct vm_area_struct *);
 719
 720#ifndef CONFIG_MMU
 721extern unsigned long shmem_get_unmapped_area(struct file *file,
 722                                             unsigned long addr,
 723                                             unsigned long len,
 724                                             unsigned long pgoff,
 725                                             unsigned long flags);
 726#endif
 727
 728extern int can_do_mlock(void);
 729extern int user_shm_lock(size_t, struct user_struct *);
 730extern void user_shm_unlock(size_t, struct user_struct *);
 731
 732/*
 733 * Parameter block passed down to zap_pte_range in exceptional cases.
 734 */
 735struct zap_details {
 736        struct vm_area_struct *nonlinear_vma;   /* Check page->index if set */
 737        struct address_space *check_mapping;    /* Check page->mapping if set */
 738        pgoff_t first_index;                    /* Lowest page->index to unmap */
 739        pgoff_t last_index;                     /* Highest page->index to unmap */
 740        spinlock_t *i_mmap_lock;                /* For unmap_mapping_range: */
 741        unsigned long truncate_count;           /* Compare vm_truncate_count */
 742};
 743
 744struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 745                pte_t pte);
 746
 747int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 748                unsigned long size);
 749unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 750                unsigned long size, struct zap_details *);
 751unsigned long unmap_vmas(struct mmu_gather **tlb,
 752                struct vm_area_struct *start_vma, unsigned long start_addr,
 753                unsigned long end_addr, unsigned long *nr_accounted,
 754                struct zap_details *);
 755
 756/**
 757 * mm_walk - callbacks for walk_page_range
 758 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 759 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 760 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 761 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 762 * @pte_hole: if set, called for each hole at all levels
 763 *
 764 * (see walk_page_range for more details)
 765 */
 766struct mm_walk {
 767        int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
 768        int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
 769        int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
 770        int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
 771        int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
 772        struct mm_struct *mm;
 773        void *private;
 774};
 775
 776int walk_page_range(unsigned long addr, unsigned long end,
 777                struct mm_walk *walk);
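/*
 * Illustrative sketch (not part of mm.h): counting present ptes in a range
 * with walk_page_range().  The caller is expected to hold mmap_sem; the
 * "private" pointer carries per-walk state into the callbacks.
 */
static int count_pte(pte_t *pte, unsigned long addr, unsigned long next,
		     struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* non-zero would abort the walk */
}

static unsigned long count_present(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pte_entry = count_pte,
		.mm = mm,
		.private = &count,
	};

	walk_page_range(start, end, &walk);
	return count;
}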
 778void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 779                unsigned long end, unsigned long floor, unsigned long ceiling);
 780int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 781                        struct vm_area_struct *vma);
 782void unmap_mapping_range(struct address_space *mapping,
 783                loff_t const holebegin, loff_t const holelen, int even_cows);
 784int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 785                        void *buf, int len, int write);
 786
 787static inline void unmap_shared_mapping_range(struct address_space *mapping,
 788                loff_t const holebegin, loff_t const holelen)
 789{
 790        unmap_mapping_range(mapping, holebegin, holelen, 0);
 791}
 792
 793extern int vmtruncate(struct inode * inode, loff_t offset);
 794extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 795
 796#ifdef CONFIG_MMU
 797extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 798                        unsigned long address, int write_access);
 799#else
 800static inline int handle_mm_fault(struct mm_struct *mm,
 801                        struct vm_area_struct *vma, unsigned long address,
 802                        int write_access)
 803{
 804        /* should never happen if there's no MMU */
 805        BUG();
 806        return VM_FAULT_SIGBUS;
 807}
 808#endif
 809
 810extern int make_pages_present(unsigned long addr, unsigned long end);
 811extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 812
 813int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
 814                int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
 815
 816extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
 817extern void do_invalidatepage(struct page *page, unsigned long offset);
 818
 819int __set_page_dirty_nobuffers(struct page *page);
 820int __set_page_dirty_no_writeback(struct page *page);
 821int redirty_page_for_writepage(struct writeback_control *wbc,
 822                                struct page *page);
 823int set_page_dirty(struct page *page);
 824int set_page_dirty_lock(struct page *page);
 825int clear_page_dirty_for_io(struct page *page);
 826
 827extern unsigned long move_page_tables(struct vm_area_struct *vma,
 828                unsigned long old_addr, struct vm_area_struct *new_vma,
 829                unsigned long new_addr, unsigned long len);
 830extern unsigned long do_mremap(unsigned long addr,
 831                               unsigned long old_len, unsigned long new_len,
 832                               unsigned long flags, unsigned long new_addr);
 833extern int mprotect_fixup(struct vm_area_struct *vma,
 834                          struct vm_area_struct **pprev, unsigned long start,
 835                          unsigned long end, unsigned long newflags);
 836
 837/*
 838 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 839 * operating on current and current->mm (force=0 and doesn't return any vmas).
 840 *
  841 * get_user_pages_fast may take mmap_sem and page table locks, so no assumptions
 842 * can be made about locking. get_user_pages_fast is to be implemented in a
 843 * way that is advantageous (vs get_user_pages()) when the user memory area is
 844 * already faulted in and present in ptes. However if the pages have to be
  845 * faulted in, it may turn out to be slightly slower.
 846 */
 847int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 848                        struct page **pages);
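/*
 * Illustrative sketch (not part of mm.h): pinning a user buffer for I/O via
 * the fast path.  Every page actually pinned must be released with
 * put_page() once the I/O has completed.
 */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int pinned, i;

	pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);
	if (pinned <= 0)
		return pinned;

	/* ... perform the I/O against pages[0..pinned-1] ... */

	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned;
}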
 849
 850/*
 851 * A callback you can register to apply pressure to ageable caches.
 852 *
 853 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
 854 * look through the least-recently-used 'nr_to_scan' entries and
 855 * attempt to free them up.  It should return the number of objects
 856 * which remain in the cache.  If it returns -1, it means it cannot do
 857 * any scanning at this time (eg. there is a risk of deadlock).
 858 *
 859 * The 'gfpmask' refers to the allocation we are currently trying to
 860 * fulfil.
 861 *
 862 * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
 863 * querying the cache size, so a fastpath for that case is appropriate.
 864 */
 865struct shrinker {
 866        int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
 867        int seeks;      /* seeks to recreate an obj */
 868
 869        /* These are for internal use */
 870        struct list_head list;
 871        long nr;        /* objs pending delete */
 872};
 873#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
 874extern void register_shrinker(struct shrinker *);
 875extern void unregister_shrinker(struct shrinker *);
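/*
 * Illustrative sketch (not part of mm.h): a cache registering itself with
 * the shrinker interface described above.  my_cache_objects is a
 * hypothetical counter standing in for the cache's bookkeeping.
 */
static atomic_t my_cache_objects;	/* hypothetical object count */

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;	/* reclaim unsafe in this context */
		/* ... free up to nr_to_scan least-recently-used objects ... */
	}
	return atomic_read(&my_cache_objects);	/* objects remaining */
}

static struct shrinker my_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};
/* register_shrinker(&my_shrinker) at init, unregister_shrinker() at exit */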
 876
 877int vma_wants_writenotify(struct vm_area_struct *vma);
 878
 879extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
 880
 881#ifdef __PAGETABLE_PUD_FOLDED
 882static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
 883                                                unsigned long address)
 884{
 885        return 0;
 886}
 887#else
 888int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
 889#endif
 890
 891#ifdef __PAGETABLE_PMD_FOLDED
 892static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
 893                                                unsigned long address)
 894{
 895        return 0;
 896}
 897#else
 898int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
 899#endif
 900
 901int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
 902int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
 903
 904/*
  905 * The following ifdef is needed to get the 4level-fixup.h header to work.
 906 * Remove it when 4level-fixup.h has been removed.
 907 */
 908#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
 909static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 910{
 911        return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
 912                NULL: pud_offset(pgd, address);
 913}
 914
 915static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 916{
 917        return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
 918                NULL: pmd_offset(pud, address);
 919}
 920#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 921
 922#if USE_SPLIT_PTLOCKS
 923/*
 924 * We tuck a spinlock to guard each pagetable page into its struct page,
 925 * at page->private, with BUILD_BUG_ON to make sure that this will not
 926 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 927 * When freeing, reset page->mapping so free_pages_check won't complain.
 928 */
 929#define __pte_lockptr(page)     &((page)->ptl)
 930#define pte_lock_init(_page)    do {                                    \
 931        spin_lock_init(__pte_lockptr(_page));                           \
 932} while (0)
 933#define pte_lock_deinit(page)   ((page)->mapping = NULL)
 934#define pte_lockptr(mm, pmd)    ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
 935#else   /* !USE_SPLIT_PTLOCKS */
 936/*
 937 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 938 */
 939#define pte_lock_init(page)     do {} while (0)
 940#define pte_lock_deinit(page)   do {} while (0)
 941#define pte_lockptr(mm, pmd)    ({(void)(pmd); &(mm)->page_table_lock;})
 942#endif /* USE_SPLIT_PTLOCKS */
 943
 944static inline void pgtable_page_ctor(struct page *page)
 945{
 946        pte_lock_init(page);
 947        inc_zone_page_state(page, NR_PAGETABLE);
 948}
 949
 950static inline void pgtable_page_dtor(struct page *page)
 951{
 952        pte_lock_deinit(page);
 953        dec_zone_page_state(page, NR_PAGETABLE);
 954}
 955
 956#define pte_offset_map_lock(mm, pmd, address, ptlp)     \
 957({                                                      \
 958        spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
 959        pte_t *__pte = pte_offset_map(pmd, address);    \
 960        *(ptlp) = __ptl;                                \
 961        spin_lock(__ptl);                               \
 962        __pte;                                          \
 963})
 964
 965#define pte_unmap_unlock(pte, ptl)      do {            \
 966        spin_unlock(ptl);                               \
 967        pte_unmap(pte);                                 \
 968} while (0)
 969
 970#define pte_alloc_map(mm, pmd, address)                 \
 971        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
 972                NULL: pte_offset_map(pmd, address))
 973
 974#define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
 975        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
 976                NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
 977
 978#define pte_alloc_kernel(pmd, address)                  \
 979        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
 980                NULL: pte_offset_kernel(pmd, address))
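/*
 * Illustrative sketch (not part of mm.h): the usual pattern for touching a
 * single pte under the correct lock, whichever of the two locking schemes
 * above is in effect.  The pmd is assumed to be known present already.
 */
static int pte_is_present(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;
	int ret;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	ret = pte_present(*pte);
	pte_unmap_unlock(pte, ptl);
	return ret;
}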
 981
 982extern void free_area_init(unsigned long * zones_size);
 983extern void free_area_init_node(int nid, unsigned long * zones_size,
 984                unsigned long zone_start_pfn, unsigned long *zholes_size);
 985#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 986/*
 987 * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
 988 * zones, allocate the backing mem_map and account for memory holes in a more
 989 * architecture independent manner. This is a substitute for creating the
 990 * zone_sizes[] and zholes_size[] arrays and passing them to
 991 * free_area_init_node()
 992 *
  993 * An architecture is expected to register ranges of page frames backed by
  994 * physical memory with add_active_range() before calling
  995 * free_area_init_nodes(), passing in the PFN each zone ends at. For basic
  996 * usage, an architecture is expected to do something like
 997 *
 998 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
 999 *                                                       max_highmem_pfn};
1000 * for_each_valid_physical_page_range()
1001 *      add_active_range(node_id, start_pfn, end_pfn)
1002 * free_area_init_nodes(max_zone_pfns);
1003 *
1004 * If the architecture guarantees that there are no holes in the ranges
1005 * registered with add_active_range(), free_bootmem_active_regions()
1006 * will call free_bootmem_node() for each registered physical page range.
1007 * Similarly sparse_memory_present_with_active_regions() calls
1008 * memory_present() for each range when SPARSEMEM is enabled.
1009 *
1010 * See mm/page_alloc.c for more information on each function exposed by
1011 * CONFIG_ARCH_POPULATES_NODE_MAP
1012 */
1013extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1014extern void add_active_range(unsigned int nid, unsigned long start_pfn,
1015                                        unsigned long end_pfn);
1016extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
1017                                        unsigned long end_pfn);
1018extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
1019                                        unsigned long end_pfn);
1020extern void remove_all_active_ranges(void);
1021extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1022                                                unsigned long end_pfn);
1023extern void get_pfn_range_for_nid(unsigned int nid,
1024                        unsigned long *start_pfn, unsigned long *end_pfn);
1025extern unsigned long find_min_pfn_with_active_regions(void);
1026extern void free_bootmem_with_active_regions(int nid,
1027                                                unsigned long max_low_pfn);
1028typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
1029extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
1030extern void sparse_memory_present_with_active_regions(int nid);
1031#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
1032extern int early_pfn_to_nid(unsigned long pfn);
1033#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
1034#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
1035extern void set_dma_reserve(unsigned long new_dma_reserve);
1036extern void memmap_init_zone(unsigned long, int, unsigned long,
1037                                unsigned long, enum memmap_context);
1038extern void setup_per_zone_pages_min(void);
1039extern void mem_init(void);
1040extern void show_mem(void);
1041extern void si_meminfo(struct sysinfo * val);
1042extern void si_meminfo_node(struct sysinfo *val, int nid);
1043extern int after_bootmem;
1044
1045#ifdef CONFIG_NUMA
1046extern void setup_per_cpu_pageset(void);
1047#else
1048static inline void setup_per_cpu_pageset(void) {}
1049#endif
1050
1051/* prio_tree.c */
1052void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
1053void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
1054void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
1055struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
1056        struct prio_tree_iter *iter);
1057
1058#define vma_prio_tree_foreach(vma, iter, root, begin, end)      \
1059        for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;   \
1060                (vma = vma_prio_tree_next(vma, iter)); )
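/*
 * Illustrative sketch (not part of mm.h): walking every vma that maps a
 * given file offset, the way rmap and truncation code do.  Assumes
 * <linux/fs.h> for struct address_space and that the caller holds
 * mapping->i_mmap_lock to keep the prio tree stable.
 */
static void for_each_mapping_vma(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/* vma maps file offset pgoff somewhere in [vm_start, vm_end) */
	}
}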
1061
1062static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1063                                        struct list_head *list)
1064{
1065        vma->shared.vm_set.parent = NULL;
1066        list_add_tail(&vma->shared.vm_set.list, list);
1067}
1068
1069/* mmap.c */
1070extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1071extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
1072        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1073extern struct vm_area_struct *vma_merge(struct mm_struct *,
1074        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1075        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1076        struct mempolicy *);
1077extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1078extern int split_vma(struct mm_struct *,
1079        struct vm_area_struct *, unsigned long addr, int new_below);
1080extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1081extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1082        struct rb_node **, struct rb_node *);
1083extern void unlink_file_vma(struct vm_area_struct *);
1084extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1085        unsigned long addr, unsigned long len, pgoff_t pgoff);
1086extern void exit_mmap(struct mm_struct *);
1087
1088extern int mm_take_all_locks(struct mm_struct *mm);
1089extern void mm_drop_all_locks(struct mm_struct *mm);
1090
1091#ifdef CONFIG_PROC_FS
 1092/* From fs/proc/base.c. Callers must _not_ hold the mm's exe_file_lock */
1093extern void added_exe_file_vma(struct mm_struct *mm);
1094extern void removed_exe_file_vma(struct mm_struct *mm);
1095#else
1096static inline void added_exe_file_vma(struct mm_struct *mm)
1097{}
1098
1099static inline void removed_exe_file_vma(struct mm_struct *mm)
1100{}
1101#endif /* CONFIG_PROC_FS */
1102
1103extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1104extern int install_special_mapping(struct mm_struct *mm,
1105                                   unsigned long addr, unsigned long len,
1106                                   unsigned long flags, struct page **pages);
1107
1108extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1109
1110extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1111        unsigned long len, unsigned long prot,
1112        unsigned long flag, unsigned long pgoff);
1113extern unsigned long mmap_region(struct file *file, unsigned long addr,
1114        unsigned long len, unsigned long flags,
1115        unsigned int vm_flags, unsigned long pgoff,
1116        int accountable);
1117
1118static inline unsigned long do_mmap(struct file *file, unsigned long addr,
1119        unsigned long len, unsigned long prot,
1120        unsigned long flag, unsigned long offset)
1121{
1122        unsigned long ret = -EINVAL;
1123        if ((offset + PAGE_ALIGN(len)) < offset)
1124                goto out;
1125        if (!(offset & ~PAGE_MASK))
1126                ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
1127out:
1128        return ret;
1129}
1130
1131extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1132
1133extern unsigned long do_brk(unsigned long, unsigned long);
1134
1135/* filemap.c */
1136extern unsigned long page_unuse(struct page *);
1137extern void truncate_inode_pages(struct address_space *, loff_t);
1138extern void truncate_inode_pages_range(struct address_space *,
1139                                       loff_t lstart, loff_t lend);
1140
1141/* generic vm_area_ops exported for stackable file systems */
1142extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
1143
1144/* mm/page-writeback.c */
1145int write_one_page(struct page *page, int wait);
1146
1147/* readahead.c */
1148#define VM_MAX_READAHEAD        128     /* kbytes */
1149#define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
1150
1151int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
1152                        pgoff_t offset, unsigned long nr_to_read);
1153int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
1154                        pgoff_t offset, unsigned long nr_to_read);
1155
1156void page_cache_sync_readahead(struct address_space *mapping,
1157                               struct file_ra_state *ra,
1158                               struct file *filp,
1159                               pgoff_t offset,
1160                               unsigned long size);
1161
1162void page_cache_async_readahead(struct address_space *mapping,
1163                                struct file_ra_state *ra,
1164                                struct file *filp,
1165                                struct page *pg,
1166                                pgoff_t offset,
1167                                unsigned long size);
1168
1169unsigned long max_sane_readahead(unsigned long nr);
1170
1171/* Do stack extension */
1172extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1173#ifdef CONFIG_IA64
1174extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1175#endif
1176extern int expand_stack_downwards(struct vm_area_struct *vma,
1177                                  unsigned long address);
1178
1179/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1180extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
1181extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
1182                                             struct vm_area_struct **pprev);
1183
1184/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
1185   NULL if none.  Assume start_addr < end_addr. */
1186static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
1187{
1188        struct vm_area_struct * vma = find_vma(mm,start_addr);
1189
1190        if (vma && end_addr <= vma->vm_start)
1191                vma = NULL;
1192        return vma;
1193}
1194
1195static inline unsigned long vma_pages(struct vm_area_struct *vma)
1196{
1197        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1198}
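/*
 * Illustrative sketch (not part of mm.h): looking up whether an address is
 * mapped.  find_vma() only guarantees addr < vm_end, so the caller must also
 * check vm_start; mmap_sem is held across the lookup and any use of the vma.
 */
static int addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	int mapped = 0;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma && addr >= vma->vm_start)
		mapped = 1;		/* otherwise addr falls in a hole */
	up_read(&mm->mmap_sem);
	return mapped;
}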
1199
1200pgprot_t vm_get_page_prot(unsigned long vm_flags);
1201struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
1202int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
1203                        unsigned long pfn, unsigned long size, pgprot_t);
1204int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
1205int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1206                        unsigned long pfn);
1207int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1208                        unsigned long pfn);
1209
1210struct page *follow_page(struct vm_area_struct *, unsigned long address,
1211                        unsigned int foll_flags);
1212#define FOLL_WRITE      0x01    /* check pte is writable */
1213#define FOLL_TOUCH      0x02    /* mark page accessed */
1214#define FOLL_GET        0x04    /* do get_page on page */
1215#define FOLL_ANON       0x08    /* give ZERO_PAGE if no pgtable */
1216
1217typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1218                        void *data);
1219extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
1220                               unsigned long size, pte_fn_t fn, void *data);
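/*
 * Illustrative sketch (not part of mm.h): apply_to_page_range() allocates any
 * missing page tables and invokes the callback once per pte with the pte lock
 * held.  Here the callback merely counts the entries it is handed; a real
 * user (e.g. a driver establishing special mappings) would install or modify
 * them.
 */
static int touch_pte(pte_t *pte, pgtable_t token, unsigned long addr,
		     void *data)
{
	unsigned long *visited = data;

	(*visited)++;
	return 0;	/* non-zero aborts the walk */
}

static unsigned long count_range_ptes(struct mm_struct *mm,
				      unsigned long addr, unsigned long size)
{
	unsigned long visited = 0;

	apply_to_page_range(mm, addr, size, touch_pte, &visited);
	return visited;
}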
1221
1222#ifdef CONFIG_PROC_FS
1223void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
1224#else
1225static inline void vm_stat_account(struct mm_struct *mm,
1226                        unsigned long flags, struct file *file, long pages)
1227{
1228}
1229#endif /* CONFIG_PROC_FS */
1230
1231#ifdef CONFIG_DEBUG_PAGEALLOC
1232extern int debug_pagealloc_enabled;
1233
1234extern void kernel_map_pages(struct page *page, int numpages, int enable);
1235
1236static inline void enable_debug_pagealloc(void)
1237{
1238        debug_pagealloc_enabled = 1;
1239}
1240#ifdef CONFIG_HIBERNATION
1241extern bool kernel_page_present(struct page *page);
1242#endif /* CONFIG_HIBERNATION */
1243#else
1244static inline void
1245kernel_map_pages(struct page *page, int numpages, int enable) {}
1246static inline void enable_debug_pagealloc(void)
1247{
1248}
1249#ifdef CONFIG_HIBERNATION
1250static inline bool kernel_page_present(struct page *page) { return true; }
1251#endif /* CONFIG_HIBERNATION */
1252#endif
1253
1254extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
1255#ifdef  __HAVE_ARCH_GATE_AREA
1256int in_gate_area_no_task(unsigned long addr);
1257int in_gate_area(struct task_struct *task, unsigned long addr);
1258#else
1259int in_gate_area_no_task(unsigned long addr);
1260#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
1261#endif  /* __HAVE_ARCH_GATE_AREA */
1262
1263int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
1264                                        void __user *, size_t *, loff_t *);
1265unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
1266                        unsigned long lru_pages);
1267
1268#ifndef CONFIG_MMU
1269#define randomize_va_space 0
1270#else
1271extern int randomize_va_space;
1272#endif
1273
1274const char * arch_vma_name(struct vm_area_struct *vma);
1275void print_vma_addr(char *prefix, unsigned long rip);
1276
1277struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
1278pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
1279pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
1280pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
1281pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
1282void *vmemmap_alloc_block(unsigned long size, int node);
1283void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
1284int vmemmap_populate_basepages(struct page *start_page,
1285                                                unsigned long pages, int node);
1286int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
1287void vmemmap_populate_print_last(void);
1288
1289#endif /* __KERNEL__ */
1290#endif /* _LINUX_MM_H */
1291