linux/include/linux/mm_types.h
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
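/*
 * saved_auxv in struct mm_struct holds (id, value) pairs of unsigned
 * longs, hence the factor of two below; the "+ 1" leaves room for the
 * terminating AT_NULL pair.
 */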
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;

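/*
 * On machines with enough CPUs, page tables are guarded by per-page
 * spinlocks (the ptl field in struct page below) rather than the single
 * mm->page_table_lock, which would otherwise be heavily contended.
 */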
#define USE_SPLIT_PTLOCKS       (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
        /* First double word block */
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        struct address_space *mapping;  /* If low bit clear, points to
                                         * inode address_space, or NULL.
                                         * If page mapped as anonymous
                                         * memory, low bit is set, and
                                         * it points to anon_vma object:
                                         * see PAGE_MAPPING_ANON, in
                                         * linux/mm.h.
                                         */
        /* Second double word */
        struct {
                union {
                        pgoff_t index;          /* Our offset within mapping. */
                        void *freelist;         /* slub/slob first free object */
                        bool pfmemalloc;        /* If set by the page allocator,
                                                 * ALLOC_NO_WATERMARKS was set
                                                 * and the low watermark was not
                                                 * met, implying that the system
                                                 * is under some pressure. The
                                                 * caller should try to ensure
                                                 * this page is only used to
                                                 * free other pages.
                                                 */
                };

                union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
        defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
                        /* Used for cmpxchg_double in slub */
                        unsigned long counters;
#else
                        /*
                         * Keep _count separate from slub cmpxchg_double
                         * data, as the rest of the double word is protected
                         * by slab_lock but _count is not.
                         */
                        unsigned counters;
#endif

                        struct {

                                union {
                                        /*
                                         * Count of ptes mapped in
                                         * mms, to show when page is
                                         * mapped & limit reverse map
                                         * searches.
                                         *
                                         * Also used for refcounting
                                         * tail pages instead of
                                         * _count. Tail pages cannot
                                         * be mapped and keeping the
                                         * tail page _count zero at
                                         * all times guarantees
                                         * get_page_unless_zero() will
                                         * never succeed on tail
                                         * pages.
                                         */
                                        atomic_t _mapcount;

                                        struct { /* SLUB */
                                                unsigned inuse:16;
                                                unsigned objects:15;
                                                unsigned frozen:1;
                                        };
                                        int units;      /* SLOB */
                                };
                                atomic_t _count;                /* Usage count, see below. */
                        };
                };
        };

        /* Third double word block */
        union {
                struct list_head lru;   /* Pageout list, e.g. active_list
                                         * protected by zone->lru_lock !
                                         */
                struct {                /* slub per cpu partial pages */
                        struct page *next;      /* Next partial slab */
#ifdef CONFIG_64BIT
                        int pages;      /* Nr of partial slabs left */
                        int pobjects;   /* Approximate # of objects */
#else
                        short int pages;
                        short int pobjects;
#endif
                };

                struct list_head list;  /* slob's list of pages */
                struct slab *slab_page; /* slab fields */
        };

        /* Remainder is not double word aligned */
        union {
                unsigned long private;          /* Mapping-private opaque data:
                                                 * usually used for buffer_heads
                                                 * if PagePrivate set; used for
                                                 * swp_entry_t if PageSwapCache;
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
#if USE_SPLIT_PTLOCKS
                spinlock_t ptl;
#endif
                struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
                struct page *first_page;        /* Compound tail pages */
        };

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, i.e. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
        unsigned long debug_flags;      /* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
        /*
         * kmemcheck wants to track the status of each byte in a page; this
         * is a pointer to such a status block. NULL if not tracked.
         */
        void *shadow;
#endif

#ifdef LAST_NID_NOT_IN_PAGE_FLAGS
        int _last_nid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
        __aligned(2 * sizeof(unsigned long))
#endif
;
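
/*
 * Illustrative sketch (not part of this header): the double word layout
 * above lets SLUB update the freelist pointer and the packed counters
 * word in one shot with cmpxchg_double().  A simplified fast path looks
 * roughly like this; new_freelist/new_counters stand in for whatever the
 * caller computed:
 *
 *	do {
 *		old_freelist = page->freelist;
 *		old_counters = page->counters;
 *		// derive new_freelist/new_counters from the old values
 *	} while (!cmpxchg_double(&page->freelist, &page->counters,
 *				 old_freelist, old_counters,
 *				 new_freelist, new_counters));
 *
 * This only works because freelist and counters share one naturally
 * aligned double word block, which is what the layout guarantees.
 */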

struct page_frag {
        struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
        __u32 offset;
        __u32 size;
#else
        __u16 offset;
        __u16 size;
#endif
};

typedef unsigned long __nocast vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};

/*
 * This struct defines a virtual memory area (VMA). There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area, etc).
 */
struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */

        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next, *vm_prev;

        struct rb_node vm_rb;

        /*
         * Largest free memory gap in bytes to the left of this VMA.
         * Either between this VMA and vma->vm_prev, or between one of the
         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
         * get_unmapped_area find a free area of the right size.
         */
        unsigned long rb_subtree_gap;

        /* Second cache line starts here. */

        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree, or
         * linkage of vma in the address_space->i_mmap_nonlinear list.
         */
        union {
                struct {
                        struct rb_node rb;
                        unsigned long rb_subtree_last;
                } linear;
                struct list_head nonlinear;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                          * page_table_lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units, *not* PAGE_CACHE_SIZE */
        struct file * vm_file;          /* File we map to (can be NULL). */
        void * vm_private_data;         /* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
};
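
/*
 * Illustrative sketch (not part of this header): rb_subtree_gap is a
 * classic augmented-rbtree value, maintained as the maximum of this
 * VMA's own gap and the cached values of its children (compare
 * vma_compute_subtree_gap() in mm/mmap.c):
 *
 *	gap = vma->vm_start - (vma->vm_prev ? vma->vm_prev->vm_end : 0);
 *	if (vma->vm_rb.rb_left)
 *		gap = max(gap, rb_to_vma(vma->vm_rb.rb_left)->rb_subtree_gap);
 *	if (vma->vm_rb.rb_right)
 *		gap = max(gap, rb_to_vma(vma->vm_rb.rb_right)->rb_subtree_gap);
 *
 * (rb_to_vma() is a made-up shorthand for the rb_entry() conversion.)
 * get_unmapped_area() can then skip any subtree whose rb_subtree_gap is
 * smaller than the request, giving a logarithmic free-area search.
 */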

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

enum {
        MM_FILEPAGES,
        MM_ANONPAGES,
        MM_SWAPENTS,
        NR_MM_COUNTERS
};

#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
        int events;     /* for synchronization threshold */
        int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTLOCKS && CONFIG_MMU */

struct mm_rss_stat {
        atomic_long_t count[NR_MM_COUNTERS];
};

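/*
 * Illustrative sketch (not part of this header): with SPLIT_RSS_COUNTING,
 * hot paths bump the cheap per-thread counters and only fold them into
 * the shared atomic counters after enough events have accumulated,
 * roughly (compare sync_mm_rss() and its callers in mm/memory.c):
 *
 *	current->rss_stat.count[member]++;
 *	if (unlikely(++current->rss_stat.events >= SOME_THRESHOLD))
 *		sync_mm_rss(mm);	// add and reset the cached counts
 *
 * SOME_THRESHOLD is a stand-in name; the point is that the atomic_long_t
 * counters in mm_rss_stat are only touched once per batch.
 */
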
struct mm_struct {
        struct vm_area_struct * mmap;           /* list of VMAs */
        struct rb_root mm_rb;
        struct vm_area_struct * mmap_cache;     /* last find_vma result */
#ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
        void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
        unsigned long mmap_base;                /* base of mmap area */
        unsigned long task_size;                /* size of task vm space */
        unsigned long cached_hole_size;         /* if non-zero, the largest hole below free_area_cache */
        unsigned long free_area_cache;          /* first hole of size cached_hole_size or larger */
        unsigned long highest_vm_end;           /* highest vma end address */
        pgd_t * pgd;
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        int map_count;                          /* number of VMAs */

        spinlock_t page_table_lock;             /* Protects page tables and some counters */
        struct rw_semaphore mmap_sem;

        struct list_head mmlist;                /* List of maybe swapped mm's.  These are globally strung
                                                 * together off init_mm.mmlist, and are protected
                                                 * by mmlist_lock
                                                 */

        unsigned long hiwater_rss;      /* High-watermark of RSS usage */
        unsigned long hiwater_vm;       /* High-water virtual memory usage */

        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
        unsigned long shared_vm;        /* Shared pages (files) */
        unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
        unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
        unsigned long def_flags;
        unsigned long nr_ptes;          /* Page table pages */
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;

        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

        /*
         * Special counters, in some configurations protected by the
         * page_table_lock, in other configurations by being atomic.
         */
        struct mm_rss_stat rss_stat;

        struct linux_binfmt *binfmt;

        cpumask_var_t cpu_vm_mask_var;

        /* Architecture-specific MM context */
        mm_context_t context;

        unsigned long flags; /* Must use atomic bitops to access the bits */

        struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
        spinlock_t              ioctx_lock;
        struct hlist_head       ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
        /*
         * "owner" points to a task that is regarded as the canonical
         * user/owner of this mm. All of the following must be true in
         * order for it to be changed:
         *
         * current == mm->owner
         * current->mm != mm
         * new_owner->mm == mm
         * new_owner->alloc_lock is held
         */
        struct task_struct __rcu *owner;
#endif

        /* store ref to file /proc/<pid>/exe symlink points to */
        struct file *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
        struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
        struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
        /*
         * numa_next_scan is the next time that the PTEs will be marked
         * pte_numa. NUMA hinting faults will gather statistics and migrate
         * pages to new nodes if necessary.
         */
        unsigned long numa_next_scan;

        /* numa_next_reset is when the PTE scanner period will be reset */
        unsigned long numa_next_reset;

        /* Restart point for scanning and setting pte_numa */
        unsigned long numa_scan_offset;

        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;

        /*
         * The first node a task was scheduled on. PTE scanning is
         * deferred until the task runs on a node other than this
         * one, at which point the scan is kicked off immediately.
         */
        int first_nid;
#endif
        struct uprobes_state uprobes_state;
};
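
/*
 * Illustrative sketch (not part of this header): mm_users counts users of
 * the address space, while mm_count pins struct mm_struct itself; all of
 * the mm_users together hold a single mm_count reference.  Roughly:
 *
 *	atomic_inc(&mm->mm_users);	// another thread shares the mm
 *	...
 *	mmput(mm);	// last mm_users drop tears down the VMAs, then
 *			// drops the mm_count reference taken for them
 *	mmdrop(mm);	// last mm_count drop frees the structure
 *
 * Lazy-TLB kernel threads take only an mm_count reference, which is why
 * the two counters are kept separate.
 */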

/* first_nid will either be a valid NID or one of these values */
#define NUMA_PTE_SCAN_INIT      -1
#define NUMA_PTE_SCAN_ACTIVE    -2

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
        mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
        return mm->cpu_vm_mask_var;
}
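
/*
 * Illustrative sketch (not part of this header): callers use mm_cpumask()
 * without caring whether the mask is stored inline or off-stack, e.g. a
 * made-up fragment modelled on the TLB shootdown paths:
 *
 *	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 *	...
 *	smp_call_function_many(mm_cpumask(mm), flush_func, &info, 1);
 */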

#endif /* _LINUX_MM_TYPES_H */