linux/include/linux/rmap.h
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct mutex mutex;		/* Serialize access to vma list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for cleaning up the
	 * anon_vma if it is the last user on release.
	 */
	atomic_t refcount;

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid node pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct rb_root rb_root;	/* Interval tree of private "related" vmas */
};

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes, in an interval tree, the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */
struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->mutex */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};

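/*
 * A minimal usage sketch (not part of this header): given a vma, each
 * anon_vma_chain on the vma->anon_vma_chain list names one anon_vma the
 * vma participates in, which is the side unlink_anon_vmas() walks when
 * tearing the links down.  handle_one() below is a hypothetical helper:
 *
 *	struct anon_vma_chain *avc;
 *
 *	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
 *		handle_one(avc->anon_vma);
 *
 * The "rb" side is walked the other way round: starting from an
 * anon_vma, the interval tree yields every vma that may map a given
 * range of page offsets.
 */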
enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};

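/*
 * The low byte selects the mode and the high bits modify it, so callers
 * OR them together.  Page migration, for example, unmaps with:
 *
 *	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 *
 * and TTU_ACTION() (defined below) recovers the mode from the mask.
 */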
#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

static inline struct anon_vma *page_anon_vma(struct page *page)
{
	if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
					    PAGE_MAPPING_ANON)
		return NULL;
	return page_rmapping(page);
}

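/*
 * The check above works because anonymous pages keep the anon_vma
 * pointer in page->mapping with PAGE_MAPPING_ANON set in the low bits,
 * roughly as __page_set_anon_rmap() in mm/rmap.c does it:
 *
 *	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 *	page->mapping = (struct address_space *) anon_vma;
 */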
static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		mutex_lock(&anon_vma->root->mutex);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		mutex_unlock(&anon_vma->root->mutex);
}

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
	mutex_lock(&anon_vma->root->mutex);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
	mutex_unlock(&anon_vma->root->mutex);
}

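/*
 * A minimal sketch of the pattern: stack expansion in mm/mmap.c takes
 * the root anon_vma lock around the resize so that concurrent rmap
 * walks never see a half-updated vma:
 *
 *	vma_lock_anon_vma(vma);
 *	(recheck limits, then update vma->vm_start or vma->vm_end)
 *	vma_unlock_anon_vma(vma);
 */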
/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON(vma->anon_vma != next->anon_vma);
	unlink_anon_vmas(next);
}

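/*
 * A minimal sketch of the fork-time caller: for each vma copied in
 * dup_mmap() (kernel/fork.c), the child vma gets its own anon_vma
 * chained to the parent's, with failure unwinding as out-of-memory:
 *
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 */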
struct anon_vma *page_get_anon_vma(struct page *page);

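/*
 * page_get_anon_vma() returns the page's anon_vma with its refcount
 * raised, or NULL if the page is no longer anonymously mapped, letting
 * the caller sleep without the anon_vma vanishing underneath it.  Page
 * migration uses roughly this pattern:
 *
 *	anon_vma = page_get_anon_vma(page);
 *	(unmap and move the page)
 *	if (anon_vma)
 *		put_anon_vma(anon_vma);
 */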
/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

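/*
 * A minimal sketch of the anonymous-fault caller (do_anonymous_page()
 * in mm/memory.c): make sure the vma has an anon_vma, then account the
 * freshly allocated page's mapping:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 */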
static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

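/*
 * page_dup_rmap() is what fork uses when copy_one_pte() (mm/memory.c)
 * shares a page with the child: the page gains one more mapping, so
 * both the refcount and the mapcount go up:
 *
 *	get_page(page);
 *	page_dup_rmap(page);
 */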
/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int page_referenced_one(struct page *, struct vm_area_struct *,
	unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
int try_to_unmap_one(struct page *, struct vm_area_struct *,
			unsigned long address, enum ttu_flags flags);

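/*
 * A minimal sketch of the reclaim-side caller (page_check_references()
 * in mm/vmscan.c): count the referencing ptes, and back off if the page
 * turns out to be mapped into a VM_LOCKED vma:
 *
 *	referenced = page_referenced(page, 1, memcg, &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		return PAGEREF_RECLAIM;
 */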
/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}

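/*
 * Typical use, as in the per-vma walkers in mm/rmap.c: map and lock the
 * pte only if the page really is mapped at that address, then release
 * both with pte_unmap_unlock():
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (!pte)
 *		return;
 *	(inspect or modify the pte)
 *	pte_unmap_unlock(pte, ptl);
 */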
/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

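/*
 * A minimal sketch of the writeback-side caller,
 * clear_page_dirty_for_io() in mm/page-writeback.c: if any pte had to
 * be cleaned, the page really was dirtied through a shared mapping, so
 * it is marked dirty again before the dirty state moves to writeback:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */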
/*
 * Called in the munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

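/*
 * page_lock_anon_vma() returns the anon_vma with its root mutex held,
 * or NULL if the page is no longer anonymous, so the caller can walk
 * the vmas that may map the page safely:
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return ret;
 *	(walk the interval tree of vmas)
 *	page_unlock_anon_vma(anon_vma);
 */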
/*
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg);

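/*
 * The existing user, remove_migration_ptes() in mm/migrate.c, hands
 * rmap_walk() a callback with the rmap_one signature above, plus the
 * old page as the opaque argument:
 *
 *	rmap_walk(new, remove_migration_pte, old);
 */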
#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3

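/*
 * A minimal sketch of how shrink_page_list() in mm/vmscan.c consumes
 * these when trying to page something out:
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		;	(fall through and try to free the page)
 *	}
 */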
#endif	/* _LINUX_RMAP_H */