linux/include/linux/mempolicy.h
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in an 'int'
 * argument.  The MPOL_MODE_FLAGS macro determines the legal set of
 * optional mode flags.
 */

/* Policies */
enum {
        MPOL_DEFAULT,
        MPOL_PREFERRED,
        MPOL_BIND,
        MPOL_INTERLEAVE,
        MPOL_MAX,       /* always last member of enum */
};

enum mpol_rebind_step {
        MPOL_REBIND_ONCE,       /* do the rebind work in one pass (not in two steps) */
        MPOL_REBIND_STEP1,      /* first step: set all the newly allowed nodes */
        MPOL_REBIND_STEP2,      /* second step: clear all the disallowed nodes */
        MPOL_REBIND_NSTEP,
};
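
/*
 * Illustrative sketch (not part of this header): the two-step rebind
 * exists so a policy's nodemask never goes empty while a cpuset's
 * mems_allowed changes underneath it.  A caller such as cpuset first
 * widens the mask, updates its own state, then narrows the mask:
 *
 *      mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP1);  // add new nodes
 *      ...update tsk->mems_allowed to newmems...
 *      mpol_rebind_task(tsk, &newmems, MPOL_REBIND_STEP2);  // drop old nodes
 */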

/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES     (1 << 15)
#define MPOL_F_RELATIVE_NODES   (1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
 * either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
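
/*
 * Illustrative sketch (not part of this header): from userspace, a mode
 * and an optional mode flag travel together in the single 'int' argument.
 * Assuming the syscall wrapper from libnuma's <numaif.h>, and a machine
 * with nodes 0 and 1:
 *
 *      unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *      // Interleave over nodes 0-1; MPOL_F_STATIC_NODES keeps the mask
 *      // from being remapped when the task's cpuset changes.
 *      if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
 *                        &nodemask, sizeof(nodemask) * 8) < 0)
 *              perror("set_mempolicy");
 */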

/* Flags for get_mempolicy */
#define MPOL_F_NODE     (1<<0)  /* return next IL mode instead of node mask */
#define MPOL_F_ADDR     (1<<1)  /* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
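
/*
 * Illustrative sketch (not part of this header): MPOL_F_ADDR reports the
 * policy in effect at a given address instead of the calling task's
 * policy.  Assuming <numaif.h> and an address 'addr' in a mapped region:
 *
 *      int mode;
 *      unsigned long nodemask = 0;
 *      if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
 *                        addr, MPOL_F_ADDR) == 0)
 *              printf("mode %d, nodemask %#lx\n", mode, nodemask);
 */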

/* Flags for mbind */
#define MPOL_MF_STRICT  (1<<0)  /* Verify existing pages in the mapping */
#define MPOL_MF_MOVE    (1<<1)  /* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3) /* Internal flags start here */
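
/*
 * Illustrative sketch (not part of this header): mbind() applies a policy
 * to an address range, and MPOL_MF_MOVE additionally migrates the range's
 * existing pages (those used only by this process) to conform.  Assuming
 * <numaif.h> and a mapping 'buf' of 'len' bytes:
 *
 *      unsigned long nodemask = 1UL << 1;      // bind to node 1
 *      if (mbind(buf, len, MPOL_BIND, &nodemask,
 *                sizeof(nodemask) * 8, MPOL_MF_MOVE) < 0)
 *              perror("mbind");
 */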

/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED  (1 << 0) /* identify shared policies */
#define MPOL_F_LOCAL   (1 << 1) /* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2)       /* identify policies in rebinding */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
                short            preferred_node; /* preferred */
                nodemask_t       nodes;         /* interleave/bind */
                /* undefined for default */
        } v;
        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
                                          struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
                                                struct mempolicy *frompol)
{
        if (!frompol)
                return frompol;
        return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}
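
/*
 * Illustrative sketch (not part of this header): the reference-counting
 * contract described above.  __mpol_dup() reports allocation failure via
 * ERR_PTR(), so a caller typically does:
 *
 *      struct mempolicy *new = mpol_dup(old); // caller owns one reference
 *      if (IS_ERR(new))
 *              return PTR_ERR(new);
 *      ...use new...
 *      mpol_put(new);                         // drop the caller's reference
 */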

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return true;
        return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        struct mutex mutex;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
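
/*
 * Illustrative sketch (not part of this header): the pattern a shared
 * memory filesystem such as tmpfs follows, where 'info' is hypothetical
 * per-inode state embedding a struct shared_policy and 'index' is a
 * page index into the object:
 *
 *      mpol_shared_policy_init(&info->policy, mpol);   // at inode setup
 *      ...
 *      pol = mpol_shared_policy_lookup(&info->policy, index);
 *      ...allocate the page at 'index' under 'pol'...
 *      mpol_cond_put(pol);     // lookup returns a counted reference
 *      ...
 *      mpol_free_shared_policy(&info->policy);         // at inode teardown
 */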

struct mempolicy *get_vma_policy(struct task_struct *tsk,
                struct vm_area_struct *vma, unsigned long addr);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
                                enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                                const nodemask_t *mask);
extern unsigned slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags);


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
#endif

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
                        int no_context);

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
        if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
                return 0;
        /*
         * Migration allocates pages in the highest zone. If we cannot
         * do so then migration (at least from node to node) is not
         * possible.
         */
        if (vma->vm_file &&
                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
                                                                < policy_zone)
                        return 0;
        return 1;
}

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
                                                struct mempolicy *from)
{
        return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
        return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
                                        struct vm_area_struct *vma,
                                        struct mempolicy *new)
{
        return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                                struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while (0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                const nodemask_t *new,
                                enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
        return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
                        const nodemask_t *mask)
{
        return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                                   const nodemask_t *to, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
                                int no_context)
{
        return 1;       /* error */
}
#endif

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
                                int no_context)
{
        return 0;
}

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif