linux/mm/mempolicy.c
/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
 * Subject to the GNU Public License, version 2.
 *
 * NUMA policy allows the user to give hints about the node(s) on which
 * memory should be allocated.
 *
 * Support four policies per VMA and per process:
 *
 * The VMA policy has priority over the process policy for a page fault.
 *
 * interleave     Allocate memory interleaved over a set of nodes,
 *                with normal fallback if it fails.
 *                For VMA based allocations this interleaves based on the
 *                offset into the backing object or offset into the mapping
 *                for anonymous memory. For process policy a process counter
 *                is used.
 *
 * bind           Only allocate memory on a specific set of nodes,
 *                no fallback.
 *                FIXME: memory is allocated starting with the first node
 *                to the last. It would be better if bind would truly restrict
 *                the allocation to memory nodes instead
 *
 * preferred      Try a specific node first before normal fallback.
 *                As a special case node -1 here means do the allocation
 *                on the local CPU. This is normally identical to default,
 *                but useful to set in a VMA when you have a non default
 *                process policy.
 *
 * default        Allocate on the local node first, or when on a VMA
 *                use the process policy. This is what Linux always did
 *                in a NUMA aware kernel and still does by, ahem, default.
 *
 * The process policy is applied for most non interrupt memory allocations
 * in that process' context. Interrupts ignore the policies and always
 * try to allocate on the local CPU. The VMA policy is only applied for memory
 * allocations for a VMA in the VM.
 *
 * Currently there are a few corner cases in swapping where the policy
 * is not applied, but the majority should be handled. When process policy
 * is used it is not remembered over swap outs/swap ins.
 *
 * Only the highest zone in the zone hierarchy gets policied. Allocations
 * requesting a lower zone just use default policy. This implies that
 * on systems with highmem, kernel lowmem allocations don't get policied.
 * Same with GFP_DMA allocations.
 *
 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
 * all users and remembered even when nobody has memory mapped.
 */
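
/*
 * Usage sketch (userspace, illustrative only; not part of this file):
 * how the policies above are typically requested through set_mempolicy(2)
 * and mbind(2).  The maxnode convention (number of mask bits, plus one)
 * follows the decoding in get_nodes() below; addr/length are assumed to
 * describe an existing mapping:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	// interleave future allocations of this task over nodes 0 and 1
 *	syscall(__NR_set_mempolicy, MPOL_INTERLEAVE, &mask,
 *		8 * sizeof(mask) + 1);
 *	// bind an existing range to node 0, migrating misplaced pages
 *	unsigned long mask0 = 1UL << 0;
 *	syscall(__NR_mbind, addr, length, MPOL_BIND, &mask0,
 *		8 * sizeof(mask0) + 1, MPOL_MF_MOVE);
 */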

/* Notebook:
   fix mmap readahead to honour policy and enable policy for any page cache
   object
   statistics for bigpages
   global policy for page cache? currently it uses process policy. Requires
   first item above.
   handle mremap for shared memory (currently ignored for the policy)
   grows down?
   make bind policy root only? It can trigger oom much faster and the
   kernel is not always grateful with that.
*/

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>

#include <asm/tlbflush.h>
#include <asm/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for contiguous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */
#define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)           /* Gather statistics */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
        .mode = MPOL_PREFERRED,
        .flags = MPOL_F_LOCAL,
};

static const struct mempolicy_operations {
        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
} mpol_ops[MPOL_MAX];

/* Check that the nodemask contains at least one populated zone */
static int is_valid_nodemask(const nodemask_t *nodemask)
{
        int nd, k;

        /* Check that there is something useful in this mask */
        k = policy_zone;

        for_each_node_mask(nd, *nodemask) {
                struct zone *z;

                for (k = 0; k <= policy_zone; k++) {
                        z = &NODE_DATA(nd)->node_zones[k];
                        if (z->present_pages > 0)
                                return 1;
                }
        }

        return 0;
}

static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
{
        return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
}

static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
                                   const nodemask_t *rel)
{
        nodemask_t tmp;
        nodes_fold(tmp, *orig, nodes_weight(*rel));
        nodes_onto(*ret, tmp, *rel);
}
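
/*
 * Worked example (illustrative): with *orig = {0,2} and *rel = {6,7},
 * nodes_fold() folds orig modulo nodes_weight(*rel) == 2 into tmp = {0},
 * and nodes_onto() maps relative bit 0 onto the first node set in rel,
 * so *ret = {6}.  The user's mask is thus interpreted relative to the
 * nodes currently allowed by the cpuset.
 */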

static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (nodes_empty(*nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!nodes)
                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
        else if (nodes_empty(*nodes))
                return -EINVAL;                 /* no allowed nodes */
        else
                pol->v.preferred_node = first_node(*nodes);
        return 0;
}

static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
{
        if (!is_valid_nodemask(nodes))
                return -EINVAL;
        pol->v.nodes = *nodes;
        return 0;
}

/* Create a new policy */
static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
                                  nodemask_t *nodes)
{
        struct mempolicy *policy;
        nodemask_t cpuset_context_nmask;
        int ret;

        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);

        if (mode == MPOL_DEFAULT) {
                if (nodes && !nodes_empty(*nodes))
                        return ERR_PTR(-EINVAL);
                return NULL;    /* simply delete any existing policy */
        }
        VM_BUG_ON(!nodes);

        /*
         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
         * All other modes require a valid pointer to a non-empty nodemask.
         */
        if (mode == MPOL_PREFERRED) {
                if (nodes_empty(*nodes)) {
                        if (((flags & MPOL_F_STATIC_NODES) ||
                             (flags & MPOL_F_RELATIVE_NODES)))
                                return ERR_PTR(-EINVAL);
                        nodes = NULL;   /* flag local alloc */
                }
        } else if (nodes_empty(*nodes))
                return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
        policy->mode = mode;
        policy->flags = flags;

        if (nodes) {
                /*
                 * cpuset related setup doesn't apply to local allocation
                 */
                cpuset_update_task_memory_state();
                if (flags & MPOL_F_RELATIVE_NODES)
                        mpol_relative_nodemask(&cpuset_context_nmask, nodes,
                                               &cpuset_current_mems_allowed);
                else
                        nodes_and(cpuset_context_nmask, *nodes,
                                  cpuset_current_mems_allowed);
                if (mpol_store_user_nodemask(policy))
                        policy->w.user_nodemask = *nodes;
                else
                        policy->w.cpuset_mems_allowed =
                                                cpuset_mems_allowed(current);
        }

        ret = mpol_ops[mode].create(policy,
                                nodes ? &cpuset_context_nmask : NULL);
        if (ret < 0) {
                kmem_cache_free(policy_cache, policy);
                return ERR_PTR(ret);
        }
        return policy;
}
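
/*
 * Worked example (illustrative) of the cpuset contextualization above:
 * a task whose cpuset allows nodes {0,1} requests MPOL_INTERLEAVE over
 * {0,3}.  Without MPOL_F_RELATIVE_NODES the mask is simply intersected
 * with the allowed set, giving {0}.  With MPOL_F_RELATIVE_NODES,
 * mpol_relative_nodemask() folds {0,3} modulo 2 into {0,1} and maps it
 * onto the allowed set, giving {0,1}.
 */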

/* Slow path of an mpol destructor. */
void __mpol_put(struct mempolicy *p)
{
        if (!atomic_dec_and_test(&p->refcnt))
                return;
        kmem_cache_free(policy_cache, p);
}

static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
{
}

static void mpol_rebind_nodemask(struct mempolicy *pol,
                                 const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES)
                nodes_and(tmp, pol->w.user_nodemask, *nodes);
        else if (pol->flags & MPOL_F_RELATIVE_NODES)
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
        else {
                nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
                            *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }

        pol->v.nodes = tmp;
        if (!node_isset(current->il_next, tmp)) {
                current->il_next = next_node(current->il_next, tmp);
                if (current->il_next >= MAX_NUMNODES)
                        current->il_next = first_node(tmp);
                if (current->il_next >= MAX_NUMNODES)
                        current->il_next = numa_node_id();
        }
}
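
/*
 * Worked example (illustrative): an MPOL_INTERLEAVE policy created with
 * MPOL_F_STATIC_NODES over user nodemask {0,1}.  If the cpuset's allowed
 * set later changes to {1,2}, the rebind above computes
 * tmp = {0,1} & {1,2} = {1}: only the user-requested nodes that remain
 * allowed are kept, instead of remapping the mask wholesale as in the
 * default (no flags) case.
 */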

static void mpol_rebind_preferred(struct mempolicy *pol,
                                  const nodemask_t *nodes)
{
        nodemask_t tmp;

        if (pol->flags & MPOL_F_STATIC_NODES) {
                int node = first_node(pol->w.user_nodemask);

                if (node_isset(node, *nodes)) {
                        pol->v.preferred_node = node;
                        pol->flags &= ~MPOL_F_LOCAL;
                } else
                        pol->flags |= MPOL_F_LOCAL;
        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                pol->v.preferred_node = first_node(tmp);
        } else if (!(pol->flags & MPOL_F_LOCAL)) {
                pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                   pol->w.cpuset_mems_allowed,
                                                   *nodes);
                pol->w.cpuset_mems_allowed = *nodes;
        }
}

/* Migrate a policy to a different set of nodes */
static void mpol_rebind_policy(struct mempolicy *pol,
                               const nodemask_t *newmask)
{
        if (!pol)
                return;
        if (!mpol_store_user_nodemask(pol) &&
            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
                return;
        mpol_ops[pol->mode].rebind(pol, newmask);
}

/*
 * Wrapper for mpol_rebind_policy() that just requires task
 * pointer, and updates task mempolicy.
 */

void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
{
        mpol_rebind_policy(tsk->mempolicy, new);
}

/*
 * Rebind each vma in mm to new nodemask.
 *
 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 */

void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                mpol_rebind_policy(vma->vm_policy, new);
        up_write(&mm->mmap_sem);
}

static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
        [MPOL_DEFAULT] = {
                .rebind = mpol_rebind_default,
        },
        [MPOL_INTERLEAVE] = {
                .create = mpol_new_interleave,
                .rebind = mpol_rebind_nodemask,
        },
        [MPOL_PREFERRED] = {
                .create = mpol_new_preferred,
                .rebind = mpol_rebind_preferred,
        },
        [MPOL_BIND] = {
                .create = mpol_new_bind,
                .rebind = mpol_rebind_nodemask,
        },
};

static void gather_stats(struct page *, void *, int pte_dirty);
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);

/* Scan through pages checking if pages follow certain conditions. */
static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pte_t *orig_pte;
        pte_t *pte;
        spinlock_t *ptl;

        orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        do {
                struct page *page;
                int nid;

                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
                /*
                 * The check for PageReserved here is important to avoid
                 * handling zero pages and other pages that may have been
                 * marked special by the system.
                 *
                 * If PageReserved were not checked here then e.g.
                 * the location of the zero page could have an influence
                 * on MPOL_MF_STRICT, zero pages would be counted for
                 * the per node stats, and there would be useless attempts
                 * to put zero pages on the migration list.
                 */
                if (PageReserved(page))
                        continue;
                nid = page_to_nid(page);
                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
                        continue;

                if (flags & MPOL_MF_STATS)
                        gather_stats(page, private, pte_dirty(*pte));
                else if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                        migrate_page_add(page, private, flags);
                else
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(orig_pte, ptl);
        return addr != end;
}

static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                if (check_pte_range(vma, pmd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud))
                        continue;
                if (check_pmd_range(vma, pud, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pud++, addr = next, addr != end);
        return 0;
}

static inline int check_pgd_range(struct vm_area_struct *vma,
                unsigned long addr, unsigned long end,
                const nodemask_t *nodes, unsigned long flags,
                void *private)
{
        pgd_t *pgd;
        unsigned long next;

        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                if (check_pud_range(vma, pgd, addr, next, nodes,
                                    flags, private))
                        return -EIO;
        } while (pgd++, addr = next, addr != end);
        return 0;
}
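
/*
 * Note: check_pmd_range(), check_pud_range() and check_pgd_range() above
 * form the usual top-down page table walk; only check_pte_range() looks
 * at actual pages, the upper levels merely skip holes and descend one
 * level at a time until addr reaches end.
 */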

/*
 * Check if all pages in a range are on a set of nodes.
 * If pagelist != NULL then isolate pages from the LRU and
 * put them on the pagelist.
 */
static struct vm_area_struct *
check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                const nodemask_t *nodes, unsigned long flags, void *private)
{
        int err;
        struct vm_area_struct *first, *vma, *prev;

        first = find_vma(mm, start);
        if (!first)
                return ERR_PTR(-EFAULT);
        prev = NULL;
        for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
                                return ERR_PTR(-EFAULT);
                        if (prev && prev->vm_end < vma->vm_start)
                                return ERR_PTR(-EFAULT);
                }
                if (!is_vm_hugetlb_page(vma) &&
                    ((flags & MPOL_MF_STRICT) ||
                     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
                                vma_migratable(vma)))) {
                        unsigned long endvma = vma->vm_end;

                        if (endvma > end)
                                endvma = end;
                        if (vma->vm_start > start)
                                start = vma->vm_start;
                        err = check_pgd_range(vma, start, endvma, nodes,
                                                flags, private);
                        if (err) {
                                first = ERR_PTR(err);
                                break;
                        }
                }
                prev = vma;
        }
        return first;
}

/* Apply policy to a single VMA */
static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
{
        int err = 0;
        struct mempolicy *old = vma->vm_policy;

        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
                 vma->vm_ops, vma->vm_file,
                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);

        if (vma->vm_ops && vma->vm_ops->set_policy)
                err = vma->vm_ops->set_policy(vma, new);
        if (!err) {
                mpol_get(new);
                vma->vm_policy = new;
                mpol_put(old);
        }
        return err;
}

/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end, struct mempolicy *new)
{
        struct vm_area_struct *next;
        int err;

        err = 0;
        for (; vma && vma->vm_start < end; vma = next) {
                next = vma->vm_next;
                if (vma->vm_start < start)
                        err = split_vma(vma->vm_mm, vma, start, 1);
                if (!err && vma->vm_end > end)
                        err = split_vma(vma->vm_mm, vma, end, 0);
                if (!err)
                        err = policy_vma(vma, new);
                if (err)
                        break;
        }
        return err;
}

/*
 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 * mempolicy.  Allows more rapid checking of this (combined perhaps
 * with other PF_* flag bits) on memory allocation hot code paths.
 *
 * If called from outside this file, the task 'p' should -only- be
 * a newly forked child not yet visible on the task list, because
 * manipulating the task flags of a visible task is not safe.
 *
 * The above limitation is why this routine has the funny name
 * mpol_fix_fork_child_flag().
 *
 * It is also safe to call this with a task pointer of current,
 * which the static wrapper mpol_set_task_struct_flag() does,
 * for use within this file.
 */

void mpol_fix_fork_child_flag(struct task_struct *p)
{
        if (p->mempolicy)
                p->flags |= PF_MEMPOLICY;
        else
                p->flags &= ~PF_MEMPOLICY;
}

static void mpol_set_task_struct_flag(void)
{
        mpol_fix_fork_child_flag(current);
}

/* Set the process memory policy */
static long do_set_mempolicy(unsigned short mode, unsigned short flags,
                             nodemask_t *nodes)
{
        struct mempolicy *new;
        struct mm_struct *mm = current->mm;

        new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new))
                return PTR_ERR(new);

        /*
         * prevent changing our mempolicy while show_numa_maps()
         * is using it.
         * Note:  do_set_mempolicy() can be called at init time
         * with no 'mm'.
         */
        if (mm)
                down_write(&mm->mmap_sem);
        mpol_put(current->mempolicy);
        current->mempolicy = new;
        mpol_set_task_struct_flag();
        if (new && new->mode == MPOL_INTERLEAVE &&
            nodes_weight(new->v.nodes))
                current->il_next = first_node(new->v.nodes);
        if (mm)
                up_write(&mm->mmap_sem);

        return 0;
}

/*
 * Return nodemask for policy for get_mempolicy() query
 */
static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
{
        nodes_clear(*nodes);
        if (p == &default_policy)
                return;

        switch (p->mode) {
        case MPOL_BIND:
                /* Fall through */
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
                if (!(p->flags & MPOL_F_LOCAL))
                        node_set(p->v.preferred_node, *nodes);
                /* else return empty node mask for local allocation */
                break;
        default:
                BUG();
        }
}

static int lookup_node(struct mm_struct *mm, unsigned long addr)
{
        struct page *p;
        int err;

        err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
        if (err >= 0) {
                err = page_to_nid(p);
                put_page(p);
        }
        return err;
}

/* Retrieve NUMA policy */
static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                             unsigned long addr, unsigned long flags)
{
        int err;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = NULL;
        struct mempolicy *pol = current->mempolicy;

        cpuset_update_task_memory_state();
        if (flags &
                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
                return -EINVAL;

        if (flags & MPOL_F_MEMS_ALLOWED) {
                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
                        return -EINVAL;
                *policy = 0;    /* just so it's initialized */
                *nmask  = cpuset_current_mems_allowed;
                return 0;
        }

        if (flags & MPOL_F_ADDR) {
                /*
                 * Do NOT fall back to task policy if the
                 * vma/shared policy at addr is NULL.  We
                 * want to return MPOL_DEFAULT in this case.
                 */
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
                        up_read(&mm->mmap_sem);
                        return -EFAULT;
                }
                if (vma->vm_ops && vma->vm_ops->get_policy)
                        pol = vma->vm_ops->get_policy(vma, addr);
                else
                        pol = vma->vm_policy;
        } else if (addr)
                return -EINVAL;

        if (!pol)
                pol = &default_policy;  /* indicates default behavior */

        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
                        err = lookup_node(mm, addr);
                        if (err < 0)
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
                                pol->mode == MPOL_INTERLEAVE) {
                        *policy = current->il_next;
                } else {
                        err = -EINVAL;
                        goto out;
                }
        } else {
                *policy = pol == &default_policy ? MPOL_DEFAULT :
                                                pol->mode;
                /*
                 * Internal mempolicy flags must be masked off before exposing
                 * the policy to userspace.
                 */
                *policy |= (pol->flags & MPOL_MODE_FLAGS);
        }

        if (vma) {
                up_read(&current->mm->mmap_sem);
                vma = NULL;
        }

        err = 0;
        if (nmask)
                get_policy_nodemask(pol, nmask);

 out:
        mpol_cond_put(pol);
        if (vma)
                up_read(&current->mm->mmap_sem);
        return err;
}

#ifdef CONFIG_MIGRATION
/*
 * page migration
 */
static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
        /*
         * Avoid migrating a page that is shared with others.
         */
        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
                if (!isolate_lru_page(page)) {
                        list_add_tail(&page->lru, pagelist);
                }
        }
}

static struct page *new_node_page(struct page *page, unsigned long node, int **x)
{
        return alloc_pages_node(node, GFP_HIGHUSER_MOVABLE, 0);
}

/*
 * Migrate pages from one node to a target node.
 * Returns error or the number of pages not migrated.
 */
static int migrate_to_node(struct mm_struct *mm, int source, int dest,
                           int flags)
{
        nodemask_t nmask;
        LIST_HEAD(pagelist);
        int err = 0;

        nodes_clear(nmask);
        node_set(source, nmask);

        check_range(mm, mm->mmap->vm_start, TASK_SIZE, &nmask,
                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);

        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_node_page, dest);

        return err;
}

/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.
 *
 * Returns the number of pages that could not be moved.
 */
int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
        int busy = 0;
        int err;
        nodemask_t tmp;

        err = migrate_prep();
        if (err)
                return err;

        down_read(&mm->mmap_sem);

        err = migrate_vmas(mm, from_nodes, to_nodes, flags);
        if (err)
                goto out;

/*
 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 * bit in 'tmp', and return that <source, dest> pair for migration.
 * The pair of nodemasks 'to' and 'from' define the map.
 *
 * If no pair of bits is found that way, fallback to picking some
 * pair of 'source' and 'dest' bits that are not the same.  If the
 * 'source' and 'dest' bits are the same, this represents a node
 * that will be migrating to itself, so no pages need move.
 *
 * If no bits are left in 'tmp', or if all remaining bits left
 * in 'tmp' correspond to the same bit in 'to', return false
 * (nothing left to migrate).
 *
 * This lets us pick a pair of nodes to migrate between, such that
 * if possible the dest node is not already occupied by some other
 * source node, minimizing the risk of overloading the memory on a
 * node that would happen if we migrated incoming memory to a node
 * before migrating outgoing memory from that same node.
 *
 * A single scan of tmp is sufficient.  As we go, we remember the
 * most recent <s, d> pair that moved (s != d).  If we find a pair
 * that not only moved, but what's better, moved to an empty slot
 * (d is not set in tmp), then we break out then, with that pair.
 * Otherwise when we finish scanning tmp, we at least have the
 * most recent <s, d> pair that moved.  If we get all the way through
 * the scan of tmp without finding any node that moved, much less
 * moved to an empty node, then there is nothing left worth migrating.
 */
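
/*
 * Worked example (illustrative): from_nodes = {0,1}, to_nodes = {1,2},
 * so the positional remap is 0 -> 1 and 1 -> 2.  Scanning tmp = {0,1}:
 * <0,1> moves, but dest 1 is itself still a pending source, so keep
 * looking; <1,2> moves to a node outside tmp, so 1 -> 2 is migrated
 * first.  On the next pass tmp = {0} and 0 -> 1 runs only after node 1
 * has been drained -- the overload avoidance described above.
 */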

        tmp = *from_nodes;
        while (!nodes_empty(tmp)) {
                int s, d;
                int source = -1;
                int dest = 0;

                for_each_node_mask(s, tmp) {
                        d = node_remap(s, *from_nodes, *to_nodes);
                        if (s == d)
                                continue;

                        source = s;     /* Node moved. Memorize */
                        dest = d;

                        /* dest not in remaining from nodes? */
                        if (!node_isset(dest, tmp))
                                break;
                }
                if (source == -1)
                        break;

                node_clear(source, tmp);
                err = migrate_to_node(mm, source, dest, flags);
                if (err > 0)
                        busy += err;
                if (err < 0)
                        break;
        }
out:
        up_read(&mm->mmap_sem);
        if (err < 0)
                return err;
        return busy;
}

/*
 * Allocate a new page for page migration based on vma policy.
 * Start assuming that page is mapped by vma pointed to by @private.
 * Search forward from there, if not.  N.B., this assumes that the
 * list of pages handed to migrate_pages()--which is how we get here--
 * is in virtual address order.
 */
static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
        struct vm_area_struct *vma = (struct vm_area_struct *)private;
        unsigned long uninitialized_var(address);

        while (vma) {
                address = page_address_in_vma(page, vma);
                if (address != -EFAULT)
                        break;
                vma = vma->vm_next;
        }

        /*
         * if !vma, alloc_page_vma() will use task or system default policy
         */
        return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else

static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags)
{
}

int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
{
        return -ENOSYS;
}

static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
        return NULL;
}
#endif

static long do_mbind(unsigned long start, unsigned long len,
                     unsigned short mode, unsigned short mode_flags,
                     nodemask_t *nmask, unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
        int err;
        LIST_HEAD(pagelist);

        if (flags & ~(unsigned long)(MPOL_MF_STRICT |
                                     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                return -EINVAL;
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;

        if (start & ~PAGE_MASK)
                return -EINVAL;

        if (mode == MPOL_DEFAULT)
                flags &= ~MPOL_MF_STRICT;

        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
        end = start + len;

        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;

        new = mpol_new(mode, mode_flags, nmask);
        if (IS_ERR(new))
                return PTR_ERR(new);

        /*
         * If we are using the default policy then operation
         * on discontinuous address spaces is okay after all
         */
        if (!new)
                flags |= MPOL_MF_DISCONTIG_OK;

        pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
                 start, start + len, mode, mode_flags,
                 nmask ? nodes_addr(*nmask)[0] : -1);

        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {

                err = migrate_prep();
                if (err)
                        return err;
        }
        down_write(&mm->mmap_sem);
        vma = check_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);

        err = PTR_ERR(vma);
        if (!IS_ERR(vma)) {
                int nr_failed = 0;

                err = mbind_range(vma, start, end, new);

                if (!list_empty(&pagelist))
                        nr_failed = migrate_pages(&pagelist, new_vma_page,
                                                (unsigned long)vma);

                if (!err && nr_failed && (flags & MPOL_MF_STRICT))
                        err = -EIO;
        }

        up_write(&mm->mmap_sem);
        mpol_put(new);
        return err;
}

/*
 * User space interface with variable sized bitmaps for nodelists.
 */

/* Copy a node mask from user space. */
static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
                     unsigned long maxnode)
{
        unsigned long k;
        unsigned long nlongs;
        unsigned long endmask;

        --maxnode;
        nodes_clear(*nodes);
        if (maxnode == 0 || !nmask)
                return 0;
        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
                return -EINVAL;

        nlongs = BITS_TO_LONGS(maxnode);
        if ((maxnode % BITS_PER_LONG) == 0)
                endmask = ~0UL;
        else
                endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
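
        /*
         * Example (illustrative, 64-bit longs): a caller passing
         * maxnode == 65 describes bits 0..63, so after the decrement above
         * nlongs == 1 and endmask == ~0UL; maxnode == 36 gives nlongs == 1
         * and endmask == (1UL << 35) - 1, masking off the unused high bits
         * of the last long copied below.
         */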

        /* When the user specified more nodes than supported just check
           if the unsupported part is all zero. */
        if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
                if (nlongs > PAGE_SIZE/sizeof(long))
                        return -EINVAL;
                for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
                        unsigned long t;
                        if (get_user(t, nmask + k))
                                return -EFAULT;
                        if (k == nlongs - 1) {
                                if (t & endmask)
                                        return -EINVAL;
                        } else if (t)
                                return -EINVAL;
                }
                nlongs = BITS_TO_LONGS(MAX_NUMNODES);
                endmask = ~0UL;
        }

        if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
                return -EFAULT;
        nodes_addr(*nodes)[nlongs-1] &= endmask;
        return 0;
}

/* Copy a kernel node mask to user space */
static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
                              nodemask_t *nodes)
{
        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
        const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);

        if (copy > nbytes) {
                if (copy > PAGE_SIZE)
                        return -EINVAL;
                if (clear_user((char __user *)mask + nbytes, copy - nbytes))
                        return -EFAULT;
                copy = nbytes;
        }
        return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
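
/*
 * Example (illustrative): with 64-bit longs and MAX_NUMNODES == 64, a
 * caller passing maxnode == 1025 yields copy == ALIGN(1024, 64) / 8 ==
 * 128 bytes.  Only nbytes == 8 of those are meaningful, so the trailing
 * 120 bytes of the user buffer are cleared and just the first long is
 * copied out.
 */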

asmlinkage long sys_mbind(unsigned long start, unsigned long len,
                        unsigned long mode,
                        unsigned long __user *nmask, unsigned long maxnode,
                        unsigned flags)
{
        nodemask_t nodes;
        int err;
        unsigned short mode_flags;

        mode_flags = mode & MPOL_MODE_FLAGS;
        mode &= ~MPOL_MODE_FLAGS;
        if (mode >= MPOL_MAX)
                return -EINVAL;
        if ((mode_flags & MPOL_F_STATIC_NODES) &&
            (mode_flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
        return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}

/* Set the process memory policy */
asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
                unsigned long maxnode)
{
        int err;
        nodemask_t nodes;
        unsigned short flags;

        flags = mode & MPOL_MODE_FLAGS;
        mode &= ~MPOL_MODE_FLAGS;
        if ((unsigned int)mode >= MPOL_MAX)
                return -EINVAL;
        if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
        return do_set_mempolicy(mode, flags, &nodes);
}

asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
                const unsigned long __user *old_nodes,
                const unsigned long __user *new_nodes)
{
        struct mm_struct *mm;
        struct task_struct *task;
        nodemask_t old;
        nodemask_t new;
        nodemask_t task_nodes;
        int err;

        err = get_nodes(&old, old_nodes, maxnode);
        if (err)
                return err;

        err = get_nodes(&new, new_nodes, maxnode);
        if (err)
                return err;

        /* Find the mm_struct */
        read_lock(&tasklist_lock);
        task = pid ? find_task_by_vpid(pid) : current;
        if (!task) {
                read_unlock(&tasklist_lock);
                return -ESRCH;
        }
        mm = get_task_mm(task);
        read_unlock(&tasklist_lock);

        if (!mm)
                return -EINVAL;

        /*
         * Check if this process has the right to modify the specified
         * process. The right exists if the process has administrative
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
        if ((current->euid != task->suid) && (current->euid != task->uid) &&
            (current->uid != task->suid) && (current->uid != task->uid) &&
            !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out;
        }

        task_nodes = cpuset_mems_allowed(task);
        /* Is the user allowed to access the target nodes? */
        if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
                err = -EPERM;
                goto out;
        }

        if (!nodes_subset(new, node_states[N_HIGH_MEMORY])) {
                err = -EINVAL;
                goto out;
        }

        err = security_task_movememory(task);
        if (err)
                goto out;

        err = do_migrate_pages(mm, &old, &new,
                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
out:
        mmput(mm);
        return err;
}

/* Retrieve NUMA policy */
asmlinkage long sys_get_mempolicy(int __user *policy,
                                unsigned long __user *nmask,
                                unsigned long maxnode,
                                unsigned long addr, unsigned long flags)
{
        int err;
        int uninitialized_var(pval);
        nodemask_t nodes;

        if (nmask != NULL && maxnode < MAX_NUMNODES)
                return -EINVAL;

        err = do_get_mempolicy(&pval, &nodes, addr, flags);

        if (err)
                return err;

        if (policy && put_user(pval, policy))
                return -EFAULT;

        if (nmask)
                err = copy_nodes_to_user(nmask, maxnode, &nodes);

        return err;
}

#ifdef CONFIG_COMPAT

asmlinkage long compat_sys_get_mempolicy(int __user *policy,
                                     compat_ulong_t __user *nmask,
                                     compat_ulong_t maxnode,
                                     compat_ulong_t addr, compat_ulong_t flags)
{
        long err;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask)
                nm = compat_alloc_user_space(alloc_size);

        err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);

        if (!err && nmask) {
                err = copy_from_user(bm, nm, alloc_size);
                /* ensure entire bitmap is zeroed */
                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
                err |= compat_put_bitmap(nmask, bm, nr_bits);
        }

        return err;
}

asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
                                     compat_ulong_t maxnode)
{
        long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        DECLARE_BITMAP(bm, MAX_NUMNODES);

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask) {
                err = compat_get_bitmap(bm, nmask, nr_bits);
                nm = compat_alloc_user_space(alloc_size);
                err |= copy_to_user(nm, bm, alloc_size);
        }

        if (err)
                return -EFAULT;

        return sys_set_mempolicy(mode, nm, nr_bits+1);
}

asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
                             compat_ulong_t mode, compat_ulong_t __user *nmask,
                             compat_ulong_t maxnode, compat_ulong_t flags)
{
        long err = 0;
        unsigned long __user *nm = NULL;
        unsigned long nr_bits, alloc_size;
        nodemask_t bm;

        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

        if (nmask) {
                err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
                nm = compat_alloc_user_space(alloc_size);
                err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
        }

        if (err)
                return -EFAULT;

        return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
}

#endif

/*
 * get_vma_policy(@task, @vma, @addr)
 * @task - task for fallback if vma policy == default
 * @vma   - virtual memory area whose policy is sought
 * @addr  - address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to @task or system default policy, as necessary.
 * Current or other task's task mempolicy and non-shared vma policies
 * are protected by the task's mmap_sem, which must be held for read by
 * the caller.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */
static struct mempolicy *get_vma_policy(struct task_struct *task,
                struct vm_area_struct *vma, unsigned long addr)
{
        struct mempolicy *pol = task->mempolicy;

        if (vma) {
                if (vma->vm_ops && vma->vm_ops->get_policy) {
                        struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
                                                                        addr);
                        if (vpol)
                                pol = vpol;
                } else if (vma->vm_policy)
                        pol = vma->vm_policy;
        }
        if (!pol)
                pol = &default_policy;
        return pol;
}

/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */
static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
        /* Lower zones don't get a nodemask applied for MPOL_BIND */
        if (unlikely(policy->mode == MPOL_BIND) &&
                        gfp_zone(gfp) >= policy_zone &&
                        cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
                return &policy->v.nodes;

        return NULL;
}

/* Return a zonelist indicated by gfp for node representing a mempolicy */
static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
{
        int nd = numa_node_id();

        switch (policy->mode) {
        case MPOL_PREFERRED:
                if (!(policy->flags & MPOL_F_LOCAL))
                        nd = policy->v.preferred_node;
                break;
        case MPOL_BIND:
                /*
                 * Normally, MPOL_BIND allocations are node-local within the
                 * allowed nodemask.  However, if __GFP_THISNODE is set and the
                 * current node isn't part of the mask, we use the zonelist for
                 * the first node in the mask instead.
                 */
                if (unlikely(gfp & __GFP_THISNODE) &&
                                unlikely(!node_isset(nd, policy->v.nodes)))
                        nd = first_node(policy->v.nodes);
                break;
        case MPOL_INTERLEAVE: /* should not happen */
                break;
        default:
                BUG();
        }
        return node_zonelist(nd, gfp);
}

/* Do dynamic interleaving for a process */
static unsigned interleave_nodes(struct mempolicy *policy)
{
        unsigned nid, next;
        struct task_struct *me = current;

        nid = me->il_next;
        next = next_node(nid, policy->v.nodes);
        if (next >= MAX_NUMNODES)
                next = first_node(policy->v.nodes);
        if (next < MAX_NUMNODES)
                me->il_next = next;
        return nid;
}

/*
 * Depending on the memory policy provide a node from which to allocate the
 * next slab entry.
 * @policy must be protected against freeing by the caller.  If @policy is
 * the current task's mempolicy, this protection is implicit, as only the
 * task can change its policy.  The system default policy requires no
 * such protection.
 */
unsigned slab_node(struct mempolicy *policy)
{
        if (!policy || policy->flags & MPOL_F_LOCAL)
                return numa_node_id();

        switch (policy->mode) {
        case MPOL_PREFERRED:
                /*
                 * handled MPOL_F_LOCAL above
                 */
                return policy->v.preferred_node;

        case MPOL_INTERLEAVE:
                return interleave_nodes(policy);

        case MPOL_BIND: {
                /*
                 * Follow bind policy behavior and start allocation at the
                 * first node.
                 */
                struct zonelist *zonelist;
                struct zone *zone;
                enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
                zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
                (void)first_zones_zonelist(zonelist, highest_zoneidx,
                                                        &policy->v.nodes,
                                                        &zone);
                return zone->node;
        }

        default:
                BUG();
        }
}

/* Do static interleaving for a VMA with known offset. */
static unsigned offset_il_node(struct mempolicy *pol,
                struct vm_area_struct *vma, unsigned long off)
{
        unsigned nnodes = nodes_weight(pol->v.nodes);
        unsigned target;
        int c;
        int nid = -1;

        if (!nnodes)
                return numa_node_id();
        target = (unsigned int)off % nnodes;
        c = 0;
        do {
                nid = next_node(nid, pol->v.nodes);
                c++;
        } while (c <= target);
        return nid;
}
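
/*
 * Worked example (illustrative): with pol->v.nodes = {0,2,4} and off = 7,
 * nnodes == 3 and target == 7 % 3 == 1, so the loop above stops on the
 * second node of the mask and the page at this offset lands on node 2.
 */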

/* Determine a node number for interleave */
static inline unsigned interleave_nid(struct mempolicy *pol,
                 struct vm_area_struct *vma, unsigned long addr, int shift)
{
        if (vma) {
                unsigned long off;

                /*
                 * for small pages, there is no difference between
                 * shift and PAGE_SHIFT, so the bit-shift is safe.
                 * for huge pages, since vm_pgoff is in units of small
                 * pages, we need to shift off the always 0 bits to get
                 * a useful offset.
                 */
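                /*
                 * E.g. (illustrative, 4KB base pages): with 2MB huge
                 * pages shift == 21, so off becomes vm_pgoff >> 9 plus
                 * the huge-page index of addr within the VMA -- one
                 * interleave step per huge page, not per base page.
                 */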
                BUG_ON(shift < PAGE_SHIFT);
                off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
                off += (addr - vma->vm_start) >> shift;
                return offset_il_node(pol, vma, off);
        } else
                return interleave_nodes(pol);
}

#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma = virtual memory area whose policy is sought
 * @addr = address in @vma for shared policy lookup and interleave policy
 * @gfp_flags = for requested zone
 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is MPOL_BIND, returns a pointer to the
 * mempolicy's @nodemask for filtering the zonelist.
 */
struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
                                gfp_t gfp_flags, struct mempolicy **mpol,
                                nodemask_t **nodemask)
{
        struct zonelist *zl;

        *mpol = get_vma_policy(current, vma, addr);
        *nodemask = NULL;       /* assume !MPOL_BIND */

        if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
                zl = node_zonelist(interleave_nid(*mpol, vma, addr,
                                huge_page_shift(hstate_vma(vma))), gfp_flags);
        } else {
                zl = policy_zonelist(gfp_flags, *mpol);
                if ((*mpol)->mode == MPOL_BIND)
                        *nodemask = &(*mpol)->v.nodes;
        }
        return zl;
}
#endif
1501
1502/* Allocate a page in interleaved policy.
1503   Own path because it needs to do special accounting. */
1504static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1505                                        unsigned nid)
1506{
1507        struct zonelist *zl;
1508        struct page *page;
1509
1510        zl = node_zonelist(nid, gfp);
1511        page = __alloc_pages(gfp, order, zl);
1512        if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1513                inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1514        return page;
1515}
1516
1517/**
1518 *      alloc_page_vma  - Allocate a page for a VMA.
1519 *
1520 *      @gfp:
1521 *      %GFP_USER    user allocation,
1522 *      %GFP_KERNEL  kernel allocation,
1523 *      %GFP_HIGHMEM highmem/user allocation,
1524 *      %GFP_FS      allocation should not call back into a file system,
1525 *      %GFP_ATOMIC  don't sleep.
1526 *
1527 *      @vma:  Pointer to VMA or NULL if not available.
1528 *      @addr: Virtual Address of the allocation. Must be inside the VMA.
1529 *
1530 *      This function allocates a page from the kernel page pool and applies
1531 *      a NUMA policy associated with the VMA or the current process.
1532 *      When the VMA is not NULL, the caller must hold down_read on the
1533 *      mmap_sem of the VMA's mm_struct to prevent it from going away.
1534 *      Should be used for all allocations of pages that will be mapped
1535 *      into user space. Returns NULL when no page can be allocated.
1536 */
1539struct page *
1540alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
1541{
1542        struct mempolicy *pol = get_vma_policy(current, vma, addr);
1543        struct zonelist *zl;
1544
1545        cpuset_update_task_memory_state();
1546
1547        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1548                unsigned nid;
1549
1550                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1551                mpol_cond_put(pol);
1552                return alloc_page_interleave(gfp, 0, nid);
1553        }
1554        zl = policy_zonelist(gfp, pol);
1555        if (unlikely(mpol_needs_cond_ref(pol))) {
1556                /*
1557                 * slow path: ref counted shared policy
1558                 */
1559                struct page *page =  __alloc_pages_nodemask(gfp, 0,
1560                                                zl, policy_nodemask(gfp, pol));
1561                __mpol_put(pol);
1562                return page;
1563        }
1564        /*
1565         * fast path:  default or task policy
1566         */
1567        return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
1568}
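
/*
 * Typical use, a sketch of an anonymous-fault style caller (which, per
 * the comment above, already holds down_read on the mmap_sem):
 */
#if 0
        struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);

        if (!page)
                return VM_FAULT_OOM;
#endif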
1569
1570/**
1571 *      alloc_pages_current - Allocate pages.
1572 *
1573 *      @gfp:
1574 *              %GFP_USER   user allocation,
1575 *              %GFP_KERNEL kernel allocation,
1576 *              %GFP_HIGHMEM highmem allocation,
1577 *              %GFP_FS     don't call back into a file system,
1578 *              %GFP_ATOMIC don't sleep.
1579 *      @order: Power of two of allocation size in pages. 0 is a single page.
1580 *
1581 *      Allocate a page from the kernel page pool.  When not in
1582 *      interrupt context, apply the current process' NUMA policy.
1583 *      Returns NULL when no page can be allocated.
1584 *
1585 *      Don't call cpuset_update_task_memory_state() unless
1586 *      1) it's ok to take cpuset_sem (can WAIT), and
1587 *      2) allocating for current task (not interrupt).
1588 */
1589struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1590{
1591        struct mempolicy *pol = current->mempolicy;
1592
1593        if ((gfp & __GFP_WAIT) && !in_interrupt())
1594                cpuset_update_task_memory_state();
1595        if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1596                pol = &default_policy;
1597
1598        /*
1599         * No reference counting needed for current->mempolicy
1600         * nor system default_policy
1601         */
1602        if (pol->mode == MPOL_INTERLEAVE)
1603                return alloc_page_interleave(gfp, order, interleave_nodes(pol));
1604        return __alloc_pages_nodemask(gfp, order,
1605                        policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
1606}
1607EXPORT_SYMBOL(alloc_pages_current);
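
/*
 * On CONFIG_NUMA kernels the generic alloc_pages(gfp, order) helper in
 * gfp.h resolves to alloc_pages_current(), so ordinary allocations pick
 * up the caller's task policy transparently; a minimal sketch:
 */
#if 0
        struct page *page = alloc_pages(GFP_KERNEL, 2); /* 4 contiguous pages */

        if (page)
                __free_pages(page, 2);
#endif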
1608
1609/*
1610 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1611 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1612 * with the mems_allowed returned by cpuset_mems_allowed().  This
1613 * keeps mempolicies cpuset-relative after the task's cpuset moves.  See
1614 * also kernel/cpuset.c update_nodemask().
1615 */
1616
1617/* Slow path of a mempolicy duplicate */
1618struct mempolicy *__mpol_dup(struct mempolicy *old)
1619{
1620        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1621
1622        if (!new)
1623                return ERR_PTR(-ENOMEM);
1624        if (current_cpuset_is_being_rebound()) {
1625                nodemask_t mems = cpuset_mems_allowed(current);
1626                mpol_rebind_policy(old, &mems);
1627        }
1628        *new = *old;
1629        atomic_set(&new->refcnt, 1);
1630        return new;
1631}
1632
1633/*
1634 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
1635 * eliminate the MPOL_F_* flags that require conditional ref and
1636 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
1637 * after return.  Use the returned value.
1638 *
1639 * Allows use of a mempolicy for, e.g., multiple allocations with a single
1640 * policy lookup, even if the policy needs/has extra ref on lookup.
1641 * shmem_readahead needs this.
1642 */
1643struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1644                                                struct mempolicy *frompol)
1645{
1646        if (!mpol_needs_cond_ref(frompol))
1647                return frompol;
1648
1649        *tompol = *frompol;
1650        tompol->flags &= ~MPOL_F_SHARED;        /* copy doesn't need unref */
1651        __mpol_put(frompol);
1652        return tompol;
1653}
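
/*
 * Sketch of the intended use (shmem-style; "info" and "idx" are
 * illustrative): copy a possibly shared policy onto the stack once via
 * the mpol_cond_copy() wrapper, then do several allocations against
 * the stable copy without further ref/unref traffic.
 */
#if 0
        struct mempolicy mpol, *spol;

        spol = mpol_cond_copy(&mpol,
                        mpol_shared_policy_lookup(&info->policy, idx));
        /* spol is now safe for repeated use; no mpol_put() required */
#endif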
1654
1655static int mpol_match_intent(const struct mempolicy *a,
1656                             const struct mempolicy *b)
1657{
1658        if (a->flags != b->flags)
1659                return 0;
1660        if (!mpol_store_user_nodemask(a))
1661                return 1;
1662        return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
1663}
1664
1665/* Slow path of a mempolicy comparison */
1666int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1667{
1668        if (!a || !b)
1669                return 0;
1670        if (a->mode != b->mode)
1671                return 0;
1672        if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
1673                return 0;
1674        switch (a->mode) {
1675        case MPOL_BIND:
1676                /* Fall through */
1677        case MPOL_INTERLEAVE:
1678                return nodes_equal(a->v.nodes, b->v.nodes);
1679        case MPOL_PREFERRED:
1680                return a->v.preferred_node == b->v.preferred_node &&
1681                        a->flags == b->flags;
1682        default:
1683                BUG();
1684                return 0;
1685        }
1686}
1687
1688/*
1689 * Shared memory backing store policy support.
1690 *
1691 * Remember policies even when nobody has shared memory mapped.
1692 * The policies are kept in a red-black tree linked from the inode.
1693 * They are protected by the sp->lock spinlock, which should be held
1694 * for any accesses to the tree.
1695 */
1696
1697/* lookup first element intersecting start-end */
1698/* Caller holds sp->lock */
1699static struct sp_node *
1700sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
1701{
1702        struct rb_node *n = sp->root.rb_node;
1703
1704        while (n) {
1705                struct sp_node *p = rb_entry(n, struct sp_node, nd);
1706
1707                if (start >= p->end)
1708                        n = n->rb_right;
1709                else if (end <= p->start)
1710                        n = n->rb_left;
1711                else
1712                        break;
1713        }
1714        if (!n)
1715                return NULL;
1716        for (;;) {
1717                struct sp_node *w = NULL;
1718                struct rb_node *prev = rb_prev(n);
1719                if (!prev)
1720                        break;
1721                w = rb_entry(prev, struct sp_node, nd);
1722                if (w->end <= start)
1723                        break;
1724                n = prev;
1725        }
1726        return rb_entry(n, struct sp_node, nd);
1727}
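
/*
 * Example with illustrative ranges: with nodes [0,4) and [6,10) in the
 * tree, sp_lookup(sp, 3, 7) first lands on some overlapping node, then
 * the rb_prev() loop walks left so the *first* intersecting node,
 * [0,4), is returned rather than a later one.
 */
#if 0
        struct sp_node *n = sp_lookup(sp, 3, 7);        /* -> the [0,4) node */
#endif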
1728
1729/* Insert a new shared policy into the list. */
1730/* Caller holds sp->lock */
1731static void sp_insert(struct shared_policy *sp, struct sp_node *new)
1732{
1733        struct rb_node **p = &sp->root.rb_node;
1734        struct rb_node *parent = NULL;
1735        struct sp_node *nd;
1736
1737        while (*p) {
1738                parent = *p;
1739                nd = rb_entry(parent, struct sp_node, nd);
1740                if (new->start < nd->start)
1741                        p = &(*p)->rb_left;
1742                else if (new->end > nd->end)
1743                        p = &(*p)->rb_right;
1744                else
1745                        BUG();
1746        }
1747        rb_link_node(&new->nd, parent, p);
1748        rb_insert_color(&new->nd, &sp->root);
1749        pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
1750                 new->policy ? new->policy->mode : 0);
1751}
1752
1753/* Find shared policy intersecting idx */
1754struct mempolicy *
1755mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
1756{
1757        struct mempolicy *pol = NULL;
1758        struct sp_node *sn;
1759
1760        if (!sp->root.rb_node)
1761                return NULL;
1762        spin_lock(&sp->lock);
1763        sn = sp_lookup(sp, idx, idx+1);
1764        if (sn) {
1765                mpol_get(sn->policy);
1766                pol = sn->policy;
1767        }
1768        spin_unlock(&sp->lock);
1769        return pol;
1770}
1771
1772static void sp_delete(struct shared_policy *sp, struct sp_node *n)
1773{
1774        pr_debug("deleting %lx-l%lx\n", n->start, n->end);
1775        rb_erase(&n->nd, &sp->root);
1776        mpol_put(n->policy);
1777        kmem_cache_free(sn_cache, n);
1778}
1779
1780static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
1781                                struct mempolicy *pol)
1782{
1783        struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
1784
1785        if (!n)
1786                return NULL;
1787        n->start = start;
1788        n->end = end;
1789        mpol_get(pol);
1790        pol->flags |= MPOL_F_SHARED;    /* for unref */
1791        n->policy = pol;
1792        return n;
1793}
1794
1795/* Replace a policy range. */
1796static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
1797                                 unsigned long end, struct sp_node *new)
1798{
1799        struct sp_node *n, *new2 = NULL;
1800
1801restart:
1802        spin_lock(&sp->lock);
1803        n = sp_lookup(sp, start, end);
1804        /* Take care of old policies in the same range. */
1805        while (n && n->start < end) {
1806                struct rb_node *next = rb_next(&n->nd);
1807                if (n->start >= start) {
1808                        if (n->end <= end)
1809                                sp_delete(sp, n);
1810                        else
1811                                n->start = end;
1812                } else {
1813                        /* Old policy spanning whole new range. */
1814                        if (n->end > end) {
1815                                if (!new2) {
1816                                        spin_unlock(&sp->lock);
1817                                        new2 = sp_alloc(end, n->end, n->policy);
1818                                        if (!new2)
1819                                                return -ENOMEM;
1820                                        goto restart;
1821                                }
1822                                n->end = start;
1823                                sp_insert(sp, new2);
1824                                new2 = NULL;
1825                                break;
1826                        } else
1827                                n->end = start;
1828                }
1829                if (!next)
1830                        break;
1831                n = rb_entry(next, struct sp_node, nd);
1832        }
1833        if (new)
1834                sp_insert(sp, new);
1835        spin_unlock(&sp->lock);
1836        if (new2) {
1837                mpol_put(new2->policy);
1838                kmem_cache_free(sn_cache, new2);
1839        }
1840        return 0;
1841}
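
/*
 * Note on the restart above: sp_alloc() allocates with GFP_KERNEL and
 * may sleep, so it cannot run under sp->lock.  When an old policy spans
 * the whole new range, the lock is dropped, the trailing [end, n->end)
 * piece is allocated outside it, and the lookup restarts from scratch
 * because the tree may have changed while the lock was not held.
 */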
1842
1843/**
1844 * mpol_shared_policy_init - initialize shared policy for inode
1845 * @sp: pointer to inode shared policy
1846 * @mpol:  struct mempolicy to install
1847 *
1848 * Install non-NULL @mpol in inode's shared policy rb-tree.
1849 * On entry, the current task has a reference on a non-NULL @mpol.
1850 * This must be released on exit.
1851 */
1852void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
1853{
1854        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
1855        spin_lock_init(&sp->lock);
1856
1857        if (mpol) {
1858                struct vm_area_struct pvma;
1859                struct mempolicy *new;
1860
1861                /* contextualize the tmpfs mount point mempolicy */
1862                new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
1863                mpol_put(mpol); /* drop our ref on sb mpol */
1864                if (IS_ERR(new))
1865                        return;         /* no valid nodemask intersection */
1866
1867                /* Create pseudo-vma that contains just the policy */
1868                memset(&pvma, 0, sizeof(struct vm_area_struct));
1869                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
1870                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
1871                mpol_put(new);                  /* drop initial ref */
1872        }
1873}
1874
1875int mpol_set_shared_policy(struct shared_policy *info,
1876                        struct vm_area_struct *vma, struct mempolicy *npol)
1877{
1878        int err;
1879        struct sp_node *new = NULL;
1880        unsigned long sz = vma_pages(vma);
1881
1882        pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
1883                 vma->vm_pgoff,
1884                 sz, npol ? npol->mode : -1,
1885                 npol ? npol->flags : -1,
1886                 npol ? nodes_addr(npol->v.nodes)[0] : -1);
1887
1888        if (npol) {
1889                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
1890                if (!new)
1891                        return -ENOMEM;
1892        }
1893        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
1894        if (err && new)
1895                kmem_cache_free(sn_cache, new);
1896        return err;
1897}
1898
1899/* Free a backing policy store on inode delete. */
1900void mpol_free_shared_policy(struct shared_policy *p)
1901{
1902        struct sp_node *n;
1903        struct rb_node *next;
1904
1905        if (!p->root.rb_node)
1906                return;
1907        spin_lock(&p->lock);
1908        next = rb_first(&p->root);
1909        while (next) {
1910                n = rb_entry(next, struct sp_node, nd);
1911                next = rb_next(&n->nd);
1912                rb_erase(&n->nd, &p->root);
1913                mpol_put(n->policy);
1914                kmem_cache_free(sn_cache, n);
1915        }
1916        spin_unlock(&p->lock);
1917}
1918
1919/* assumes fs == KERNEL_DS */
1920void __init numa_policy_init(void)
1921{
1922        nodemask_t interleave_nodes;
1923        unsigned long largest = 0;
1924        int nid, prefer = 0;
1925
1926        policy_cache = kmem_cache_create("numa_policy",
1927                                         sizeof(struct mempolicy),
1928                                         0, SLAB_PANIC, NULL);
1929
1930        sn_cache = kmem_cache_create("shared_policy_node",
1931                                     sizeof(struct sp_node),
1932                                     0, SLAB_PANIC, NULL);
1933
1934        /*
1935         * Set interleaving policy for system init. Interleaving is only
1936         * enabled across suitably sized nodes (default is >= 16MB); if all
1937         * nodes are smaller, fall back to the largest node alone.
1938         */
1939        nodes_clear(interleave_nodes);
1940        for_each_node_state(nid, N_HIGH_MEMORY) {
1941                unsigned long total_pages = node_present_pages(nid);
1942
1943                /* Preserve the largest node */
1944                if (largest < total_pages) {
1945                        largest = total_pages;
1946                        prefer = nid;
1947                }
1948
1949                /* Interleave this node? */
1950                if ((total_pages << PAGE_SHIFT) >= (16 << 20))
1951                        node_set(nid, interleave_nodes);
1952        }
1953
1954        /* All too small, use the largest */
1955        if (unlikely(nodes_empty(interleave_nodes)))
1956                node_set(prefer, interleave_nodes);
1957
1958        if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
1959                printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
1960}
1961
1962/* Reset policy of current process to default */
1963void numa_default_policy(void)
1964{
1965        do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
1966}
1967
1968/*
1969 * Parse and format mempolicy from/to strings
1970 */
1971
1972/*
1973 * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
1974 * Used only for mpol_parse_str() and mpol_to_str()
1975 */
1976#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
1977static const char * const policy_types[] =
1978        { "default", "prefer", "bind", "interleave", "local" };
1979
1980
1981#ifdef CONFIG_TMPFS
1982/**
1983 * mpol_parse_str - parse string to mempolicy
1984 * @str:  string containing mempolicy to parse
1985 * @mpol:  pointer to struct mempolicy pointer, returned on success.
1986 * @no_context:  flag whether to "contextualize" the mempolicy
1987 *
1988 * Format of input:
1989 *      <mode>[=<flags>][:<nodelist>]
1990 *
1991 * if @no_context is true, save the input nodemask in w.user_nodemask in
1992 * the returned mempolicy.  This will be used to "clone" the mempolicy in
1993 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
1994 * mount option.  Note that if 'static' or 'relative' mode flags were
1995 * specified, the input nodemask will already have been saved.  Saving
1996 * it again is redundant, but safe.
1997 *
1998 * On success, returns 0, else 1
1999 */
2000int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2001{
2002        struct mempolicy *new = NULL;
2003        unsigned short uninitialized_var(mode);
2004        unsigned short uninitialized_var(mode_flags);
2005        nodemask_t nodes;
2006        char *nodelist = strchr(str, ':');
2007        char *flags = strchr(str, '=');
2008        int i;
2009        int err = 1;
2010
2011        if (nodelist) {
2012                /* NUL-terminate mode or flags string */
2013                *nodelist++ = '\0';
2014                if (nodelist_parse(nodelist, nodes))
2015                        goto out;
2016                if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2017                        goto out;
2018        } else
2019                nodes_clear(nodes);
2020
2021        if (flags)
2022                *flags++ = '\0';        /* terminate mode string */
2023
2024        for (i = 0; i <= MPOL_LOCAL; i++) {
2025                if (!strcmp(str, policy_types[i])) {
2026                        mode = i;
2027                        break;
2028                }
2029        }
2030        if (i > MPOL_LOCAL)
2031                goto out;
2032
2033        switch (mode) {
2034        case MPOL_PREFERRED:
2035                /*
2036                 * Insist on a nodelist of one node only
2037                 */
2038                if (nodelist) {
2039                        char *rest = nodelist;
2040                        while (isdigit(*rest))
2041                                rest++;
2042                        if (!*rest)
2043                                err = 0;
2044                }
2045                break;
2046        case MPOL_INTERLEAVE:
2047                /*
2048                 * Default to online nodes with memory if no nodelist
2049                 */
2050                if (!nodelist)
2051                        nodes = node_states[N_HIGH_MEMORY];
2052                err = 0;
2053                break;
2054        case MPOL_LOCAL:
2055                /*
2056                 * Don't allow a nodelist;  mpol_new() checks flags
2057                 */
2058                if (nodelist)
2059                        goto out;
2060                mode = MPOL_PREFERRED;
2061                break;
2062
2063        /*
2064         * case MPOL_BIND:    mpol_new() enforces non-empty nodemask.
2065         * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
2066         */
2067        }
2068
2069        mode_flags = 0;
2070        if (flags) {
2071                /*
2072                 * Currently, we only support two mutually exclusive
2073                 * mode flags.
2074                 */
2075                if (!strcmp(flags, "static"))
2076                        mode_flags |= MPOL_F_STATIC_NODES;
2077                else if (!strcmp(flags, "relative"))
2078                        mode_flags |= MPOL_F_RELATIVE_NODES;
2079                else
2080                        err = 1;
2081        }
2082
2083        new = mpol_new(mode, mode_flags, &nodes);
2084        if (IS_ERR(new))
2085                err = 1;
2086        else if (no_context)
2087                new->w.user_nodemask = nodes;   /* save for contextualization */
2088
2089out:
2090        /* Restore string for error message */
2091        if (nodelist)
2092                *--nodelist = ':';
2093        if (flags)
2094                *--flags = '=';
2095        if (!err)
2096                *mpol = new;
2097        return err;
2098}
2099#endif /* CONFIG_TMPFS */
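
/*
 * Example strings accepted by mpol_parse_str(), as used for the tmpfs
 * "mpol=" mount option (the mount line is a sketch):
 *
 *      default
 *      prefer:1
 *      bind=static:0-3
 *      interleave=relative:0,2,4
 *      local
 *
 *      mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 */
#if 0   /* illustrative call; the buffer must be writable */
        struct mempolicy *mpol;
        char buf[] = "interleave=static:0-3";

        if (!mpol_parse_str(buf, &mpol, 1))
                /* success: mpol interleaves over nodes 0-3 */;
#endif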
2100
2101/**
2102 * mpol_to_str - format a mempolicy structure for printing
2103 * @buffer:  to contain formatted mempolicy string
2104 * @maxlen:  length of @buffer
2105 * @pol:  pointer to mempolicy to be formatted
2106 * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2107 *
2108 * Convert a mempolicy into a string.
2109 * Returns the number of characters in buffer (if positive)
2110 * or an error (negative)
2111 */
2112int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2113{
2114        char *p = buffer;
2115        int l;
2116        nodemask_t nodes;
2117        unsigned short mode;
2118        unsigned short flags = pol ? pol->flags : 0;
2119
2120        /*
2121         * Sanity check:  room for longest mode, flag and some nodes
2122         */
2123        VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2124
2125        if (!pol || pol == &default_policy)
2126                mode = MPOL_DEFAULT;
2127        else
2128                mode = pol->mode;
2129
2130        switch (mode) {
2131        case MPOL_DEFAULT:
2132                nodes_clear(nodes);
2133                break;
2134
2135        case MPOL_PREFERRED:
2136                nodes_clear(nodes);
2137                if (flags & MPOL_F_LOCAL)
2138                        mode = MPOL_LOCAL;      /* pseudo-policy */
2139                else
2140                        node_set(pol->v.preferred_node, nodes);
2141                break;
2142
2143        case MPOL_BIND:
2144                /* Fall through */
2145        case MPOL_INTERLEAVE:
2146                if (no_context)
2147                        nodes = pol->w.user_nodemask;
2148                else
2149                        nodes = pol->v.nodes;
2150                break;
2151
2152        default:
2153                BUG();
2154        }
2155
2156        l = strlen(policy_types[mode]);
2157        if (buffer + maxlen < p + l + 1)
2158                return -ENOSPC;
2159
2160        strcpy(p, policy_types[mode]);
2161        p += l;
2162
2163        if (flags & MPOL_MODE_FLAGS) {
2164                if (buffer + maxlen < p + 2)
2165                        return -ENOSPC;
2166                *p++ = '=';
2167
2168                /*
2169                 * Currently, the only defined flags are mutually exclusive
2170                 */
2171                if (flags & MPOL_F_STATIC_NODES)
2172                        p += snprintf(p, buffer + maxlen - p, "static");
2173                else if (flags & MPOL_F_RELATIVE_NODES)
2174                        p += snprintf(p, buffer + maxlen - p, "relative");
2175        }
2176
2177        if (!nodes_empty(nodes)) {
2178                if (buffer + maxlen < p + 2)
2179                        return -ENOSPC;
2180                *p++ = ':';
2181                p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2182        }
2183        return p - buffer;
2184}
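
/*
 * mpol_to_str() emits the same format mpol_parse_str() accepts, e.g.
 * "default", "prefer:1", "bind:0-3", "interleave=static:0,2" or
 * "local", so the two roughly round-trip; a minimal sketch:
 */
#if 0
        char buf[64];

        if (mpol_to_str(buf, sizeof(buf), pol, 0) > 0)
                printk(KERN_DEBUG "mempolicy: %s\n", buf);
#endif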
2185
2186struct numa_maps {
2187        unsigned long pages;
2188        unsigned long anon;
2189        unsigned long active;
2190        unsigned long writeback;
2191        unsigned long mapcount_max;
2192        unsigned long dirty;
2193        unsigned long swapcache;
2194        unsigned long node[MAX_NUMNODES];
2195};
2196
2197static void gather_stats(struct page *page, void *private, int pte_dirty)
2198{
2199        struct numa_maps *md = private;
2200        int count = page_mapcount(page);
2201
2202        md->pages++;
2203        if (pte_dirty || PageDirty(page))
2204                md->dirty++;
2205
2206        if (PageSwapCache(page))
2207                md->swapcache++;
2208
2209        if (PageActive(page) || PageUnevictable(page))
2210                md->active++;
2211
2212        if (PageWriteback(page))
2213                md->writeback++;
2214
2215        if (PageAnon(page))
2216                md->anon++;
2217
2218        if (count > md->mapcount_max)
2219                md->mapcount_max = count;
2220
2221        md->node[page_to_nid(page)]++;
2222}
2223
2224#ifdef CONFIG_HUGETLB_PAGE
2225static void check_huge_range(struct vm_area_struct *vma,
2226                unsigned long start, unsigned long end,
2227                struct numa_maps *md)
2228{
2229        unsigned long addr;
2230        struct page *page;
2231        struct hstate *h = hstate_vma(vma);
2232        unsigned long sz = huge_page_size(h);
2233
2234        for (addr = start; addr < end; addr += sz) {
2235                pte_t *ptep = huge_pte_offset(vma->vm_mm,
2236                                                addr & huge_page_mask(h));
2237                pte_t pte;
2238
2239                if (!ptep)
2240                        continue;
2241
2242                pte = *ptep;
2243                if (pte_none(pte))
2244                        continue;
2245
2246                page = pte_page(pte);
2247                if (!page)
2248                        continue;
2249
2250                gather_stats(page, md, pte_dirty(*ptep));
2251        }
2252}
2253#else
2254static inline void check_huge_range(struct vm_area_struct *vma,
2255                unsigned long start, unsigned long end,
2256                struct numa_maps *md)
2257{
2258}
2259#endif
2260
2261/*
2262 * Display pages allocated per node and memory policy via /proc.
2263 */
2264int show_numa_map(struct seq_file *m, void *v)
2265{
2266        struct proc_maps_private *priv = m->private;
2267        struct vm_area_struct *vma = v;
2268        struct numa_maps *md;
2269        struct file *file = vma->vm_file;
2270        struct mm_struct *mm = vma->vm_mm;
2271        struct mempolicy *pol;
2272        int n;
2273        char buffer[50];
2274
2275        if (!mm)
2276                return 0;
2277
2278        md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
2279        if (!md)
2280                return 0;
2281
2282        pol = get_vma_policy(priv->task, vma, vma->vm_start);
2283        mpol_to_str(buffer, sizeof(buffer), pol, 0);
2284        mpol_cond_put(pol);
2285
2286        seq_printf(m, "%08lx %s", vma->vm_start, buffer);
2287
2288        if (file) {
2289                seq_printf(m, " file=");
2290                seq_path(m, &file->f_path, "\n\t= ");
2291        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
2292                seq_printf(m, " heap");
2293        } else if (vma->vm_start <= mm->start_stack &&
2294                        vma->vm_end >= mm->start_stack) {
2295                seq_printf(m, " stack");
2296        }
2297
2298        if (is_vm_hugetlb_page(vma)) {
2299                check_huge_range(vma, vma->vm_start, vma->vm_end, md);
2300                seq_printf(m, " huge");
2301        } else {
2302                check_pgd_range(vma, vma->vm_start, vma->vm_end,
2303                        &node_states[N_HIGH_MEMORY], MPOL_MF_STATS, md);
2304        }
2305
2306        if (!md->pages)
2307                goto out;
2308
2309        if (md->anon)
2310                seq_printf(m, " anon=%lu", md->anon);
2311
2312        if (md->dirty)
2313                seq_printf(m, " dirty=%lu", md->dirty);
2314
2315        if (md->pages != md->anon && md->pages != md->dirty)
2316                seq_printf(m, " mapped=%lu", md->pages);
2317
2318        if (md->mapcount_max > 1)
2319                seq_printf(m, " mapmax=%lu", md->mapcount_max);
2320
2321        if (md->swapcache)
2322                seq_printf(m, " swapcache=%lu", md->swapcache);
2323
2324        if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2325                seq_printf(m, " active=%lu", md->active);
2326
2327        if (md->writeback)
2328                seq_printf(m, " writeback=%lu", md->writeback);
2329
2330        for_each_node_state(n, N_HIGH_MEMORY)
2331                if (md->node[n])
2332                        seq_printf(m, " N%d=%lu", n, md->node[n]);
2333out:
2334        seq_putc(m, '\n');
2335        kfree(md);
2336
2337        if (m->count < m->size)
2338                m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
2339        return 0;
2340}
2341
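/*
 * Example of one /proc/<pid>/numa_maps line produced above (a single
 * line in /proc, wrapped here for width; all values are illustrative):
 *
 *      2aaaaac000 interleave=static:0-3 file=/lib64/libc.so mapped=14
 *      mapmax=3 N0=4 N1=3 N2=4 N3=3
 */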