linux/mm/mempolicy.c
   1/*
   2 * Simple NUMA memory policy for the Linux kernel.
   3 *
   4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
   6 * Subject to the GNU Public License, version 2.
   7 *
   8 * NUMA policy allows the user to give hints about the node(s) from which
   9 * memory should be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
   19 *                for anonymous memory. For process policy a process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
   28 * preferred      Try a specific node first before normal fallback.
  29 *                As a special case node -1 here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * default        Allocate on the local node first, or when on a VMA
  35 *                use the process policy. This is what Linux always did
  36 *                in a NUMA aware kernel and still does by, ahem, default.
  37 *
   38 * The process policy is applied for most non-interrupt memory allocations
  39 * in that process' context. Interrupts ignore the policies and always
  40 * try to allocate on the local CPU. The VMA policy is only applied for memory
  41 * allocations for a VMA in the VM.
  42 *
  43 * Currently there are a few corner cases in swapping where the policy
  44 * is not applied, but the majority should be handled. When process policy
  45 * is used it is not remembered over swap outs/swap ins.
  46 *
  47 * Only the highest zone in the zone hierarchy gets policied. Allocations
  48 * requesting a lower zone just use default policy. This implies that
   49 * on systems with highmem, kernel lowmem allocations don't get policied.
  50 * Same with GFP_DMA allocations.
  51 *
  52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  53 * all users and remembered even when nobody has memory mapped.
  54 */
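
/*
 * Illustrative userspace sketch (not part of mempolicy.c): how the four
 * policies above are typically requested.  A minimal sketch, assuming the
 * libnuma <numaif.h> wrappers for the set_mempolicy(2) and mbind(2)
 * syscalls; error handling is omitted.
 */
#if 0	/* example only */
#include <numaif.h>
#include <sys/mman.h>

static void mempolicy_examples(void)
{
        unsigned long nodes = 0x3;      /* nodes 0 and 1 */
        size_t len = 1 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* Process policy: interleave future allocations over nodes 0-1. */
        set_mempolicy(MPOL_INTERLEAVE, &nodes, 8 * sizeof(nodes));

        /* VMA policy: bind the mapping at p to nodes 0-1, no fallback. */
        mbind(p, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0);

        /* Back to the default (local) policy for the process. */
        set_mempolicy(MPOL_DEFAULT, NULL, 0);
}
#endif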
  55
  56/* Notebook:
  57   fix mmap readahead to honour policy and enable policy for any page cache
  58   object
  59   statistics for bigpages
  60   global policy for page cache? currently it uses process policy. Requires
  61   first item above.
  62   handle mremap for shared memory (currently ignored for the policy)
  63   grows down?
  64   make bind policy root only? It can trigger oom much faster and the
   65   kernel is not always graceful with that.
  66*/
  67
  68#include <linux/mempolicy.h>
  69#include <linux/mm.h>
  70#include <linux/highmem.h>
  71#include <linux/hugetlb.h>
  72#include <linux/kernel.h>
  73#include <linux/sched.h>
  74#include <linux/nodemask.h>
  75#include <linux/cpuset.h>
  76#include <linux/slab.h>
  77#include <linux/string.h>
  78#include <linux/export.h>
  79#include <linux/nsproxy.h>
  80#include <linux/interrupt.h>
  81#include <linux/init.h>
  82#include <linux/compat.h>
  83#include <linux/swap.h>
  84#include <linux/seq_file.h>
  85#include <linux/proc_fs.h>
  86#include <linux/migrate.h>
  87#include <linux/ksm.h>
  88#include <linux/rmap.h>
  89#include <linux/security.h>
  90#include <linux/syscalls.h>
  91#include <linux/ctype.h>
  92#include <linux/mm_inline.h>
  93
  94#include <asm/tlbflush.h>
  95#include <asm/uaccess.h>
  96#include <linux/random.h>
  97
  98#include "internal.h"
  99
 100/* Internal flags */
  101#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for contiguous VMAs */
 102#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */
 103
 104static struct kmem_cache *policy_cache;
 105static struct kmem_cache *sn_cache;
 106
  107/* Highest zone. A specific allocation for a zone below that is not
 108   policied. */
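/*
 * For example (illustrative): on a 32-bit system with HIGHMEM, policy_zone
 * ends up as ZONE_HIGHMEM, so GFP_DMA and other lowmem requests fall back
 * to the default policy, as noted in the header comment above.
 */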
 109enum zone_type policy_zone = 0;
 110
 111/*
 112 * run-time system-wide default policy => local allocation
 113 */
 114static struct mempolicy default_policy = {
 115        .refcnt = ATOMIC_INIT(1), /* never free it */
 116        .mode = MPOL_PREFERRED,
 117        .flags = MPOL_F_LOCAL,
 118};
 119
 120static const struct mempolicy_operations {
 121        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
  122        /*
  123         * If the read-side task has no lock to protect task->mempolicy, the
  124         * write-side task will rebind task->mempolicy in two steps. The first
  125         * step sets all the new nodes, and the second step cleans out all the
  126         * disallowed nodes. That way a reader can never observe a policy with
  127         * no node left to allocate from.
  128         * If we do have a lock to protect task->mempolicy on the read side,
  129         * we rebind directly.
  130         *
  131         * step:
  132         *      MPOL_REBIND_ONCE  - do the rebind work at once
  133         *      MPOL_REBIND_STEP1 - set all the new nodes
  134         *      MPOL_REBIND_STEP2 - clean out all the disallowed nodes
  135         */
 136        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
 137                        enum mpol_rebind_step step);
 138} mpol_ops[MPOL_MAX];
 139
 140/* Check that the nodemask contains at least one populated zone */
 141static int is_valid_nodemask(const nodemask_t *nodemask)
 142{
 143        int nd, k;
 144
 145        for_each_node_mask(nd, *nodemask) {
 146                struct zone *z;
 147
 148                for (k = 0; k <= policy_zone; k++) {
 149                        z = &NODE_DATA(nd)->node_zones[k];
 150                        if (z->present_pages > 0)
 151                                return 1;
 152                }
 153        }
 154
 155        return 0;
 156}
 157
 158static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 159{
 160        return pol->flags & MPOL_MODE_FLAGS;
 161}
 162
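/*
 * Map a user-supplied "relative" nodemask onto the currently allowed nodes.
 * Worked example (illustrative): with orig = {0,2,5} and rel = {1,3},
 * nodes_fold() wraps orig onto a 2-node space, giving {0,1}, and
 * nodes_onto() maps those positions onto the set bits of rel, yielding
 * ret = {1,3}.
 */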
 163static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 164                                   const nodemask_t *rel)
 165{
 166        nodemask_t tmp;
 167        nodes_fold(tmp, *orig, nodes_weight(*rel));
 168        nodes_onto(*ret, tmp, *rel);
 169}
 170
 171static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 172{
 173        if (nodes_empty(*nodes))
 174                return -EINVAL;
 175        pol->v.nodes = *nodes;
 176        return 0;
 177}
 178
 179static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 180{
 181        if (!nodes)
 182                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
 183        else if (nodes_empty(*nodes))
 184                return -EINVAL;                 /*  no allowed nodes */
 185        else
 186                pol->v.preferred_node = first_node(*nodes);
 187        return 0;
 188}
 189
 190static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 191{
 192        if (!is_valid_nodemask(nodes))
 193                return -EINVAL;
 194        pol->v.nodes = *nodes;
 195        return 0;
 196}
 197
 198/*
 199 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 200 * any, for the new policy.  mpol_new() has already validated the nodes
 201 * parameter with respect to the policy mode and flags.  But, we need to
 202 * handle an empty nodemask with MPOL_PREFERRED here.
 203 *
 204 * Must be called holding task's alloc_lock to protect task's mems_allowed
 205 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 206 */
 207static int mpol_set_nodemask(struct mempolicy *pol,
 208                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 209{
 210        int ret;
 211
 212        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
 213        if (pol == NULL)
 214                return 0;
 215        /* Check N_HIGH_MEMORY */
 216        nodes_and(nsc->mask1,
 217                  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
 218
 219        VM_BUG_ON(!nodes);
 220        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
 221                nodes = NULL;   /* explicit local allocation */
 222        else {
 223                if (pol->flags & MPOL_F_RELATIVE_NODES)
 224                        mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
 225                else
 226                        nodes_and(nsc->mask2, *nodes, nsc->mask1);
 227
 228                if (mpol_store_user_nodemask(pol))
 229                        pol->w.user_nodemask = *nodes;
 230                else
 231                        pol->w.cpuset_mems_allowed =
 232                                                cpuset_current_mems_allowed;
 233        }
 234
 235        if (nodes)
 236                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 237        else
 238                ret = mpol_ops[pol->mode].create(pol, NULL);
 239        return ret;
 240}
 241
 242/*
  243 * This function just creates a new policy, does some checks and simple
 244 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 245 */
 246static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 247                                  nodemask_t *nodes)
 248{
 249        struct mempolicy *policy;
 250
 251        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 252                 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
 253
 254        if (mode == MPOL_DEFAULT) {
 255                if (nodes && !nodes_empty(*nodes))
 256                        return ERR_PTR(-EINVAL);
 257                return NULL;    /* simply delete any existing policy */
 258        }
 259        VM_BUG_ON(!nodes);
 260
 261        /*
 262         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 263         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 264         * All other modes require a valid pointer to a non-empty nodemask.
 265         */
 266        if (mode == MPOL_PREFERRED) {
 267                if (nodes_empty(*nodes)) {
 268                        if (((flags & MPOL_F_STATIC_NODES) ||
 269                             (flags & MPOL_F_RELATIVE_NODES)))
 270                                return ERR_PTR(-EINVAL);
 271                }
 272        } else if (nodes_empty(*nodes))
 273                return ERR_PTR(-EINVAL);
 274        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 275        if (!policy)
 276                return ERR_PTR(-ENOMEM);
 277        atomic_set(&policy->refcnt, 1);
 278        policy->mode = mode;
 279        policy->flags = flags;
 280
 281        return policy;
 282}
 283
 284/* Slow path of a mpol destructor. */
 285void __mpol_put(struct mempolicy *p)
 286{
 287        if (!atomic_dec_and_test(&p->refcnt))
 288                return;
 289        kmem_cache_free(policy_cache, p);
 290}
 291
 292static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
 293                                enum mpol_rebind_step step)
 294{
 295}
 296
 297/*
 298 * step:
  299 *      MPOL_REBIND_ONCE  - do the rebind work at once
  300 *      MPOL_REBIND_STEP1 - set all the new nodes
  301 *      MPOL_REBIND_STEP2 - clean out all the disallowed nodes
 302 */
 303static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
 304                                 enum mpol_rebind_step step)
 305{
 306        nodemask_t tmp;
 307
 308        if (pol->flags & MPOL_F_STATIC_NODES)
 309                nodes_and(tmp, pol->w.user_nodemask, *nodes);
 310        else if (pol->flags & MPOL_F_RELATIVE_NODES)
 311                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 312        else {
  313                /*
  314                 * For MPOL_REBIND_STEP1, cache the intermediate result in
  315                 * ->w.cpuset_mems_allowed so MPOL_REBIND_STEP2 can finish it.
  316                 */
 317                if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
 318                        nodes_remap(tmp, pol->v.nodes,
 319                                        pol->w.cpuset_mems_allowed, *nodes);
 320                        pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
 321                } else if (step == MPOL_REBIND_STEP2) {
 322                        tmp = pol->w.cpuset_mems_allowed;
 323                        pol->w.cpuset_mems_allowed = *nodes;
 324                } else
 325                        BUG();
 326        }
 327
 328        if (nodes_empty(tmp))
 329                tmp = *nodes;
 330
 331        if (step == MPOL_REBIND_STEP1)
 332                nodes_or(pol->v.nodes, pol->v.nodes, tmp);
 333        else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
 334                pol->v.nodes = tmp;
 335        else
 336                BUG();
 337
 338        if (!node_isset(current->il_next, tmp)) {
 339                current->il_next = next_node(current->il_next, tmp);
 340                if (current->il_next >= MAX_NUMNODES)
 341                        current->il_next = first_node(tmp);
 342                if (current->il_next >= MAX_NUMNODES)
 343                        current->il_next = numa_node_id();
 344        }
 345}
 346
 347static void mpol_rebind_preferred(struct mempolicy *pol,
 348                                  const nodemask_t *nodes,
 349                                  enum mpol_rebind_step step)
 350{
 351        nodemask_t tmp;
 352
 353        if (pol->flags & MPOL_F_STATIC_NODES) {
 354                int node = first_node(pol->w.user_nodemask);
 355
 356                if (node_isset(node, *nodes)) {
 357                        pol->v.preferred_node = node;
 358                        pol->flags &= ~MPOL_F_LOCAL;
 359                } else
 360                        pol->flags |= MPOL_F_LOCAL;
 361        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
 362                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 363                pol->v.preferred_node = first_node(tmp);
 364        } else if (!(pol->flags & MPOL_F_LOCAL)) {
 365                pol->v.preferred_node = node_remap(pol->v.preferred_node,
 366                                                   pol->w.cpuset_mems_allowed,
 367                                                   *nodes);
 368                pol->w.cpuset_mems_allowed = *nodes;
 369        }
 370}
 371
 372/*
 373 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 374 *
  375 * If the read-side task has no lock to protect task->mempolicy, the
  376 * write-side task will rebind task->mempolicy in two steps. The first
  377 * step sets all the new nodes, and the second step cleans out all the
  378 * disallowed nodes. That way a reader can never observe a policy with
  379 * no node left to allocate from.
  380 * If we do have a lock to protect task->mempolicy on the read side,
  381 * we rebind directly.
  382 *
  383 * step:
  384 *      MPOL_REBIND_ONCE  - do the rebind work at once
  385 *      MPOL_REBIND_STEP1 - set all the new nodes
  386 *      MPOL_REBIND_STEP2 - clean out all the disallowed nodes
 387 */
 388static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
 389                                enum mpol_rebind_step step)
 390{
 391        if (!pol)
 392                return;
 393        if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
 394            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 395                return;
 396
 397        if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
 398                return;
 399
 400        if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
 401                BUG();
 402
 403        if (step == MPOL_REBIND_STEP1)
 404                pol->flags |= MPOL_F_REBINDING;
 405        else if (step == MPOL_REBIND_STEP2)
 406                pol->flags &= ~MPOL_F_REBINDING;
 407        else if (step >= MPOL_REBIND_NSTEP)
 408                BUG();
 409
 410        mpol_ops[pol->mode].rebind(pol, newmask, step);
 411}
 412
 413/*
 414 * Wrapper for mpol_rebind_policy() that just requires task
 415 * pointer, and updates task mempolicy.
 416 *
 417 * Called with task's alloc_lock held.
 418 */
 419
 420void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
 421                        enum mpol_rebind_step step)
 422{
 423        mpol_rebind_policy(tsk->mempolicy, new, step);
 424}
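
/*
 * Rough sketch of the write-side caller pattern described above (the cpuset
 * code is the real user): first add the new nodes, then remove the
 * disallowed ones, so a lockless reader never sees an empty nodemask.
 * Illustration only, with the surrounding cpuset details elided.
 */
#if 0
static void example_two_step_rebind(struct task_struct *tsk,
                                    const nodemask_t *newmems)
{
        task_lock(tsk);
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);      /* grow */
        tsk->mems_allowed = *newmems;
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);      /* shrink */
        task_unlock(tsk);
}
#endif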
 425
 426/*
 427 * Rebind each vma in mm to new nodemask.
 428 *
 429 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 430 */
 431
 432void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 433{
 434        struct vm_area_struct *vma;
 435
 436        down_write(&mm->mmap_sem);
 437        for (vma = mm->mmap; vma; vma = vma->vm_next)
 438                mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
 439        up_write(&mm->mmap_sem);
 440}
 441
 442static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 443        [MPOL_DEFAULT] = {
 444                .rebind = mpol_rebind_default,
 445        },
 446        [MPOL_INTERLEAVE] = {
 447                .create = mpol_new_interleave,
 448                .rebind = mpol_rebind_nodemask,
 449        },
 450        [MPOL_PREFERRED] = {
 451                .create = mpol_new_preferred,
 452                .rebind = mpol_rebind_preferred,
 453        },
 454        [MPOL_BIND] = {
 455                .create = mpol_new_bind,
 456                .rebind = mpol_rebind_nodemask,
 457        },
 458};
 459
 460static void migrate_page_add(struct page *page, struct list_head *pagelist,
 461                                unsigned long flags);
 462
  463/* Scan a PTE range, checking whether the pages satisfy the given conditions. */
 464static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 465                unsigned long addr, unsigned long end,
 466                const nodemask_t *nodes, unsigned long flags,
 467                void *private)
 468{
 469        pte_t *orig_pte;
 470        pte_t *pte;
 471        spinlock_t *ptl;
 472
 473        orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 474        do {
 475                struct page *page;
 476                int nid;
 477
 478                if (!pte_present(*pte))
 479                        continue;
 480                page = vm_normal_page(vma, addr, *pte);
 481                if (!page)
 482                        continue;
 483                /*
 484                 * vm_normal_page() filters out zero pages, but there might
 485                 * still be PageReserved pages to skip, perhaps in a VDSO.
 486                 * And we cannot move PageKsm pages sensibly or safely yet.
 487                 */
 488                if (PageReserved(page) || PageKsm(page))
 489                        continue;
 490                nid = page_to_nid(page);
 491                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
 492                        continue;
 493
 494                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 495                        migrate_page_add(page, private, flags);
 496                else
 497                        break;
 498        } while (pte++, addr += PAGE_SIZE, addr != end);
 499        pte_unmap_unlock(orig_pte, ptl);
 500        return addr != end;
 501}
 502
 503static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 504                unsigned long addr, unsigned long end,
 505                const nodemask_t *nodes, unsigned long flags,
 506                void *private)
 507{
 508        pmd_t *pmd;
 509        unsigned long next;
 510
 511        pmd = pmd_offset(pud, addr);
 512        do {
 513                next = pmd_addr_end(addr, end);
 514                split_huge_page_pmd(vma->vm_mm, pmd);
 515                if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 516                        continue;
 517                if (check_pte_range(vma, pmd, addr, next, nodes,
 518                                    flags, private))
 519                        return -EIO;
 520        } while (pmd++, addr = next, addr != end);
 521        return 0;
 522}
 523
 524static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 525                unsigned long addr, unsigned long end,
 526                const nodemask_t *nodes, unsigned long flags,
 527                void *private)
 528{
 529        pud_t *pud;
 530        unsigned long next;
 531
 532        pud = pud_offset(pgd, addr);
 533        do {
 534                next = pud_addr_end(addr, end);
 535                if (pud_none_or_clear_bad(pud))
 536                        continue;
 537                if (check_pmd_range(vma, pud, addr, next, nodes,
 538                                    flags, private))
 539                        return -EIO;
 540        } while (pud++, addr = next, addr != end);
 541        return 0;
 542}
 543
 544static inline int check_pgd_range(struct vm_area_struct *vma,
 545                unsigned long addr, unsigned long end,
 546                const nodemask_t *nodes, unsigned long flags,
 547                void *private)
 548{
 549        pgd_t *pgd;
 550        unsigned long next;
 551
 552        pgd = pgd_offset(vma->vm_mm, addr);
 553        do {
 554                next = pgd_addr_end(addr, end);
 555                if (pgd_none_or_clear_bad(pgd))
 556                        continue;
 557                if (check_pud_range(vma, pgd, addr, next, nodes,
 558                                    flags, private))
 559                        return -EIO;
 560        } while (pgd++, addr = next, addr != end);
 561        return 0;
 562}
 563
 564/*
 565 * Check if all pages in a range are on a set of nodes.
 566 * If pagelist != NULL then isolate pages from the LRU and
 567 * put them on the pagelist.
 568 */
 569static struct vm_area_struct *
 570check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 571                const nodemask_t *nodes, unsigned long flags, void *private)
 572{
 573        int err;
 574        struct vm_area_struct *first, *vma, *prev;
 575
 576
 577        first = find_vma(mm, start);
 578        if (!first)
 579                return ERR_PTR(-EFAULT);
 580        prev = NULL;
 581        for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
 582                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 583                        if (!vma->vm_next && vma->vm_end < end)
 584                                return ERR_PTR(-EFAULT);
 585                        if (prev && prev->vm_end < vma->vm_start)
 586                                return ERR_PTR(-EFAULT);
 587                }
 588                if (!is_vm_hugetlb_page(vma) &&
 589                    ((flags & MPOL_MF_STRICT) ||
 590                     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 591                                vma_migratable(vma)))) {
 592                        unsigned long endvma = vma->vm_end;
 593
 594                        if (endvma > end)
 595                                endvma = end;
 596                        if (vma->vm_start > start)
 597                                start = vma->vm_start;
 598                        err = check_pgd_range(vma, start, endvma, nodes,
 599                                                flags, private);
 600                        if (err) {
 601                                first = ERR_PTR(err);
 602                                break;
 603                        }
 604                }
 605                prev = vma;
 606        }
 607        return first;
 608}
 609
 610/*
 611 * Apply policy to a single VMA
 612 * This must be called with the mmap_sem held for writing.
 613 */
 614static int vma_replace_policy(struct vm_area_struct *vma,
 615                                                struct mempolicy *pol)
 616{
 617        int err;
 618        struct mempolicy *old;
 619        struct mempolicy *new;
 620
 621        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 622                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
 623                 vma->vm_ops, vma->vm_file,
 624                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 625
 626        new = mpol_dup(pol);
 627        if (IS_ERR(new))
 628                return PTR_ERR(new);
 629
 630        if (vma->vm_ops && vma->vm_ops->set_policy) {
 631                err = vma->vm_ops->set_policy(vma, new);
 632                if (err)
 633                        goto err_out;
 634        }
 635
 636        old = vma->vm_policy;
 637        vma->vm_policy = new; /* protected by mmap_sem */
 638        mpol_put(old);
 639
 640        return 0;
 641 err_out:
 642        mpol_put(new);
 643        return err;
 644}
 645
 646/* Step 2: apply policy to a range and do splits. */
 647static int mbind_range(struct mm_struct *mm, unsigned long start,
 648                       unsigned long end, struct mempolicy *new_pol)
 649{
 650        struct vm_area_struct *next;
 651        struct vm_area_struct *prev;
 652        struct vm_area_struct *vma;
 653        int err = 0;
 654        pgoff_t pgoff;
 655        unsigned long vmstart;
 656        unsigned long vmend;
 657
 658        vma = find_vma(mm, start);
 659        if (!vma || vma->vm_start > start)
 660                return -EFAULT;
 661
 662        prev = vma->vm_prev;
 663        if (start > vma->vm_start)
 664                prev = vma;
 665
 666        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 667                next = vma->vm_next;
 668                vmstart = max(start, vma->vm_start);
 669                vmend   = min(end, vma->vm_end);
 670
 671                if (mpol_equal(vma_policy(vma), new_pol))
 672                        continue;
 673
 674                pgoff = vma->vm_pgoff +
 675                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
 676                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 677                                  vma->anon_vma, vma->vm_file, pgoff,
 678                                  new_pol);
 679                if (prev) {
 680                        vma = prev;
 681                        next = vma->vm_next;
 682                        continue;
 683                }
 684                if (vma->vm_start != vmstart) {
 685                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
 686                        if (err)
 687                                goto out;
 688                }
 689                if (vma->vm_end != vmend) {
 690                        err = split_vma(vma->vm_mm, vma, vmend, 0);
 691                        if (err)
 692                                goto out;
 693                }
 694                err = vma_replace_policy(vma, new_pol);
 695                if (err)
 696                        goto out;
 697        }
 698
 699 out:
 700        return err;
 701}
 702
 703/*
 704 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 705 * mempolicy.  Allows more rapid checking of this (combined perhaps
 706 * with other PF_* flag bits) on memory allocation hot code paths.
 707 *
 708 * If called from outside this file, the task 'p' should -only- be
 709 * a newly forked child not yet visible on the task list, because
 710 * manipulating the task flags of a visible task is not safe.
 711 *
 712 * The above limitation is why this routine has the funny name
 713 * mpol_fix_fork_child_flag().
 714 *
 715 * It is also safe to call this with a task pointer of current,
 716 * which the static wrapper mpol_set_task_struct_flag() does,
 717 * for use within this file.
 718 */
 719
 720void mpol_fix_fork_child_flag(struct task_struct *p)
 721{
 722        if (p->mempolicy)
 723                p->flags |= PF_MEMPOLICY;
 724        else
 725                p->flags &= ~PF_MEMPOLICY;
 726}
 727
 728static void mpol_set_task_struct_flag(void)
 729{
 730        mpol_fix_fork_child_flag(current);
 731}
 732
 733/* Set the process memory policy */
 734static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 735                             nodemask_t *nodes)
 736{
 737        struct mempolicy *new, *old;
 738        struct mm_struct *mm = current->mm;
 739        NODEMASK_SCRATCH(scratch);
 740        int ret;
 741
 742        if (!scratch)
 743                return -ENOMEM;
 744
 745        new = mpol_new(mode, flags, nodes);
 746        if (IS_ERR(new)) {
 747                ret = PTR_ERR(new);
 748                goto out;
 749        }
 750        /*
 751         * prevent changing our mempolicy while show_numa_maps()
 752         * is using it.
 753         * Note:  do_set_mempolicy() can be called at init time
 754         * with no 'mm'.
 755         */
 756        if (mm)
 757                down_write(&mm->mmap_sem);
 758        task_lock(current);
 759        ret = mpol_set_nodemask(new, nodes, scratch);
 760        if (ret) {
 761                task_unlock(current);
 762                if (mm)
 763                        up_write(&mm->mmap_sem);
 764                mpol_put(new);
 765                goto out;
 766        }
 767        old = current->mempolicy;
 768        current->mempolicy = new;
 769        mpol_set_task_struct_flag();
 770        if (new && new->mode == MPOL_INTERLEAVE &&
 771            nodes_weight(new->v.nodes))
 772                current->il_next = first_node(new->v.nodes);
 773        task_unlock(current);
 774        if (mm)
 775                up_write(&mm->mmap_sem);
 776
 777        mpol_put(old);
 778        ret = 0;
 779out:
 780        NODEMASK_SCRATCH_FREE(scratch);
 781        return ret;
 782}
 783
 784/*
  785 * Return the nodemask of a mempolicy, for a get_mempolicy() query
 786 *
 787 * Called with task's alloc_lock held
 788 */
 789static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 790{
 791        nodes_clear(*nodes);
 792        if (p == &default_policy)
 793                return;
 794
 795        switch (p->mode) {
 796        case MPOL_BIND:
 797                /* Fall through */
 798        case MPOL_INTERLEAVE:
 799                *nodes = p->v.nodes;
 800                break;
 801        case MPOL_PREFERRED:
 802                if (!(p->flags & MPOL_F_LOCAL))
 803                        node_set(p->v.preferred_node, *nodes);
 804                /* else return empty node mask for local allocation */
 805                break;
 806        default:
 807                BUG();
 808        }
 809}
 810
 811static int lookup_node(struct mm_struct *mm, unsigned long addr)
 812{
 813        struct page *p;
 814        int err;
 815
 816        err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
 817        if (err >= 0) {
 818                err = page_to_nid(p);
 819                put_page(p);
 820        }
 821        return err;
 822}
 823
 824/* Retrieve NUMA policy */
 825static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 826                             unsigned long addr, unsigned long flags)
 827{
 828        int err;
 829        struct mm_struct *mm = current->mm;
 830        struct vm_area_struct *vma = NULL;
 831        struct mempolicy *pol = current->mempolicy;
 832
 833        if (flags &
 834                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 835                return -EINVAL;
 836
 837        if (flags & MPOL_F_MEMS_ALLOWED) {
 838                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 839                        return -EINVAL;
 840                *policy = 0;    /* just so it's initialized */
 841                task_lock(current);
 842                *nmask  = cpuset_current_mems_allowed;
 843                task_unlock(current);
 844                return 0;
 845        }
 846
 847        if (flags & MPOL_F_ADDR) {
 848                /*
 849                 * Do NOT fall back to task policy if the
 850                 * vma/shared policy at addr is NULL.  We
 851                 * want to return MPOL_DEFAULT in this case.
 852                 */
 853                down_read(&mm->mmap_sem);
 854                vma = find_vma_intersection(mm, addr, addr+1);
 855                if (!vma) {
 856                        up_read(&mm->mmap_sem);
 857                        return -EFAULT;
 858                }
 859                if (vma->vm_ops && vma->vm_ops->get_policy)
 860                        pol = vma->vm_ops->get_policy(vma, addr);
 861                else
 862                        pol = vma->vm_policy;
 863        } else if (addr)
 864                return -EINVAL;
 865
 866        if (!pol)
 867                pol = &default_policy;  /* indicates default behavior */
 868
 869        if (flags & MPOL_F_NODE) {
 870                if (flags & MPOL_F_ADDR) {
 871                        err = lookup_node(mm, addr);
 872                        if (err < 0)
 873                                goto out;
 874                        *policy = err;
 875                } else if (pol == current->mempolicy &&
 876                                pol->mode == MPOL_INTERLEAVE) {
 877                        *policy = current->il_next;
 878                } else {
 879                        err = -EINVAL;
 880                        goto out;
 881                }
 882        } else {
 883                *policy = pol == &default_policy ? MPOL_DEFAULT :
 884                                                pol->mode;
 885                /*
 886                 * Internal mempolicy flags must be masked off before exposing
 887                 * the policy to userspace.
 888                 */
 889                *policy |= (pol->flags & MPOL_MODE_FLAGS);
 890        }
 891
 892        if (vma) {
 893                up_read(&current->mm->mmap_sem);
 894                vma = NULL;
 895        }
 896
 897        err = 0;
 898        if (nmask) {
 899                if (mpol_store_user_nodemask(pol)) {
 900                        *nmask = pol->w.user_nodemask;
 901                } else {
 902                        task_lock(current);
 903                        get_policy_nodemask(pol, nmask);
 904                        task_unlock(current);
 905                }
 906        }
 907
 908 out:
 909        mpol_cond_put(pol);
 910        if (vma)
 911                up_read(&current->mm->mmap_sem);
 912        return err;
 913}
 914
 915#ifdef CONFIG_MIGRATION
 916/*
 917 * page migration
 918 */
 919static void migrate_page_add(struct page *page, struct list_head *pagelist,
 920                                unsigned long flags)
 921{
 922        /*
 923         * Avoid migrating a page that is shared with others.
 924         */
 925        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
 926                if (!isolate_lru_page(page)) {
 927                        list_add_tail(&page->lru, pagelist);
 928                        inc_zone_page_state(page, NR_ISOLATED_ANON +
 929                                            page_is_file_cache(page));
 930                }
 931        }
 932}
 933
 934static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 935{
 936        return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 937}
 938
 939/*
 940 * Migrate pages from one node to a target node.
 941 * Returns error or the number of pages not migrated.
 942 */
 943static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 944                           int flags)
 945{
 946        nodemask_t nmask;
 947        LIST_HEAD(pagelist);
 948        int err = 0;
 949        struct vm_area_struct *vma;
 950
 951        nodes_clear(nmask);
 952        node_set(source, nmask);
 953
 954        vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 955                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 956        if (IS_ERR(vma))
 957                return PTR_ERR(vma);
 958
 959        if (!list_empty(&pagelist)) {
 960                err = migrate_pages(&pagelist, new_node_page, dest,
 961                                                        false, MIGRATE_SYNC);
 962                if (err)
 963                        putback_lru_pages(&pagelist);
 964        }
 965
 966        return err;
 967}
 968
 969/*
 970 * Move pages between the two nodesets so as to preserve the physical
 971 * layout as much as possible.
 972 *
  973 * Returns the number of pages that could not be moved.
 974 */
 975int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 976                     const nodemask_t *to, int flags)
 977{
 978        int busy = 0;
 979        int err;
 980        nodemask_t tmp;
 981
 982        err = migrate_prep();
 983        if (err)
 984                return err;
 985
 986        down_read(&mm->mmap_sem);
 987
 988        err = migrate_vmas(mm, from, to, flags);
 989        if (err)
 990                goto out;
 991
 992        /*
 993         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 994         * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 995         * bit in 'tmp', and return that <source, dest> pair for migration.
 996         * The pair of nodemasks 'to' and 'from' define the map.
 997         *
  998         * If no pair of bits is found that way, fall back to picking some
 999         * pair of 'source' and 'dest' bits that are not the same.  If the
1000         * 'source' and 'dest' bits are the same, this represents a node
1001         * that will be migrating to itself, so no pages need move.
1002         *
1003         * If no bits are left in 'tmp', or if all remaining bits left
1004         * in 'tmp' correspond to the same bit in 'to', return false
1005         * (nothing left to migrate).
1006         *
1007         * This lets us pick a pair of nodes to migrate between, such that
1008         * if possible the dest node is not already occupied by some other
1009         * source node, minimizing the risk of overloading the memory on a
1010         * node that would happen if we migrated incoming memory to a node
1011         * before migrating outgoing memory source that same node.
1012         *
1013         * A single scan of tmp is sufficient.  As we go, we remember the
1014         * most recent <s, d> pair that moved (s != d).  If we find a pair
1015         * that not only moved, but what's better, moved to an empty slot
1016         * (d is not set in tmp), then we break out then, with that pair.
 1017         * Otherwise when we finish scanning tmp, we at least have the
1018         * most recent <s, d> pair that moved.  If we get all the way through
1019         * the scan of tmp without finding any node that moved, much less
1020         * moved to an empty node, then there is nothing left worth migrating.
1021         */
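
        /*
         * Worked example (illustrative): from = {0,1}, to = {2,3}.
         * tmp starts as {0,1}; s = 0 remaps to d = 2, and since 2 is not in
         * tmp we break out with (source, dest) = (0, 2), clear node 0 and
         * migrate 0 -> 2.  On the next pass tmp = {1}; s = 1 remaps to
         * d = 3, giving (1, 3), and tmp becomes empty.
         */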
1022
1023        tmp = *from;
1024        while (!nodes_empty(tmp)) {
1025                int s,d;
1026                int source = -1;
1027                int dest = 0;
1028
1029                for_each_node_mask(s, tmp) {
1030
1031                        /*
1032                         * do_migrate_pages() tries to maintain the relative
1033                         * node relationship of the pages established between
1034                         * threads and memory areas.
1035                         *
1036                         * However if the number of source nodes is not equal to
1037                         * the number of destination nodes we can not preserve
1038                         * this node relative relationship.  In that case, skip
1039                         * copying memory from a node that is in the destination
1040                         * mask.
1041                         *
1042                         * Example: [2,3,4] -> [3,4,5] moves everything.
 1043         *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1044                         */
1045
1046                        if ((nodes_weight(*from) != nodes_weight(*to)) &&
1047                                                (node_isset(s, *to)))
1048                                continue;
1049
1050                        d = node_remap(s, *from, *to);
1051                        if (s == d)
1052                                continue;
1053
1054                        source = s;     /* Node moved. Memorize */
1055                        dest = d;
1056
1057                        /* dest not in remaining from nodes? */
1058                        if (!node_isset(dest, tmp))
1059                                break;
1060                }
1061                if (source == -1)
1062                        break;
1063
1064                node_clear(source, tmp);
1065                err = migrate_to_node(mm, source, dest, flags);
1066                if (err > 0)
1067                        busy += err;
1068                if (err < 0)
1069                        break;
1070        }
1071out:
1072        up_read(&mm->mmap_sem);
1073        if (err < 0)
1074                return err;
1075        return busy;
1076
1077}
1078
1079/*
1080 * Allocate a new page for page migration based on vma policy.
1081 * Start assuming that page is mapped by vma pointed to by @private.
1082 * Search forward from there, if not.  N.B., this assumes that the
1083 * list of pages handed to migrate_pages()--which is how we get here--
1084 * is in virtual address order.
1085 */
1086static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1087{
1088        struct vm_area_struct *vma = (struct vm_area_struct *)private;
1089        unsigned long uninitialized_var(address);
1090
1091        while (vma) {
1092                address = page_address_in_vma(page, vma);
1093                if (address != -EFAULT)
1094                        break;
1095                vma = vma->vm_next;
1096        }
1097
1098        /*
1099         * if !vma, alloc_page_vma() will use task or system default policy
1100         */
1101        return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1102}
1103#else
1104
1105static void migrate_page_add(struct page *page, struct list_head *pagelist,
1106                                unsigned long flags)
1107{
1108}
1109
1110int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1111                     const nodemask_t *to, int flags)
1112{
1113        return -ENOSYS;
1114}
1115
1116static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1117{
1118        return NULL;
1119}
1120#endif
1121
1122static long do_mbind(unsigned long start, unsigned long len,
1123                     unsigned short mode, unsigned short mode_flags,
1124                     nodemask_t *nmask, unsigned long flags)
1125{
1126        struct vm_area_struct *vma;
1127        struct mm_struct *mm = current->mm;
1128        struct mempolicy *new;
1129        unsigned long end;
1130        int err;
1131        LIST_HEAD(pagelist);
1132
1133        if (flags & ~(unsigned long)(MPOL_MF_STRICT |
1134                                     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1135                return -EINVAL;
1136        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1137                return -EPERM;
1138
1139        if (start & ~PAGE_MASK)
1140                return -EINVAL;
1141
1142        if (mode == MPOL_DEFAULT)
1143                flags &= ~MPOL_MF_STRICT;
1144
1145        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1146        end = start + len;
1147
1148        if (end < start)
1149                return -EINVAL;
1150        if (end == start)
1151                return 0;
1152
1153        new = mpol_new(mode, mode_flags, nmask);
1154        if (IS_ERR(new))
1155                return PTR_ERR(new);
1156
1157        /*
 1158         * If we are using the default policy then operating
 1159         * on discontinuous address spaces is okay after all
1160         */
1161        if (!new)
1162                flags |= MPOL_MF_DISCONTIG_OK;
1163
1164        pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1165                 start, start + len, mode, mode_flags,
1166                 nmask ? nodes_addr(*nmask)[0] : -1);
1167
1168        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1169
1170                err = migrate_prep();
1171                if (err)
1172                        goto mpol_out;
1173        }
1174        {
1175                NODEMASK_SCRATCH(scratch);
1176                if (scratch) {
1177                        down_write(&mm->mmap_sem);
1178                        task_lock(current);
1179                        err = mpol_set_nodemask(new, nmask, scratch);
1180                        task_unlock(current);
1181                        if (err)
1182                                up_write(&mm->mmap_sem);
1183                } else
1184                        err = -ENOMEM;
1185                NODEMASK_SCRATCH_FREE(scratch);
1186        }
1187        if (err)
1188                goto mpol_out;
1189
1190        vma = check_range(mm, start, end, nmask,
1191                          flags | MPOL_MF_INVERT, &pagelist);
1192
1193        err = PTR_ERR(vma);
1194        if (!IS_ERR(vma)) {
1195                int nr_failed = 0;
1196
1197                err = mbind_range(mm, start, end, new);
1198
1199                if (!list_empty(&pagelist)) {
1200                        nr_failed = migrate_pages(&pagelist, new_vma_page,
1201                                                (unsigned long)vma,
1202                                                false, MIGRATE_SYNC);
1203                        if (nr_failed)
1204                                putback_lru_pages(&pagelist);
1205                }
1206
1207                if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1208                        err = -EIO;
1209        } else
1210                putback_lru_pages(&pagelist);
1211
1212        up_write(&mm->mmap_sem);
1213 mpol_out:
1214        mpol_put(new);
1215        return err;
1216}
1217
1218/*
1219 * User space interface with variable sized bitmaps for nodelists.
1220 */
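
/*
 * Illustrative userspace sketch (assumes libnuma's <numaif.h>): maxnode
 * counts bits, and for get_mempolicy() the kernel requires it to cover
 * every possible node ID, so a generously sized buffer is the simple
 * approach.  Sketch only; 1024 bits is an assumption that comfortably
 * exceeds MAX_NUMNODES on common configurations.
 */
#if 0
#include <numaif.h>
#include <string.h>

#define EXAMPLE_MAXNODE 1024

static long query_process_policy(int *mode)
{
        unsigned long mask[EXAMPLE_MAXNODE / (8 * sizeof(unsigned long))];

        memset(mask, 0, sizeof(mask));
        /* flags == 0: report the calling process's policy and nodemask. */
        return get_mempolicy(mode, mask, EXAMPLE_MAXNODE, NULL, 0);
}
#endif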
1221
1222/* Copy a node mask from user space. */
1223static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1224                     unsigned long maxnode)
1225{
1226        unsigned long k;
1227        unsigned long nlongs;
1228        unsigned long endmask;
1229
1230        --maxnode;
1231        nodes_clear(*nodes);
1232        if (maxnode == 0 || !nmask)
1233                return 0;
1234        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1235                return -EINVAL;
1236
1237        nlongs = BITS_TO_LONGS(maxnode);
1238        if ((maxnode % BITS_PER_LONG) == 0)
1239                endmask = ~0UL;
1240        else
1241                endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1242
 1243        /* When the user specified more nodes than supported, just check
 1244           that the unsupported part is all zero. */
1245        if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1246                if (nlongs > PAGE_SIZE/sizeof(long))
1247                        return -EINVAL;
1248                for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1249                        unsigned long t;
1250                        if (get_user(t, nmask + k))
1251                                return -EFAULT;
1252                        if (k == nlongs - 1) {
1253                                if (t & endmask)
1254                                        return -EINVAL;
1255                        } else if (t)
1256                                return -EINVAL;
1257                }
1258                nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1259                endmask = ~0UL;
1260        }
1261
1262        if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1263                return -EFAULT;
1264        nodes_addr(*nodes)[nlongs-1] &= endmask;
1265        return 0;
1266}
1267
1268/* Copy a kernel node mask to user space */
1269static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1270                              nodemask_t *nodes)
1271{
1272        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1273        const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1274
1275        if (copy > nbytes) {
1276                if (copy > PAGE_SIZE)
1277                        return -EINVAL;
1278                if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1279                        return -EFAULT;
1280                copy = nbytes;
1281        }
1282        return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1283}
1284
1285SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1286                unsigned long, mode, unsigned long __user *, nmask,
1287                unsigned long, maxnode, unsigned, flags)
1288{
1289        nodemask_t nodes;
1290        int err;
1291        unsigned short mode_flags;
1292
1293        mode_flags = mode & MPOL_MODE_FLAGS;
1294        mode &= ~MPOL_MODE_FLAGS;
1295        if (mode >= MPOL_MAX)
1296                return -EINVAL;
1297        if ((mode_flags & MPOL_F_STATIC_NODES) &&
1298            (mode_flags & MPOL_F_RELATIVE_NODES))
1299                return -EINVAL;
1300        err = get_nodes(&nodes, nmask, maxnode);
1301        if (err)
1302                return err;
1303        return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1304}
1305
1306/* Set the process memory policy */
1307SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1308                unsigned long, maxnode)
1309{
1310        int err;
1311        nodemask_t nodes;
1312        unsigned short flags;
1313
1314        flags = mode & MPOL_MODE_FLAGS;
1315        mode &= ~MPOL_MODE_FLAGS;
1316        if ((unsigned int)mode >= MPOL_MAX)
1317                return -EINVAL;
1318        if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1319                return -EINVAL;
1320        err = get_nodes(&nodes, nmask, maxnode);
1321        if (err)
1322                return err;
1323        return do_set_mempolicy(mode, flags, &nodes);
1324}
1325
1326SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1327                const unsigned long __user *, old_nodes,
1328                const unsigned long __user *, new_nodes)
1329{
1330        const struct cred *cred = current_cred(), *tcred;
1331        struct mm_struct *mm = NULL;
1332        struct task_struct *task;
1333        nodemask_t task_nodes;
1334        int err;
1335        nodemask_t *old;
1336        nodemask_t *new;
1337        NODEMASK_SCRATCH(scratch);
1338
1339        if (!scratch)
1340                return -ENOMEM;
1341
1342        old = &scratch->mask1;
1343        new = &scratch->mask2;
1344
1345        err = get_nodes(old, old_nodes, maxnode);
1346        if (err)
1347                goto out;
1348
1349        err = get_nodes(new, new_nodes, maxnode);
1350        if (err)
1351                goto out;
1352
1353        /* Find the mm_struct */
1354        rcu_read_lock();
1355        task = pid ? find_task_by_vpid(pid) : current;
1356        if (!task) {
1357                rcu_read_unlock();
1358                err = -ESRCH;
1359                goto out;
1360        }
1361        get_task_struct(task);
1362
1363        err = -EINVAL;
1364
1365        /*
1366         * Check if this process has the right to modify the specified
1367         * process. The right exists if the process has administrative
1368         * capabilities, superuser privileges or the same
1369         * userid as the target process.
1370         */
1371        tcred = __task_cred(task);
1372        if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1373            !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1374            !capable(CAP_SYS_NICE)) {
1375                rcu_read_unlock();
1376                err = -EPERM;
1377                goto out_put;
1378        }
1379        rcu_read_unlock();
1380
1381        task_nodes = cpuset_mems_allowed(task);
1382        /* Is the user allowed to access the target nodes? */
1383        if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1384                err = -EPERM;
1385                goto out_put;
1386        }
1387
1388        if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
1389                err = -EINVAL;
1390                goto out_put;
1391        }
1392
1393        err = security_task_movememory(task);
1394        if (err)
1395                goto out_put;
1396
1397        mm = get_task_mm(task);
1398        put_task_struct(task);
1399
1400        if (!mm) {
1401                err = -EINVAL;
1402                goto out;
1403        }
1404
1405        err = do_migrate_pages(mm, old, new,
1406                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1407
1408        mmput(mm);
1409out:
1410        NODEMASK_SCRATCH_FREE(scratch);
1411
1412        return err;
1413
1414out_put:
1415        put_task_struct(task);
1416        goto out;
1417
1418}
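
/*
 * Illustrative userspace sketch of the syscall above (assumes libnuma's
 * <numaif.h> wrapper): move a target process's pages from node 0 to node 1.
 * Per the checks above, this needs CAP_SYS_NICE or a matching uid, and the
 * caller must be allowed to use the destination nodes.
 */
#if 0
#include <numaif.h>
#include <sys/types.h>

static long example_migrate(pid_t pid)
{
        unsigned long from = 1UL << 0;  /* node 0 */
        unsigned long to   = 1UL << 1;  /* node 1 */

        return migrate_pages(pid, 8 * sizeof(unsigned long), &from, &to);
}
#endif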
1419
1420
1421/* Retrieve NUMA policy */
1422SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1423                unsigned long __user *, nmask, unsigned long, maxnode,
1424                unsigned long, addr, unsigned long, flags)
1425{
1426        int err;
1427        int uninitialized_var(pval);
1428        nodemask_t nodes;
1429
1430        if (nmask != NULL && maxnode < MAX_NUMNODES)
1431                return -EINVAL;
1432
1433        err = do_get_mempolicy(&pval, &nodes, addr, flags);
1434
1435        if (err)
1436                return err;
1437
1438        if (policy && put_user(pval, policy))
1439                return -EFAULT;
1440
1441        if (nmask)
1442                err = copy_nodes_to_user(nmask, maxnode, &nodes);
1443
1444        return err;
1445}
1446
1447#ifdef CONFIG_COMPAT
1448
1449asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1450                                     compat_ulong_t __user *nmask,
1451                                     compat_ulong_t maxnode,
1452                                     compat_ulong_t addr, compat_ulong_t flags)
1453{
1454        long err;
1455        unsigned long __user *nm = NULL;
1456        unsigned long nr_bits, alloc_size;
1457        DECLARE_BITMAP(bm, MAX_NUMNODES);
1458
1459        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1460        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1461
1462        if (nmask)
1463                nm = compat_alloc_user_space(alloc_size);
1464
1465        err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1466
1467        if (!err && nmask) {
1468                unsigned long copy_size;
1469                copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1470                err = copy_from_user(bm, nm, copy_size);
1471                /* ensure entire bitmap is zeroed */
1472                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1473                err |= compat_put_bitmap(nmask, bm, nr_bits);
1474        }
1475
1476        return err;
1477}
1478
1479asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1480                                     compat_ulong_t maxnode)
1481{
1482        long err = 0;
1483        unsigned long __user *nm = NULL;
1484        unsigned long nr_bits, alloc_size;
1485        DECLARE_BITMAP(bm, MAX_NUMNODES);
1486
1487        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1488        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1489
1490        if (nmask) {
1491                err = compat_get_bitmap(bm, nmask, nr_bits);
1492                nm = compat_alloc_user_space(alloc_size);
1493                err |= copy_to_user(nm, bm, alloc_size);
1494        }
1495
1496        if (err)
1497                return -EFAULT;
1498
1499        return sys_set_mempolicy(mode, nm, nr_bits+1);
1500}
1501
1502asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1503                             compat_ulong_t mode, compat_ulong_t __user *nmask,
1504                             compat_ulong_t maxnode, compat_ulong_t flags)
1505{
1506        long err = 0;
1507        unsigned long __user *nm = NULL;
1508        unsigned long nr_bits, alloc_size;
1509        nodemask_t bm;
1510
1511        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1512        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1513
1514        if (nmask) {
1515                err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1516                nm = compat_alloc_user_space(alloc_size);
1517                err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1518        }
1519
1520        if (err)
1521                return -EFAULT;
1522
1523        return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1524}
1525
1526#endif
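
/*
 * Illustrative userspace sketch (not part of this file): the native
 * calls the compat shims above funnel into, as exposed by libnuma's
 * <numaif.h> (link with -lnuma).  The function name and node mask
 * values below are example assumptions.
 *
 *	#include <numaif.h>
 *
 *	// buf must be page aligned
 *	static int example_policies(void *buf, unsigned long len)
 *	{
 *		unsigned long nodes01 = 0x3;	// nodes 0 and 1
 *		unsigned long node0   = 0x1;	// node 0 only
 *
 *		// interleave this task's future allocations over nodes 0-1
 *		if (set_mempolicy(MPOL_INTERLEAVE, &nodes01, 8 * sizeof(nodes01)))
 *			return -1;
 *		// bind an existing mapping to node 0, migrating misplaced pages
 *		return mbind(buf, len, MPOL_BIND, &node0, 8 * sizeof(node0),
 *			     MPOL_MF_MOVE);
 *	}
 */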
1527
1528/*
1529 * get_vma_policy(@task, @vma, @addr)
1530 * @task - task for fallback if vma policy == default
1531 * @vma   - virtual memory area whose policy is sought
1532 * @addr  - address in @vma for shared policy lookup
1533 *
1534 * Returns effective policy for a VMA at specified address.
1535 * Falls back to @task or system default policy, as necessary.
1536 * Current or other task's task mempolicy and non-shared vma policies
1537 * are protected by the task's mmap_sem, which must be held for read by
1538 * the caller.
1539 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1540 * count--added by the get_policy() vm_op, as appropriate--to protect against
1541 * freeing by another task.  It is the caller's responsibility to free the
1542 * extra reference for shared policies.
1543 */
1544struct mempolicy *get_vma_policy(struct task_struct *task,
1545                struct vm_area_struct *vma, unsigned long addr)
1546{
1547        struct mempolicy *pol = task->mempolicy;
1548
1549        if (vma) {
1550                if (vma->vm_ops && vma->vm_ops->get_policy) {
1551                        struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1552                                                                        addr);
1553                        if (vpol)
1554                                pol = vpol;
1555                } else if (vma->vm_policy) {
1556                        pol = vma->vm_policy;
1557
1558                        /*
1559                         * shmem_alloc_page() passes MPOL_F_SHARED policy with
1560                         * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1561                         * count on these policies which will be dropped by
1562                         * mpol_cond_put() later
1563                         */
1564                        if (mpol_needs_cond_ref(pol))
1565                                mpol_get(pol);
1566                }
1567        }
1568        if (!pol)
1569                pol = &default_policy;
1570        return pol;
1571}
1572
1573/*
1574 * Return a nodemask representing a mempolicy for filtering nodes for
1575 * page allocation
1576 */
1577static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1578{
1579        /* Lower zones don't get a nodemask applied for MPOL_BIND */
1580        if (unlikely(policy->mode == MPOL_BIND) &&
1581                        gfp_zone(gfp) >= policy_zone &&
1582                        cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1583                return &policy->v.nodes;
1584
1585        return NULL;
1586}
1587
1588/* Return a zonelist indicated by gfp for node representing a mempolicy */
1589static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1590        int nd)
1591{
1592        switch (policy->mode) {
1593        case MPOL_PREFERRED:
1594                if (!(policy->flags & MPOL_F_LOCAL))
1595                        nd = policy->v.preferred_node;
1596                break;
1597        case MPOL_BIND:
1598                /*
1599                 * Normally, MPOL_BIND allocations are node-local within the
1600                 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1601                 * current node isn't part of the mask, we use the zonelist for
1602                 * the first node in the mask instead.
1603                 */
1604                if (unlikely(gfp & __GFP_THISNODE) &&
1605                                unlikely(!node_isset(nd, policy->v.nodes)))
1606                        nd = first_node(policy->v.nodes);
1607                break;
1608        default:
1609                BUG();
1610        }
1611        return node_zonelist(nd, gfp);
1612}
1613
1614/* Do dynamic interleaving for a process */
1615static unsigned interleave_nodes(struct mempolicy *policy)
1616{
1617        unsigned nid, next;
1618        struct task_struct *me = current;
1619
1620        nid = me->il_next;
1621        next = next_node(nid, policy->v.nodes);
1622        if (next >= MAX_NUMNODES)
1623                next = first_node(policy->v.nodes);
1624        if (next < MAX_NUMNODES)
1625                me->il_next = next;
1626        return nid;
1627}
1628
1629/*
1630 * Depending on the memory policy provide a node from which to allocate the
1631 * next slab entry.
1632 * @policy must be protected from freeing by the caller.  If @policy is
1633 * the current task's mempolicy, this protection is implicit, as only the
1634 * task can change its policy.  The system default policy requires no
1635 * such protection.
1636 */
1637unsigned slab_node(void)
1638{
1639        struct mempolicy *policy;
1640
1641        if (in_interrupt())
1642                return numa_node_id();
1643
1644        policy = current->mempolicy;
1645        if (!policy || policy->flags & MPOL_F_LOCAL)
1646                return numa_node_id();
1647
1648        switch (policy->mode) {
1649        case MPOL_PREFERRED:
1650                /*
1651                 * handled MPOL_F_LOCAL above
1652                 */
1653                return policy->v.preferred_node;
1654
1655        case MPOL_INTERLEAVE:
1656                return interleave_nodes(policy);
1657
1658        case MPOL_BIND: {
1659                /*
1660                 * Follow bind policy behavior and start allocation at the
1661                 * first node.
1662                 */
1663                struct zonelist *zonelist;
1664                struct zone *zone;
1665                enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1666                zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1667                (void)first_zones_zonelist(zonelist, highest_zoneidx,
1668                                                        &policy->v.nodes,
1669                                                        &zone);
1670                return zone ? zone->node : numa_node_id();
1671        }
1672
1673        default:
1674                BUG();
1675        }
1676}
1677
1678/* Do static interleaving for a VMA with known offset. */
1679static unsigned offset_il_node(struct mempolicy *pol,
1680                struct vm_area_struct *vma, unsigned long off)
1681{
1682        unsigned nnodes = nodes_weight(pol->v.nodes);
1683        unsigned target;
1684        int c;
1685        int nid = -1;
1686
1687        if (!nnodes)
1688                return numa_node_id();
1689        target = (unsigned int)off % nnodes;
1690        c = 0;
1691        do {
1692                nid = next_node(nid, pol->v.nodes);
1693                c++;
1694        } while (c <= target);
1695        return nid;
1696}
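
/*
 * Worked example for offset_il_node(): with pol->v.nodes = {0,2,5}
 * (nnodes == 3) and off == 7, target = 7 % 3 = 1, so the loop steps
 * past node 0 and returns node 2, the second node in the mask.
 */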
1697
1698/* Determine a node number for interleave */
1699static inline unsigned interleave_nid(struct mempolicy *pol,
1700                 struct vm_area_struct *vma, unsigned long addr, int shift)
1701{
1702        if (vma) {
1703                unsigned long off;
1704
1705                /*
1706                 * for small pages, there is no difference between
1707                 * shift and PAGE_SHIFT, so the bit-shift is safe.
1708                 * for huge pages, since vm_pgoff is in units of small
1709                 * pages, we need to shift off the always 0 bits to get
1710                 * a useful offset.
1711                 */
1712                BUG_ON(shift < PAGE_SHIFT);
1713                off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1714                off += (addr - vma->vm_start) >> shift;
1715                return offset_il_node(pol, vma, off);
1716        } else
1717                return interleave_nodes(pol);
1718}
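
/*
 * Worked example for interleave_nid(): for a 2MB huge page VMA
 * (shift == 21) on a 4KB base-page kernel (PAGE_SHIFT == 12), vm_pgoff
 * counts 4KB pages, so off = (vm_pgoff >> 9) + ((addr - vm_start) >> 21)
 * and consecutive huge pages in the mapping take consecutive interleave
 * slots.
 */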
1719
1720/*
1721 * Return the bit number of a random bit set in the nodemask.
1722 * (returns -1 if nodemask is empty)
1723 */
1724int node_random(const nodemask_t *maskp)
1725{
1726        int w, bit = -1;
1727
1728        w = nodes_weight(*maskp);
1729        if (w)
1730                bit = bitmap_ord_to_pos(maskp->bits,
1731                        get_random_int() % w, MAX_NUMNODES);
1732        return bit;
1733}
1734
1735#ifdef CONFIG_HUGETLBFS
1736/*
1737 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1738 * @vma = virtual memory area whose policy is sought
1739 * @addr = address in @vma for shared policy lookup and interleave policy
1740 * @gfp_flags = for requested zone
1741 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1742 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1743 *
1744 * Returns a zonelist suitable for a huge page allocation and a pointer
1745 * to the struct mempolicy for conditional unref after allocation.
1746 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1747 * @nodemask for filtering the zonelist.
1748 *
1749 * Must be protected by get_mems_allowed()
1750 */
1751struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1752                                gfp_t gfp_flags, struct mempolicy **mpol,
1753                                nodemask_t **nodemask)
1754{
1755        struct zonelist *zl;
1756
1757        *mpol = get_vma_policy(current, vma, addr);
1758        *nodemask = NULL;       /* assume !MPOL_BIND */
1759
1760        if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1761                zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1762                                huge_page_shift(hstate_vma(vma))), gfp_flags);
1763        } else {
1764                zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1765                if ((*mpol)->mode == MPOL_BIND)
1766                        *nodemask = &(*mpol)->v.nodes;
1767        }
1768        return zl;
1769}
1770
1771/*
1772 * init_nodemask_of_mempolicy
1773 *
1774 * If the current task's mempolicy is "default" [NULL], return 'false'
1775 * to indicate default policy.  Otherwise, extract the policy nodemask
1776 * for 'bind' or 'interleave' policy into the argument nodemask, or
1777 * initialize the argument nodemask to contain the single node for
1778 * 'preferred' or 'local' policy and return 'true' to indicate presence
1779 * of non-default mempolicy.
1780 *
1781 * We don't bother with reference counting the mempolicy [mpol_get/put]
1782 * because the current task is examining its own mempolicy and a task's
1783 * mempolicy is only ever changed by the task itself.
1784 *
1785 * N.B., it is the caller's responsibility to free a returned nodemask.
1786 */
1787bool init_nodemask_of_mempolicy(nodemask_t *mask)
1788{
1789        struct mempolicy *mempolicy;
1790        int nid;
1791
1792        if (!(mask && current->mempolicy))
1793                return false;
1794
1795        task_lock(current);
1796        mempolicy = current->mempolicy;
1797        switch (mempolicy->mode) {
1798        case MPOL_PREFERRED:
1799                if (mempolicy->flags & MPOL_F_LOCAL)
1800                        nid = numa_node_id();
1801                else
1802                        nid = mempolicy->v.preferred_node;
1803                init_nodemask_of_node(mask, nid);
1804                break;
1805
1806        case MPOL_BIND:
1807                /* Fall through */
1808        case MPOL_INTERLEAVE:
1809                *mask =  mempolicy->v.nodes;
1810                break;
1811
1812        default:
1813                BUG();
1814        }
1815        task_unlock(current);
1816
1817        return true;
1818}
1819#endif
1820
1821/*
1822 * mempolicy_nodemask_intersects
1823 *
1824 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1825 * policy.  Otherwise, check for intersection between mask and the policy
1826 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1827 * policy, always return true since it may allocate elsewhere on fallback.
1828 *
1829 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1830 */
1831bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1832                                        const nodemask_t *mask)
1833{
1834        struct mempolicy *mempolicy;
1835        bool ret = true;
1836
1837        if (!mask)
1838                return ret;
1839        task_lock(tsk);
1840        mempolicy = tsk->mempolicy;
1841        if (!mempolicy)
1842                goto out;
1843
1844        switch (mempolicy->mode) {
1845        case MPOL_PREFERRED:
1846                /*
1847                 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
1848                 * to allocate from; the task may fall back to other nodes on OOM.
1849                 * Thus, it's possible for tsk to have allocated memory from
1850                 * nodes in mask.
1851                 */
1852                break;
1853        case MPOL_BIND:
1854        case MPOL_INTERLEAVE:
1855                ret = nodes_intersects(mempolicy->v.nodes, *mask);
1856                break;
1857        default:
1858                BUG();
1859        }
1860out:
1861        task_unlock(tsk);
1862        return ret;
1863}
1864
1865/* Allocate a page in interleaved policy.
1866   Own path because it needs to do special accounting. */
1867static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1868                                        unsigned nid)
1869{
1870        struct zonelist *zl;
1871        struct page *page;
1872
1873        zl = node_zonelist(nid, gfp);
1874        page = __alloc_pages(gfp, order, zl);
1875        if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1876                inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1877        return page;
1878}
1879
1880/**
1881 *      alloc_pages_vma - Allocate a page for a VMA.
1882 *
1883 *      @gfp:
1884 *      %GFP_USER    user allocation.
1885 *      %GFP_KERNEL  kernel allocations,
1886 *      %GFP_HIGHMEM highmem/user allocations,
1887 *      %GFP_FS      allocation should not call back into a file system.
1888 *      %GFP_ATOMIC  don't sleep.
1889 *
1890 *      @order:Order of the GFP allocation.
1891 *      @vma:  Pointer to VMA or NULL if not available.
1892 *      @addr: Virtual Address of the allocation. Must be inside the VMA.
1893 *
1894 *      This function allocates a page from the kernel page pool and applies
1895 *      a NUMA policy associated with the VMA or the current process.
1896 *      When VMA is not NULL caller must hold down_read on the mmap_sem of the
1897 *      mm_struct of the VMA to prevent it from going away. Should be used for
1898 *      all allocations for pages that will be mapped into
1899 *      user space. Returns NULL when no page can be allocated.
1900 *
1901 *      Should be called with the mm_sem of the vma hold.
1902 *      Should be called with the mmap_sem of the vma held.
1903struct page *
1904alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1905                unsigned long addr, int node)
1906{
1907        struct mempolicy *pol;
1908        struct zonelist *zl;
1909        struct page *page;
1910        unsigned int cpuset_mems_cookie;
1911
1912retry_cpuset:
1913        pol = get_vma_policy(current, vma, addr);
1914        cpuset_mems_cookie = get_mems_allowed();
1915
1916        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1917                unsigned nid;
1918
1919                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1920                mpol_cond_put(pol);
1921                page = alloc_page_interleave(gfp, order, nid);
1922                if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1923                        goto retry_cpuset;
1924
1925                return page;
1926        }
1927        zl = policy_zonelist(gfp, pol, node);
1928        if (unlikely(mpol_needs_cond_ref(pol))) {
1929                /*
1930                 * slow path: ref counted shared policy
1931                 */
1932                struct page *page =  __alloc_pages_nodemask(gfp, order,
1933                                                zl, policy_nodemask(gfp, pol));
1934                __mpol_put(pol);
1935                if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1936                        goto retry_cpuset;
1937                return page;
1938        }
1939        /*
1940         * fast path:  default or task policy
1941         */
1942        page = __alloc_pages_nodemask(gfp, order, zl,
1943                                      policy_nodemask(gfp, pol));
1944        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1945                goto retry_cpuset;
1946        return page;
1947}
1948
1949/**
1950 *      alloc_pages_current - Allocate pages.
1951 *
1952 *      @gfp:
1953 *              %GFP_USER   user allocation,
1954 *              %GFP_KERNEL kernel allocation,
1955 *              %GFP_HIGHMEM highmem allocation,
1956 *              %GFP_FS     don't call back into a file system.
1957 *              %GFP_ATOMIC don't sleep.
1958 *      @order: Power of two of allocation size in pages. 0 is a single page.
1959 *
1960 *      Allocate a page from the kernel page pool.  When not in
1961 *      interrupt context, apply the current process' NUMA policy.
1962 *      Returns NULL when no page can be allocated.
1963 *
1964 *      Don't call cpuset_update_task_memory_state() unless
1965 *      1) it's ok to take cpuset_sem (can WAIT), and
1966 *      2) allocating for current task (not interrupt).
1967 */
1968struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1969{
1970        struct mempolicy *pol = current->mempolicy;
1971        struct page *page;
1972        unsigned int cpuset_mems_cookie;
1973
1974        if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1975                pol = &default_policy;
1976
1977retry_cpuset:
1978        cpuset_mems_cookie = get_mems_allowed();
1979
1980        /*
1981         * No reference counting needed for current->mempolicy
1982         * nor system default_policy
1983         */
1984        if (pol->mode == MPOL_INTERLEAVE)
1985                page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1986        else
1987                page = __alloc_pages_nodemask(gfp, order,
1988                                policy_zonelist(gfp, pol, numa_node_id()),
1989                                policy_nodemask(gfp, pol));
1990
1991        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
1992                goto retry_cpuset;
1993
1994        return page;
1995}
1996EXPORT_SYMBOL(alloc_pages_current);
1997
1998/*
1999 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2000 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2001 * with the mems_allowed returned by cpuset_mems_allowed().  This
2002 * keeps mempolicies cpuset relative after its cpuset moves.  See
2003 * further kernel/cpuset.c update_nodemask().
2004 *
2005 * current's mempolicy may be rebound by another task (the task that changes
2006 * the cpuset's mems), so we needn't do rebind work for the current task.
2007 */
2008
2009/* Slow path of a mempolicy duplicate */
2010struct mempolicy *__mpol_dup(struct mempolicy *old)
2011{
2012        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2013
2014        if (!new)
2015                return ERR_PTR(-ENOMEM);
2016
2017        /* task's mempolicy is protected by alloc_lock */
2018        if (old == current->mempolicy) {
2019                task_lock(current);
2020                *new = *old;
2021                task_unlock(current);
2022        } else
2023                *new = *old;
2024
2025        rcu_read_lock();
2026        if (current_cpuset_is_being_rebound()) {
2027                nodemask_t mems = cpuset_mems_allowed(current);
2028                if (new->flags & MPOL_F_REBINDING)
2029                        mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2030                else
2031                        mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2032        }
2033        rcu_read_unlock();
2034        atomic_set(&new->refcnt, 1);
2035        return new;
2036}
2037
2038/*
2039 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
2040 * eliminate the MPOL_F_* flags that require conditional ref and
2041 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
2042 * after return.  Use the returned value.
2043 *
2044 * Allows use of a mempolicy for, e.g., multiple allocations with a single
2045 * policy lookup, even if the policy needs/has extra ref on lookup.
2046 * shmem_readahead needs this.
2047 */
2048struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
2049                                                struct mempolicy *frompol)
2050{
2051        if (!mpol_needs_cond_ref(frompol))
2052                return frompol;
2053
2054        *tompol = *frompol;
2055        tompol->flags &= ~MPOL_F_SHARED;        /* copy doesn't need unref */
2056        __mpol_put(frompol);
2057        return tompol;
2058}
2059
2060/* Slow path of a mempolicy comparison */
2061bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2062{
2063        if (!a || !b)
2064                return false;
2065        if (a->mode != b->mode)
2066                return false;
2067        if (a->flags != b->flags)
2068                return false;
2069        if (mpol_store_user_nodemask(a))
2070                if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2071                        return false;
2072
2073        switch (a->mode) {
2074        case MPOL_BIND:
2075                /* Fall through */
2076        case MPOL_INTERLEAVE:
2077                return !!nodes_equal(a->v.nodes, b->v.nodes);
2078        case MPOL_PREFERRED:
2079                return a->v.preferred_node == b->v.preferred_node;
2080        default:
2081                BUG();
2082                return false;
2083        }
2084}
2085
2086/*
2087 * Shared memory backing store policy support.
2088 *
2089 * Remember policies even when nobody has shared memory mapped.
2090 * The policies are kept in a red-black tree linked from the inode.
2091 * They are protected by sp->mutex, which must be held
2092 * for any accesses to the tree.
2093 */
2094
2095/* lookup first element intersecting start-end */
2096/* Caller holds sp->mutex */
2097static struct sp_node *
2098sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2099{
2100        struct rb_node *n = sp->root.rb_node;
2101
2102        while (n) {
2103                struct sp_node *p = rb_entry(n, struct sp_node, nd);
2104
2105                if (start >= p->end)
2106                        n = n->rb_right;
2107                else if (end <= p->start)
2108                        n = n->rb_left;
2109                else
2110                        break;
2111        }
2112        if (!n)
2113                return NULL;
2114        for (;;) {
2115                struct sp_node *w = NULL;
2116                struct rb_node *prev = rb_prev(n);
2117                if (!prev)
2118                        break;
2119                w = rb_entry(prev, struct sp_node, nd);
2120                if (w->end <= start)
2121                        break;
2122                n = prev;
2123        }
2124        return rb_entry(n, struct sp_node, nd);
2125}
2126
2127/* Insert a new shared policy into the list. */
2128/* Caller holds sp->mutex */
2129static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2130{
2131        struct rb_node **p = &sp->root.rb_node;
2132        struct rb_node *parent = NULL;
2133        struct sp_node *nd;
2134
2135        while (*p) {
2136                parent = *p;
2137                nd = rb_entry(parent, struct sp_node, nd);
2138                if (new->start < nd->start)
2139                        p = &(*p)->rb_left;
2140                else if (new->end > nd->end)
2141                        p = &(*p)->rb_right;
2142                else
2143                        BUG();
2144        }
2145        rb_link_node(&new->nd, parent, p);
2146        rb_insert_color(&new->nd, &sp->root);
2147        pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2148                 new->policy ? new->policy->mode : 0);
2149}
2150
2151/* Find shared policy intersecting idx */
2152struct mempolicy *
2153mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2154{
2155        struct mempolicy *pol = NULL;
2156        struct sp_node *sn;
2157
2158        if (!sp->root.rb_node)
2159                return NULL;
2160        mutex_lock(&sp->mutex);
2161        sn = sp_lookup(sp, idx, idx+1);
2162        if (sn) {
2163                mpol_get(sn->policy);
2164                pol = sn->policy;
2165        }
2166        mutex_unlock(&sp->mutex);
2167        return pol;
2168}
2169
2170static void sp_free(struct sp_node *n)
2171{
2172        mpol_put(n->policy);
2173        kmem_cache_free(sn_cache, n);
2174}
2175
2176static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2177{
2178        pr_debug("deleting %lx-%lx\n", n->start, n->end);
2179        rb_erase(&n->nd, &sp->root);
2180        sp_free(n);
2181}
2182
2183static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2184                                struct mempolicy *pol)
2185{
2186        struct sp_node *n;
2187        struct mempolicy *newpol;
2188
2189        n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2190        if (!n)
2191                return NULL;
2192
2193        newpol = mpol_dup(pol);
2194        if (IS_ERR(newpol)) {
2195                kmem_cache_free(sn_cache, n);
2196                return NULL;
2197        }
2198        newpol->flags |= MPOL_F_SHARED;
2199
2200        n->start = start;
2201        n->end = end;
2202        n->policy = newpol;
2203
2204        return n;
2205}
2206
2207/* Replace a policy range. */
2208static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2209                                 unsigned long end, struct sp_node *new)
2210{
2211        struct sp_node *n;
2212        int ret = 0;
2213
2214        mutex_lock(&sp->mutex);
2215        n = sp_lookup(sp, start, end);
2216        /* Take care of old policies in the same range. */
2217        while (n && n->start < end) {
2218                struct rb_node *next = rb_next(&n->nd);
2219                if (n->start >= start) {
2220                        if (n->end <= end)
2221                                sp_delete(sp, n);
2222                        else
2223                                n->start = end;
2224                } else {
2225                        /* Old policy spanning whole new range. */
2226                        if (n->end > end) {
2227                                struct sp_node *new2;
2228                                new2 = sp_alloc(end, n->end, n->policy);
2229                                if (!new2) {
2230                                        ret = -ENOMEM;
2231                                        goto out;
2232                                }
2233                                n->end = start;
2234                                sp_insert(sp, new2);
2235                                break;
2236                        } else
2237                                n->end = start;
2238                }
2239                if (!next)
2240                        break;
2241                n = rb_entry(next, struct sp_node, nd);
2242        }
2243        if (new)
2244                sp_insert(sp, new);
2245out:
2246        mutex_unlock(&sp->mutex);
2247        return ret;
2248}
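
/*
 * Worked example for shared_policy_replace(): if an existing node spans
 * pages [0, 10) and @new spans [3, 6), the old node is trimmed to
 * [0, 3), a copy of its policy (new2) is inserted for [6, 10), and @new
 * is inserted for [3, 6).  new2 is allocated before the old node is
 * modified, so an allocation failure leaves that node intact.
 */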
2249
2250/**
2251 * mpol_shared_policy_init - initialize shared policy for inode
2252 * @sp: pointer to inode shared policy
2253 * @mpol:  struct mempolicy to install
2254 *
2255 * Install non-NULL @mpol in inode's shared policy rb-tree.
2256 * On entry, the current task has a reference on a non-NULL @mpol.
2257 * This must be released on exit.
2258 * This is called at get_inode() time, so we can use GFP_KERNEL.
2259 */
2260void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2261{
2262        int ret;
2263
2264        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
2265        mutex_init(&sp->mutex);
2266
2267        if (mpol) {
2268                struct vm_area_struct pvma;
2269                struct mempolicy *new;
2270                NODEMASK_SCRATCH(scratch);
2271
2272                if (!scratch)
2273                        goto put_mpol;
2274                /* contextualize the tmpfs mount point mempolicy */
2275                new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2276                if (IS_ERR(new))
2277                        goto free_scratch; /* no valid nodemask intersection */
2278
2279                task_lock(current);
2280                ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2281                task_unlock(current);
2282                if (ret)
2283                        goto put_new;
2284
2285                /* Create pseudo-vma that contains just the policy */
2286                memset(&pvma, 0, sizeof(struct vm_area_struct));
2287                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
2288                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2289
2290put_new:
2291                mpol_put(new);                  /* drop initial ref */
2292free_scratch:
2293                NODEMASK_SCRATCH_FREE(scratch);
2294put_mpol:
2295                mpol_put(mpol); /* drop our incoming ref on sb mpol */
2296        }
2297}
2298
2299int mpol_set_shared_policy(struct shared_policy *info,
2300                        struct vm_area_struct *vma, struct mempolicy *npol)
2301{
2302        int err;
2303        struct sp_node *new = NULL;
2304        unsigned long sz = vma_pages(vma);
2305
2306        pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2307                 vma->vm_pgoff,
2308                 sz, npol ? npol->mode : -1,
2309                 npol ? npol->flags : -1,
2310                 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2311
2312        if (npol) {
2313                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2314                if (!new)
2315                        return -ENOMEM;
2316        }
2317        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2318        if (err && new)
2319                sp_free(new);
2320        return err;
2321}
2322
2323/* Free a backing policy store on inode delete. */
2324void mpol_free_shared_policy(struct shared_policy *p)
2325{
2326        struct sp_node *n;
2327        struct rb_node *next;
2328
2329        if (!p->root.rb_node)
2330                return;
2331        mutex_lock(&p->mutex);
2332        next = rb_first(&p->root);
2333        while (next) {
2334                n = rb_entry(next, struct sp_node, nd);
2335                next = rb_next(&n->nd);
2336                sp_delete(p, n);
2337        }
2338        mutex_unlock(&p->mutex);
2339}
2340
2341/* assumes fs == KERNEL_DS */
2342void __init numa_policy_init(void)
2343{
2344        nodemask_t interleave_nodes;
2345        unsigned long largest = 0;
2346        int nid, prefer = 0;
2347
2348        policy_cache = kmem_cache_create("numa_policy",
2349                                         sizeof(struct mempolicy),
2350                                         0, SLAB_PANIC, NULL);
2351
2352        sn_cache = kmem_cache_create("shared_policy_node",
2353                                     sizeof(struct sp_node),
2354                                     0, SLAB_PANIC, NULL);
2355
2356        /*
2357         * Set interleaving policy for system init. Interleaving is only
2358         * enabled across suitably sized nodes (default is >= 16MB), or
2359         * fall back to the largest node if they're all smaller.
2360         */
2361        nodes_clear(interleave_nodes);
2362        for_each_node_state(nid, N_HIGH_MEMORY) {
2363                unsigned long total_pages = node_present_pages(nid);
2364
2365                /* Preserve the largest node */
2366                if (largest < total_pages) {
2367                        largest = total_pages;
2368                        prefer = nid;
2369                }
2370
2371                /* Interleave this node? */
2372                if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2373                        node_set(nid, interleave_nodes);
2374        }
2375
2376        /* All too small, use the largest */
2377        if (unlikely(nodes_empty(interleave_nodes)))
2378                node_set(prefer, interleave_nodes);
2379
2380        if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2381                printk("numa_policy_init: interleaving failed\n");
2382}
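
/*
 * Worked example for the interleave threshold above: with 4KB base
 * pages, (total_pages << PAGE_SHIFT) >= 16MB means a node needs at
 * least 16MB / 4KB = 4096 present pages to join the boot-time
 * interleave set.
 */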
2383
2384/* Reset policy of current process to default */
2385void numa_default_policy(void)
2386{
2387        do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2388}
2389
2390/*
2391 * Parse and format mempolicy from/to strings
2392 */
2393
2394/*
2395 * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
2396 * Used only for mpol_parse_str() and mpol_to_str()
2397 */
2398#define MPOL_LOCAL MPOL_MAX
2399static const char * const policy_modes[] =
2400{
2401        [MPOL_DEFAULT]    = "default",
2402        [MPOL_PREFERRED]  = "prefer",
2403        [MPOL_BIND]       = "bind",
2404        [MPOL_INTERLEAVE] = "interleave",
2405        [MPOL_LOCAL]      = "local"
2406};
2407
2408
2409#ifdef CONFIG_TMPFS
2410/**
2411 * mpol_parse_str - parse string to mempolicy
2412 * @str:  string containing mempolicy to parse
2413 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2414 * @no_context:  flag whether to "contextualize" the mempolicy
2415 *
2416 * Format of input:
2417 *      <mode>[=<flags>][:<nodelist>]
2418 *
2419 * if @no_context is true, save the input nodemask in w.user_nodemask in
2420 * the returned mempolicy.  This will be used to "clone" the mempolicy in
2421 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
2422 * mount option.  Note that if 'static' or 'relative' mode flags were
2423 * specified, the input nodemask will already have been saved.  Saving
2424 * it again is redundant, but safe.
2425 *
2426 * On success, returns 0, else 1
2427 */
2428int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2429{
2430        struct mempolicy *new = NULL;
2431        unsigned short mode;
2432        unsigned short uninitialized_var(mode_flags);
2433        nodemask_t nodes;
2434        char *nodelist = strchr(str, ':');
2435        char *flags = strchr(str, '=');
2436        int err = 1;
2437
2438        if (nodelist) {
2439                /* NUL-terminate mode or flags string */
2440                *nodelist++ = '\0';
2441                if (nodelist_parse(nodelist, nodes))
2442                        goto out;
2443                if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2444                        goto out;
2445        } else
2446                nodes_clear(nodes);
2447
2448        if (flags)
2449                *flags++ = '\0';        /* terminate mode string */
2450
2451        for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2452                if (!strcmp(str, policy_modes[mode])) {
2453                        break;
2454                }
2455        }
2456        if (mode > MPOL_LOCAL)
2457                goto out;
2458
2459        switch (mode) {
2460        case MPOL_PREFERRED:
2461                /*
2462                 * Insist on a nodelist of one node only
2463                 */
2464                if (nodelist) {
2465                        char *rest = nodelist;
2466                        while (isdigit(*rest))
2467                                rest++;
2468                        if (*rest)
2469                                goto out;
2470                }
2471                break;
2472        case MPOL_INTERLEAVE:
2473                /*
2474                 * Default to online nodes with memory if no nodelist
2475                 */
2476                if (!nodelist)
2477                        nodes = node_states[N_HIGH_MEMORY];
2478                break;
2479        case MPOL_LOCAL:
2480                /*
2481                 * Don't allow a nodelist;  mpol_new() checks flags
2482                 */
2483                if (nodelist)
2484                        goto out;
2485                mode = MPOL_PREFERRED;
2486                break;
2487        case MPOL_DEFAULT:
2488                /*
2489                 * Insist on an empty nodelist
2490                 */
2491                if (!nodelist)
2492                        err = 0;
2493                goto out;
2494        case MPOL_BIND:
2495                /*
2496                 * Insist on a nodelist
2497                 */
2498                if (!nodelist)
2499                        goto out;
2500        }
2501
2502        mode_flags = 0;
2503        if (flags) {
2504                /*
2505                 * Currently, we only support two mutually exclusive
2506                 * mode flags.
2507                 */
2508                if (!strcmp(flags, "static"))
2509                        mode_flags |= MPOL_F_STATIC_NODES;
2510                else if (!strcmp(flags, "relative"))
2511                        mode_flags |= MPOL_F_RELATIVE_NODES;
2512                else
2513                        goto out;
2514        }
2515
2516        new = mpol_new(mode, mode_flags, &nodes);
2517        if (IS_ERR(new))
2518                goto out;
2519
2520        if (no_context) {
2521                /* save for contextualization */
2522                new->w.user_nodemask = nodes;
2523        } else {
2524                int ret;
2525                NODEMASK_SCRATCH(scratch);
2526                if (scratch) {
2527                        task_lock(current);
2528                        ret = mpol_set_nodemask(new, &nodes, scratch);
2529                        task_unlock(current);
2530                } else
2531                        ret = -ENOMEM;
2532                NODEMASK_SCRATCH_FREE(scratch);
2533                if (ret) {
2534                        mpol_put(new);
2535                        goto out;
2536                }
2537        }
2538        err = 0;
2539
2540out:
2541        /* Restore string for error message */
2542        if (nodelist)
2543                *--nodelist = ':';
2544        if (flags)
2545                *--flags = '=';
2546        if (!err)
2547                *mpol = new;
2548        return err;
2549}
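
/*
 * Illustrative sketch (not part of this file): the format parsed above
 * backs the tmpfs "mpol=" mount option, so a mount request might look
 * like the following (mount point and size are example assumptions):
 *
 *	#include <sys/mount.h>
 *
 *	// interleave tmpfs pages across nodes 0-3
 *	mount("tmpfs", "/mnt/scratch", "tmpfs", 0,
 *	      "size=1g,mpol=interleave:0-3");
 *
 * A "=static" or "=relative" mode flag may follow the mode name, as
 * handled by the flag parsing above.
 */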
2550#endif /* CONFIG_TMPFS */
2551
2552/**
2553 * mpol_to_str - format a mempolicy structure for printing
2554 * @buffer:  to contain formatted mempolicy string
2555 * @maxlen:  length of @buffer
2556 * @pol:  pointer to mempolicy to be formatted
2557 * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2558 *
2559 * Convert a mempolicy into a string.
2560 * Returns the number of characters in buffer (if positive)
2561 * or an error (negative)
2562 */
2563int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2564{
2565        char *p = buffer;
2566        int l;
2567        nodemask_t nodes;
2568        unsigned short mode;
2569        unsigned short flags = pol ? pol->flags : 0;
2570
2571        /*
2572         * Sanity check:  room for longest mode, flag and some nodes
2573         */
2574        VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2575
2576        if (!pol || pol == &default_policy)
2577                mode = MPOL_DEFAULT;
2578        else
2579                mode = pol->mode;
2580
2581        switch (mode) {
2582        case MPOL_DEFAULT:
2583                nodes_clear(nodes);
2584                break;
2585
2586        case MPOL_PREFERRED:
2587                nodes_clear(nodes);
2588                if (flags & MPOL_F_LOCAL)
2589                        mode = MPOL_LOCAL;      /* pseudo-policy */
2590                else
2591                        node_set(pol->v.preferred_node, nodes);
2592                break;
2593
2594        case MPOL_BIND:
2595                /* Fall through */
2596        case MPOL_INTERLEAVE:
2597                if (no_context)
2598                        nodes = pol->w.user_nodemask;
2599                else
2600                        nodes = pol->v.nodes;
2601                break;
2602
2603        default:
2604                return -EINVAL;
2605        }
2606
2607        l = strlen(policy_modes[mode]);
2608        if (buffer + maxlen < p + l + 1)
2609                return -ENOSPC;
2610
2611        strcpy(p, policy_modes[mode]);
2612        p += l;
2613
2614        if (flags & MPOL_MODE_FLAGS) {
2615                if (buffer + maxlen < p + 2)
2616                        return -ENOSPC;
2617                *p++ = '=';
2618
2619                /*
2620                 * Currently, the only defined flags are mutually exclusive
2621                 */
2622                if (flags & MPOL_F_STATIC_NODES)
2623                        p += snprintf(p, buffer + maxlen - p, "static");
2624                else if (flags & MPOL_F_RELATIVE_NODES)
2625                        p += snprintf(p, buffer + maxlen - p, "relative");
2626        }
2627
2628        if (!nodes_empty(nodes)) {
2629                if (buffer + maxlen < p + 2)
2630                        return -ENOSPC;
2631                *p++ = ':';
2632                p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2633        }
2634        return p - buffer;
2635}
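
/*
 * Example outputs of mpol_to_str(), following the code above: "default",
 * "local", "prefer:1", "prefer=static:1", "bind:0-3" and
 * "interleave=relative:0,2,4-7".
 */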
2636