linux/mm/mempolicy.c
   1/*
   2 * Simple NUMA memory policy for the Linux kernel.
   3 *
   4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
   6 * Subject to the GNU Public License, version 2.
   7 *
   8 * NUMA policy allows the user to give hints in which node(s) memory should
   9 * be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
   19 *                for anonymous memory. For process policy a process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
   25 *                to the last. It would be better if bind truly restricted
   26 *                the allocation to the given memory nodes instead.
  27 *
  28 * preferred       Try a specific node first before normal fallback.
  29 *                As a special case node -1 here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
   31 *                but useful to set in a VMA when you have a non-default
  32 *                process policy.
  33 *
  34 * default        Allocate on the local node first, or when on a VMA
  35 *                use the process policy. This is what Linux always did
  36 *                in a NUMA aware kernel and still does by, ahem, default.
  37 *
   38 * The process policy is applied for most non-interrupt memory allocations
  39 * in that process' context. Interrupts ignore the policies and always
  40 * try to allocate on the local CPU. The VMA policy is only applied for memory
  41 * allocations for a VMA in the VM.
  42 *
  43 * Currently there are a few corner cases in swapping where the policy
  44 * is not applied, but the majority should be handled. When process policy
  45 * is used it is not remembered over swap outs/swap ins.
  46 *
  47 * Only the highest zone in the zone hierarchy gets policied. Allocations
  48 * requesting a lower zone just use default policy. This implies that
   49 * on systems with highmem, kernel lowmem allocations don't get policied.
  50 * Same with GFP_DMA allocations.
  51 *
  52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  53 * all users and remembered even when nobody has memory mapped.
  54 */
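/*
 * Illustrative sketch (not part of the kernel source): how the modes above
 * are typically requested from user space through the syscall wrappers in
 * libnuma's <numaif.h>.  The node numbers are made-up examples.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 2);	(nodes 0 and 2)
 *
 *	interleave this process' new allocations across nodes 0 and 2:
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	prefer node 0, with normal fallback to the other nodes:
 *	unsigned long node0 = 1UL << 0;
 *	set_mempolicy(MPOL_PREFERRED, &node0, sizeof(node0) * 8);
 *
 *	revert to the default (local) policy:
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */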
  55
  56/* Notebook:
  57   fix mmap readahead to honour policy and enable policy for any page cache
  58   object
  59   statistics for bigpages
  60   global policy for page cache? currently it uses process policy. Requires
  61   first item above.
  62   handle mremap for shared memory (currently ignored for the policy)
  63   grows down?
  64   make bind policy root only? It can trigger oom much faster and the
  65   kernel is not always grateful with that.
  66*/
  67
  68#include <linux/mempolicy.h>
  69#include <linux/mm.h>
  70#include <linux/highmem.h>
  71#include <linux/hugetlb.h>
  72#include <linux/kernel.h>
  73#include <linux/sched.h>
  74#include <linux/nodemask.h>
  75#include <linux/cpuset.h>
  76#include <linux/slab.h>
  77#include <linux/string.h>
  78#include <linux/module.h>
  79#include <linux/nsproxy.h>
  80#include <linux/interrupt.h>
  81#include <linux/init.h>
  82#include <linux/compat.h>
  83#include <linux/swap.h>
  84#include <linux/seq_file.h>
  85#include <linux/proc_fs.h>
  86#include <linux/migrate.h>
  87#include <linux/ksm.h>
  88#include <linux/rmap.h>
  89#include <linux/security.h>
  90#include <linux/syscalls.h>
  91#include <linux/ctype.h>
  92#include <linux/mm_inline.h>
  93
  94#include <asm/tlbflush.h>
  95#include <asm/uaccess.h>
  96#include <linux/random.h>
  97
  98#include "internal.h"
  99
 100/* Internal flags */
  101#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for contiguous vmas */
 102#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */
 103
 104static struct kmem_cache *policy_cache;
 105static struct kmem_cache *sn_cache;
 106
  107/* Highest zone. A specific allocation for a zone below that is not
 108   policied. */
 109enum zone_type policy_zone = 0;
 110
 111/*
 112 * run-time system-wide default policy => local allocation
 113 */
 114struct mempolicy default_policy = {
 115        .refcnt = ATOMIC_INIT(1), /* never free it */
 116        .mode = MPOL_PREFERRED,
 117        .flags = MPOL_F_LOCAL,
 118};
 119
 120static const struct mempolicy_operations {
 121        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 122        /*
  123         * If the read-side task has no lock to protect task->mempolicy, the
  124         * write-side task will rebind task->mempolicy in two steps.  The
  125         * first step sets all the newly allowed nodes, and the second step
  126         * clears all the disallowed nodes.  This way we avoid a window in
  127         * which no node is available to allocate a page from.
  128         * If the read side holds a lock that protects task->mempolicy, we
  129         * rebind directly in one step (a worked example follows this struct).
 130         *
 131         * step:
 132         *      MPOL_REBIND_ONCE - do rebind work at once
  133         *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
  134         *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
 135         */
 136        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
 137                        enum mpol_rebind_step step);
 138} mpol_ops[MPOL_MAX];
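/*
 * Hypothetical example of the two-step rebind described above (node numbers
 * are illustrative only): an MPOL_INTERLEAVE policy over nodes {0,1} whose
 * cpuset is being moved to nodes {2,3}.
 *
 *	MPOL_REBIND_STEP1: pol->v.nodes = {0,1} | {2,3} = {0,1,2,3}
 *		(new nodes are added first, so an allocation racing with the
 *		 rebind always finds at least one usable node)
 *	MPOL_REBIND_STEP2: pol->v.nodes = {2,3}
 *		(the now-disallowed nodes are removed)
 *
 * MPOL_REBIND_ONCE does the same remap in a single pass when the read side
 * is known to hold a lock protecting task->mempolicy.
 */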
 139
 140/* Check that the nodemask contains at least one populated zone */
 141static int is_valid_nodemask(const nodemask_t *nodemask)
 142{
 143        int nd, k;
 144
 145        for_each_node_mask(nd, *nodemask) {
 146                struct zone *z;
 147
 148                for (k = 0; k <= policy_zone; k++) {
 149                        z = &NODE_DATA(nd)->node_zones[k];
 150                        if (z->present_pages > 0)
 151                                return 1;
 152                }
 153        }
 154
 155        return 0;
 156}
 157
 158static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 159{
 160        return pol->flags & MPOL_MODE_FLAGS;
 161}
 162
 163static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 164                                   const nodemask_t *rel)
 165{
 166        nodemask_t tmp;
 167        nodes_fold(tmp, *orig, nodes_weight(*rel));
 168        nodes_onto(*ret, tmp, *rel);
 169}
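/*
 * Illustrative example (node numbers are hypothetical): with a user nodemask
 * of {0,2} and a cpuset of {4,5,6}, nodes_fold() first wraps the user mask
 * into the cpuset's weight (3), giving relative nodes {0,2}; nodes_onto()
 * then maps those onto the 0th and 2nd nodes of the cpuset, so the result
 * is {4,6}.
 */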
 170
 171static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 172{
 173        if (nodes_empty(*nodes))
 174                return -EINVAL;
 175        pol->v.nodes = *nodes;
 176        return 0;
 177}
 178
 179static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 180{
 181        if (!nodes)
 182                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
 183        else if (nodes_empty(*nodes))
 184                return -EINVAL;                 /*  no allowed nodes */
 185        else
 186                pol->v.preferred_node = first_node(*nodes);
 187        return 0;
 188}
 189
 190static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 191{
 192        if (!is_valid_nodemask(nodes))
 193                return -EINVAL;
 194        pol->v.nodes = *nodes;
 195        return 0;
 196}
 197
 198/*
 199 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 200 * any, for the new policy.  mpol_new() has already validated the nodes
 201 * parameter with respect to the policy mode and flags.  But, we need to
 202 * handle an empty nodemask with MPOL_PREFERRED here.
 203 *
 204 * Must be called holding task's alloc_lock to protect task's mems_allowed
  205 * and mempolicy.  May also be called holding mmap_sem for write.
 206 */
 207static int mpol_set_nodemask(struct mempolicy *pol,
 208                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 209{
 210        int ret;
 211
 212        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
 213        if (pol == NULL)
 214                return 0;
 215        /* Check N_HIGH_MEMORY */
 216        nodes_and(nsc->mask1,
 217                  cpuset_current_mems_allowed, node_states[N_HIGH_MEMORY]);
 218
 219        VM_BUG_ON(!nodes);
 220        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
 221                nodes = NULL;   /* explicit local allocation */
 222        else {
 223                if (pol->flags & MPOL_F_RELATIVE_NODES)
 224                        mpol_relative_nodemask(&nsc->mask2, nodes,&nsc->mask1);
 225                else
 226                        nodes_and(nsc->mask2, *nodes, nsc->mask1);
 227
 228                if (mpol_store_user_nodemask(pol))
 229                        pol->w.user_nodemask = *nodes;
 230                else
 231                        pol->w.cpuset_mems_allowed =
 232                                                cpuset_current_mems_allowed;
 233        }
 234
 235        if (nodes)
 236                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 237        else
 238                ret = mpol_ops[pol->mode].create(pol, NULL);
 239        return ret;
 240}
 241
 242/*
  243 * This function just creates a new policy, does some checks and simple
 244 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 245 */
 246static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 247                                  nodemask_t *nodes)
 248{
 249        struct mempolicy *policy;
 250
 251        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 252                 mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
 253
 254        if (mode == MPOL_DEFAULT) {
 255                if (nodes && !nodes_empty(*nodes))
 256                        return ERR_PTR(-EINVAL);
 257                return NULL;    /* simply delete any existing policy */
 258        }
 259        VM_BUG_ON(!nodes);
 260
 261        /*
 262         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 263         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 264         * All other modes require a valid pointer to a non-empty nodemask.
 265         */
 266        if (mode == MPOL_PREFERRED) {
 267                if (nodes_empty(*nodes)) {
 268                        if (((flags & MPOL_F_STATIC_NODES) ||
 269                             (flags & MPOL_F_RELATIVE_NODES)))
 270                                return ERR_PTR(-EINVAL);
 271                }
 272        } else if (nodes_empty(*nodes))
 273                return ERR_PTR(-EINVAL);
 274        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 275        if (!policy)
 276                return ERR_PTR(-ENOMEM);
 277        atomic_set(&policy->refcnt, 1);
 278        policy->mode = mode;
 279        policy->flags = flags;
 280
 281        return policy;
 282}
 283
 284/* Slow path of a mpol destructor. */
 285void __mpol_put(struct mempolicy *p)
 286{
 287        if (!atomic_dec_and_test(&p->refcnt))
 288                return;
 289        kmem_cache_free(policy_cache, p);
 290}
 291
 292static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
 293                                enum mpol_rebind_step step)
 294{
 295}
 296
 297/*
 298 * step:
 299 *      MPOL_REBIND_ONCE  - do rebind work at once
  300 *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
  301 *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
 302 */
 303static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
 304                                 enum mpol_rebind_step step)
 305{
 306        nodemask_t tmp;
 307
 308        if (pol->flags & MPOL_F_STATIC_NODES)
 309                nodes_and(tmp, pol->w.user_nodemask, *nodes);
 310        else if (pol->flags & MPOL_F_RELATIVE_NODES)
 311                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 312        else {
 313                /*
 314                 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
 315                 * result
 316                 */
 317                if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
 318                        nodes_remap(tmp, pol->v.nodes,
 319                                        pol->w.cpuset_mems_allowed, *nodes);
 320                        pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
 321                } else if (step == MPOL_REBIND_STEP2) {
 322                        tmp = pol->w.cpuset_mems_allowed;
 323                        pol->w.cpuset_mems_allowed = *nodes;
 324                } else
 325                        BUG();
 326        }
 327
 328        if (nodes_empty(tmp))
 329                tmp = *nodes;
 330
 331        if (step == MPOL_REBIND_STEP1)
 332                nodes_or(pol->v.nodes, pol->v.nodes, tmp);
 333        else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
 334                pol->v.nodes = tmp;
 335        else
 336                BUG();
 337
 338        if (!node_isset(current->il_next, tmp)) {
 339                current->il_next = next_node(current->il_next, tmp);
 340                if (current->il_next >= MAX_NUMNODES)
 341                        current->il_next = first_node(tmp);
 342                if (current->il_next >= MAX_NUMNODES)
 343                        current->il_next = numa_node_id();
 344        }
 345}
 346
 347static void mpol_rebind_preferred(struct mempolicy *pol,
 348                                  const nodemask_t *nodes,
 349                                  enum mpol_rebind_step step)
 350{
 351        nodemask_t tmp;
 352
 353        if (pol->flags & MPOL_F_STATIC_NODES) {
 354                int node = first_node(pol->w.user_nodemask);
 355
 356                if (node_isset(node, *nodes)) {
 357                        pol->v.preferred_node = node;
 358                        pol->flags &= ~MPOL_F_LOCAL;
 359                } else
 360                        pol->flags |= MPOL_F_LOCAL;
 361        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
 362                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 363                pol->v.preferred_node = first_node(tmp);
 364        } else if (!(pol->flags & MPOL_F_LOCAL)) {
 365                pol->v.preferred_node = node_remap(pol->v.preferred_node,
 366                                                   pol->w.cpuset_mems_allowed,
 367                                                   *nodes);
 368                pol->w.cpuset_mems_allowed = *nodes;
 369        }
 370}
 371
 372/*
 373 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 374 *
  375 * If the read-side task has no lock to protect task->mempolicy, the
  376 * write-side task will rebind task->mempolicy in two steps.  The first
  377 * step sets all the newly allowed nodes, and the second step clears all
  378 * the disallowed nodes.  This way we avoid a window in which no node is
  379 * available to allocate a page from.
  380 * If the read side holds a lock that protects task->mempolicy, we rebind
  381 * directly in one step.
 382 *
 383 * step:
 384 *      MPOL_REBIND_ONCE  - do rebind work at once
  385 *      MPOL_REBIND_STEP1 - set all the newly allowed nodes
  386 *      MPOL_REBIND_STEP2 - clear all the disallowed nodes
 387 */
 388static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
 389                                enum mpol_rebind_step step)
 390{
 391        if (!pol)
 392                return;
 393        if (!mpol_store_user_nodemask(pol) && step == 0 &&
 394            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 395                return;
 396
 397        if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
 398                return;
 399
 400        if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
 401                BUG();
 402
 403        if (step == MPOL_REBIND_STEP1)
 404                pol->flags |= MPOL_F_REBINDING;
 405        else if (step == MPOL_REBIND_STEP2)
 406                pol->flags &= ~MPOL_F_REBINDING;
 407        else if (step >= MPOL_REBIND_NSTEP)
 408                BUG();
 409
 410        mpol_ops[pol->mode].rebind(pol, newmask, step);
 411}
 412
 413/*
 414 * Wrapper for mpol_rebind_policy() that just requires task
 415 * pointer, and updates task mempolicy.
 416 *
 417 * Called with task's alloc_lock held.
 418 */
 419
 420void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
 421                        enum mpol_rebind_step step)
 422{
 423        mpol_rebind_policy(tsk->mempolicy, new, step);
 424}
 425
 426/*
 427 * Rebind each vma in mm to new nodemask.
 428 *
 429 * Call holding a reference to mm.  Takes mm->mmap_sem during call.
 430 */
 431
 432void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 433{
 434        struct vm_area_struct *vma;
 435
 436        down_write(&mm->mmap_sem);
 437        for (vma = mm->mmap; vma; vma = vma->vm_next)
 438                mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
 439        up_write(&mm->mmap_sem);
 440}
 441
 442static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 443        [MPOL_DEFAULT] = {
 444                .rebind = mpol_rebind_default,
 445        },
 446        [MPOL_INTERLEAVE] = {
 447                .create = mpol_new_interleave,
 448                .rebind = mpol_rebind_nodemask,
 449        },
 450        [MPOL_PREFERRED] = {
 451                .create = mpol_new_preferred,
 452                .rebind = mpol_rebind_preferred,
 453        },
 454        [MPOL_BIND] = {
 455                .create = mpol_new_bind,
 456                .rebind = mpol_rebind_nodemask,
 457        },
 458};
 459
 460static void migrate_page_add(struct page *page, struct list_head *pagelist,
 461                                unsigned long flags);
 462
  463/* Scan through pages, checking whether each one satisfies the node placement conditions. */
 464static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 465                unsigned long addr, unsigned long end,
 466                const nodemask_t *nodes, unsigned long flags,
 467                void *private)
 468{
 469        pte_t *orig_pte;
 470        pte_t *pte;
 471        spinlock_t *ptl;
 472
 473        orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 474        do {
 475                struct page *page;
 476                int nid;
 477
 478                if (!pte_present(*pte))
 479                        continue;
 480                page = vm_normal_page(vma, addr, *pte);
 481                if (!page)
 482                        continue;
 483                /*
 484                 * vm_normal_page() filters out zero pages, but there might
 485                 * still be PageReserved pages to skip, perhaps in a VDSO.
 486                 * And we cannot move PageKsm pages sensibly or safely yet.
 487                 */
 488                if (PageReserved(page) || PageKsm(page))
 489                        continue;
 490                nid = page_to_nid(page);
 491                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
 492                        continue;
 493
 494                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 495                        migrate_page_add(page, private, flags);
 496                else
 497                        break;
 498        } while (pte++, addr += PAGE_SIZE, addr != end);
 499        pte_unmap_unlock(orig_pte, ptl);
 500        return addr != end;
 501}
 502
 503static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 504                unsigned long addr, unsigned long end,
 505                const nodemask_t *nodes, unsigned long flags,
 506                void *private)
 507{
 508        pmd_t *pmd;
 509        unsigned long next;
 510
 511        pmd = pmd_offset(pud, addr);
 512        do {
 513                next = pmd_addr_end(addr, end);
 514                split_huge_page_pmd(vma->vm_mm, pmd);
 515                if (pmd_none_or_clear_bad(pmd))
 516                        continue;
 517                if (check_pte_range(vma, pmd, addr, next, nodes,
 518                                    flags, private))
 519                        return -EIO;
 520        } while (pmd++, addr = next, addr != end);
 521        return 0;
 522}
 523
 524static inline int check_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
 525                unsigned long addr, unsigned long end,
 526                const nodemask_t *nodes, unsigned long flags,
 527                void *private)
 528{
 529        pud_t *pud;
 530        unsigned long next;
 531
 532        pud = pud_offset(pgd, addr);
 533        do {
 534                next = pud_addr_end(addr, end);
 535                if (pud_none_or_clear_bad(pud))
 536                        continue;
 537                if (check_pmd_range(vma, pud, addr, next, nodes,
 538                                    flags, private))
 539                        return -EIO;
 540        } while (pud++, addr = next, addr != end);
 541        return 0;
 542}
 543
 544static inline int check_pgd_range(struct vm_area_struct *vma,
 545                unsigned long addr, unsigned long end,
 546                const nodemask_t *nodes, unsigned long flags,
 547                void *private)
 548{
 549        pgd_t *pgd;
 550        unsigned long next;
 551
 552        pgd = pgd_offset(vma->vm_mm, addr);
 553        do {
 554                next = pgd_addr_end(addr, end);
 555                if (pgd_none_or_clear_bad(pgd))
 556                        continue;
 557                if (check_pud_range(vma, pgd, addr, next, nodes,
 558                                    flags, private))
 559                        return -EIO;
 560        } while (pgd++, addr = next, addr != end);
 561        return 0;
 562}
 563
 564/*
 565 * Check if all pages in a range are on a set of nodes.
 566 * If pagelist != NULL then isolate pages from the LRU and
 567 * put them on the pagelist.
 568 */
 569static struct vm_area_struct *
 570check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 571                const nodemask_t *nodes, unsigned long flags, void *private)
 572{
 573        int err;
 574        struct vm_area_struct *first, *vma, *prev;
 575
 576
 577        first = find_vma(mm, start);
 578        if (!first)
 579                return ERR_PTR(-EFAULT);
 580        prev = NULL;
 581        for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
 582                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
 583                        if (!vma->vm_next && vma->vm_end < end)
 584                                return ERR_PTR(-EFAULT);
 585                        if (prev && prev->vm_end < vma->vm_start)
 586                                return ERR_PTR(-EFAULT);
 587                }
 588                if (!is_vm_hugetlb_page(vma) &&
 589                    ((flags & MPOL_MF_STRICT) ||
 590                     ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
 591                                vma_migratable(vma)))) {
 592                        unsigned long endvma = vma->vm_end;
 593
 594                        if (endvma > end)
 595                                endvma = end;
 596                        if (vma->vm_start > start)
 597                                start = vma->vm_start;
 598                        err = check_pgd_range(vma, start, endvma, nodes,
 599                                                flags, private);
 600                        if (err) {
 601                                first = ERR_PTR(err);
 602                                break;
 603                        }
 604                }
 605                prev = vma;
 606        }
 607        return first;
 608}
 609
 610/* Apply policy to a single VMA */
 611static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
 612{
 613        int err = 0;
 614        struct mempolicy *old = vma->vm_policy;
 615
 616        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 617                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
 618                 vma->vm_ops, vma->vm_file,
 619                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 620
 621        if (vma->vm_ops && vma->vm_ops->set_policy)
 622                err = vma->vm_ops->set_policy(vma, new);
 623        if (!err) {
 624                mpol_get(new);
 625                vma->vm_policy = new;
 626                mpol_put(old);
 627        }
 628        return err;
 629}
 630
 631/* Step 2: apply policy to a range and do splits. */
 632static int mbind_range(struct mm_struct *mm, unsigned long start,
 633                       unsigned long end, struct mempolicy *new_pol)
 634{
 635        struct vm_area_struct *next;
 636        struct vm_area_struct *prev;
 637        struct vm_area_struct *vma;
 638        int err = 0;
 639        unsigned long vmstart;
 640        unsigned long vmend;
 641
 642        vma = find_vma_prev(mm, start, &prev);
 643        if (!vma || vma->vm_start > start)
 644                return -EFAULT;
 645
 646        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 647                next = vma->vm_next;
 648                vmstart = max(start, vma->vm_start);
 649                vmend   = min(end, vma->vm_end);
 650
 651                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 652                                  vma->anon_vma, vma->vm_file, vma->vm_pgoff,
 653                                  new_pol);
 654                if (prev) {
 655                        vma = prev;
 656                        next = vma->vm_next;
 657                        continue;
 658                }
 659                if (vma->vm_start != vmstart) {
 660                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
 661                        if (err)
 662                                goto out;
 663                }
 664                if (vma->vm_end != vmend) {
 665                        err = split_vma(vma->vm_mm, vma, vmend, 0);
 666                        if (err)
 667                                goto out;
 668                }
 669                err = policy_vma(vma, new_pol);
 670                if (err)
 671                        goto out;
 672        }
 673
 674 out:
 675        return err;
 676}
 677
 678/*
 679 * Update task->flags PF_MEMPOLICY bit: set iff non-default
 680 * mempolicy.  Allows more rapid checking of this (combined perhaps
 681 * with other PF_* flag bits) on memory allocation hot code paths.
 682 *
 683 * If called from outside this file, the task 'p' should -only- be
 684 * a newly forked child not yet visible on the task list, because
 685 * manipulating the task flags of a visible task is not safe.
 686 *
 687 * The above limitation is why this routine has the funny name
 688 * mpol_fix_fork_child_flag().
 689 *
 690 * It is also safe to call this with a task pointer of current,
 691 * which the static wrapper mpol_set_task_struct_flag() does,
 692 * for use within this file.
 693 */
 694
 695void mpol_fix_fork_child_flag(struct task_struct *p)
 696{
 697        if (p->mempolicy)
 698                p->flags |= PF_MEMPOLICY;
 699        else
 700                p->flags &= ~PF_MEMPOLICY;
 701}
 702
 703static void mpol_set_task_struct_flag(void)
 704{
 705        mpol_fix_fork_child_flag(current);
 706}
 707
 708/* Set the process memory policy */
 709static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 710                             nodemask_t *nodes)
 711{
 712        struct mempolicy *new, *old;
 713        struct mm_struct *mm = current->mm;
 714        NODEMASK_SCRATCH(scratch);
 715        int ret;
 716
 717        if (!scratch)
 718                return -ENOMEM;
 719
 720        new = mpol_new(mode, flags, nodes);
 721        if (IS_ERR(new)) {
 722                ret = PTR_ERR(new);
 723                goto out;
 724        }
 725        /*
 726         * prevent changing our mempolicy while show_numa_maps()
 727         * is using it.
 728         * Note:  do_set_mempolicy() can be called at init time
 729         * with no 'mm'.
 730         */
 731        if (mm)
 732                down_write(&mm->mmap_sem);
 733        task_lock(current);
 734        ret = mpol_set_nodemask(new, nodes, scratch);
 735        if (ret) {
 736                task_unlock(current);
 737                if (mm)
 738                        up_write(&mm->mmap_sem);
 739                mpol_put(new);
 740                goto out;
 741        }
 742        old = current->mempolicy;
 743        current->mempolicy = new;
 744        mpol_set_task_struct_flag();
 745        if (new && new->mode == MPOL_INTERLEAVE &&
 746            nodes_weight(new->v.nodes))
 747                current->il_next = first_node(new->v.nodes);
 748        task_unlock(current);
 749        if (mm)
 750                up_write(&mm->mmap_sem);
 751
 752        mpol_put(old);
 753        ret = 0;
 754out:
 755        NODEMASK_SCRATCH_FREE(scratch);
 756        return ret;
 757}
 758
 759/*
 760 * Return nodemask for policy for get_mempolicy() query
 761 *
 762 * Called with task's alloc_lock held
 763 */
 764static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 765{
 766        nodes_clear(*nodes);
 767        if (p == &default_policy)
 768                return;
 769
 770        switch (p->mode) {
 771        case MPOL_BIND:
 772                /* Fall through */
 773        case MPOL_INTERLEAVE:
 774                *nodes = p->v.nodes;
 775                break;
 776        case MPOL_PREFERRED:
 777                if (!(p->flags & MPOL_F_LOCAL))
 778                        node_set(p->v.preferred_node, *nodes);
 779                /* else return empty node mask for local allocation */
 780                break;
 781        default:
 782                BUG();
 783        }
 784}
 785
 786static int lookup_node(struct mm_struct *mm, unsigned long addr)
 787{
 788        struct page *p;
 789        int err;
 790
 791        err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
 792        if (err >= 0) {
 793                err = page_to_nid(p);
 794                put_page(p);
 795        }
 796        return err;
 797}
 798
 799/* Retrieve NUMA policy */
 800static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 801                             unsigned long addr, unsigned long flags)
 802{
 803        int err;
 804        struct mm_struct *mm = current->mm;
 805        struct vm_area_struct *vma = NULL;
 806        struct mempolicy *pol = current->mempolicy;
 807
 808        if (flags &
 809                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 810                return -EINVAL;
 811
 812        if (flags & MPOL_F_MEMS_ALLOWED) {
 813                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 814                        return -EINVAL;
 815                *policy = 0;    /* just so it's initialized */
 816                task_lock(current);
 817                *nmask  = cpuset_current_mems_allowed;
 818                task_unlock(current);
 819                return 0;
 820        }
 821
 822        if (flags & MPOL_F_ADDR) {
 823                /*
 824                 * Do NOT fall back to task policy if the
 825                 * vma/shared policy at addr is NULL.  We
 826                 * want to return MPOL_DEFAULT in this case.
 827                 */
 828                down_read(&mm->mmap_sem);
 829                vma = find_vma_intersection(mm, addr, addr+1);
 830                if (!vma) {
 831                        up_read(&mm->mmap_sem);
 832                        return -EFAULT;
 833                }
 834                if (vma->vm_ops && vma->vm_ops->get_policy)
 835                        pol = vma->vm_ops->get_policy(vma, addr);
 836                else
 837                        pol = vma->vm_policy;
 838        } else if (addr)
 839                return -EINVAL;
 840
 841        if (!pol)
 842                pol = &default_policy;  /* indicates default behavior */
 843
 844        if (flags & MPOL_F_NODE) {
 845                if (flags & MPOL_F_ADDR) {
 846                        err = lookup_node(mm, addr);
 847                        if (err < 0)
 848                                goto out;
 849                        *policy = err;
 850                } else if (pol == current->mempolicy &&
 851                                pol->mode == MPOL_INTERLEAVE) {
 852                        *policy = current->il_next;
 853                } else {
 854                        err = -EINVAL;
 855                        goto out;
 856                }
 857        } else {
 858                *policy = pol == &default_policy ? MPOL_DEFAULT :
 859                                                pol->mode;
 860                /*
 861                 * Internal mempolicy flags must be masked off before exposing
 862                 * the policy to userspace.
 863                 */
 864                *policy |= (pol->flags & MPOL_MODE_FLAGS);
 865        }
 866
 867        if (vma) {
 868                up_read(&current->mm->mmap_sem);
 869                vma = NULL;
 870        }
 871
 872        err = 0;
 873        if (nmask) {
 874                if (mpol_store_user_nodemask(pol)) {
 875                        *nmask = pol->w.user_nodemask;
 876                } else {
 877                        task_lock(current);
 878                        get_policy_nodemask(pol, nmask);
 879                        task_unlock(current);
 880                }
 881        }
 882
 883 out:
 884        mpol_cond_put(pol);
 885        if (vma)
 886                up_read(&current->mm->mmap_sem);
 887        return err;
 888}
 889
 890#ifdef CONFIG_MIGRATION
 891/*
 892 * page migration
 893 */
 894static void migrate_page_add(struct page *page, struct list_head *pagelist,
 895                                unsigned long flags)
 896{
 897        /*
 898         * Avoid migrating a page that is shared with others.
 899         */
 900        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
 901                if (!isolate_lru_page(page)) {
 902                        list_add_tail(&page->lru, pagelist);
 903                        inc_zone_page_state(page, NR_ISOLATED_ANON +
 904                                            page_is_file_cache(page));
 905                }
 906        }
 907}
 908
 909static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 910{
 911        return alloc_pages_exact_node(node, GFP_HIGHUSER_MOVABLE, 0);
 912}
 913
 914/*
 915 * Migrate pages from one node to a target node.
 916 * Returns error or the number of pages not migrated.
 917 */
 918static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 919                           int flags)
 920{
 921        nodemask_t nmask;
 922        LIST_HEAD(pagelist);
 923        int err = 0;
 924        struct vm_area_struct *vma;
 925
 926        nodes_clear(nmask);
 927        node_set(source, nmask);
 928
 929        vma = check_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
 930                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 931        if (IS_ERR(vma))
 932                return PTR_ERR(vma);
 933
 934        if (!list_empty(&pagelist)) {
 935                err = migrate_pages(&pagelist, new_node_page, dest,
 936                                                                false, true);
 937                if (err)
 938                        putback_lru_pages(&pagelist);
 939        }
 940
 941        return err;
 942}
 943
 944/*
 945 * Move pages between the two nodesets so as to preserve the physical
 946 * layout as much as possible.
 947 *
  948 * Returns the number of pages that could not be moved.
 949 */
 950int do_migrate_pages(struct mm_struct *mm,
 951        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
 952{
 953        int busy = 0;
 954        int err;
 955        nodemask_t tmp;
 956
 957        err = migrate_prep();
 958        if (err)
 959                return err;
 960
 961        down_read(&mm->mmap_sem);
 962
 963        err = migrate_vmas(mm, from_nodes, to_nodes, flags);
 964        if (err)
 965                goto out;
 966
 967        /*
 968         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
 969         * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
 970         * bit in 'tmp', and return that <source, dest> pair for migration.
 971         * The pair of nodemasks 'to' and 'from' define the map.
 972         *
  973         * If no pair of bits is found that way, fall back to picking some
 974         * pair of 'source' and 'dest' bits that are not the same.  If the
 975         * 'source' and 'dest' bits are the same, this represents a node
 976         * that will be migrating to itself, so no pages need move.
 977         *
 978         * If no bits are left in 'tmp', or if all remaining bits left
 979         * in 'tmp' correspond to the same bit in 'to', return false
 980         * (nothing left to migrate).
 981         *
 982         * This lets us pick a pair of nodes to migrate between, such that
 983         * if possible the dest node is not already occupied by some other
 984         * source node, minimizing the risk of overloading the memory on a
  985         * node, which would happen if we migrated incoming memory to a node
  986         * before migrating outgoing memory away from that same node.
 987         *
 988         * A single scan of tmp is sufficient.  As we go, we remember the
 989         * most recent <s, d> pair that moved (s != d).  If we find a pair
 990         * that not only moved, but what's better, moved to an empty slot
 991         * (d is not set in tmp), then we break out then, with that pair.
  992         * Otherwise when we finish scanning tmp, we at least have the
 993         * most recent <s, d> pair that moved.  If we get all the way through
 994         * the scan of tmp without finding any node that moved, much less
 995         * moved to an empty node, then there is nothing left worth migrating.
 996         */
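	/*
	 * Worked example (hypothetical node numbers): from = {0,1} and
	 * to = {2,3}.  Scanning tmp = {0,1}: s=0 maps to d=2, and since
	 * node 2 is not in tmp we migrate 0 -> 2 right away; on the next
	 * pass tmp = {1}, s=1 maps to d=3, we migrate 1 -> 3, and tmp
	 * becomes empty, ending the loop.
	 */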
 997
 998        tmp = *from_nodes;
 999        while (!nodes_empty(tmp)) {
1000                int s,d;
1001                int source = -1;
1002                int dest = 0;
1003
1004                for_each_node_mask(s, tmp) {
1005                        d = node_remap(s, *from_nodes, *to_nodes);
1006                        if (s == d)
1007                                continue;
1008
1009                        source = s;     /* Node moved. Memorize */
1010                        dest = d;
1011
1012                        /* dest not in remaining from nodes? */
1013                        if (!node_isset(dest, tmp))
1014                                break;
1015                }
1016                if (source == -1)
1017                        break;
1018
1019                node_clear(source, tmp);
1020                err = migrate_to_node(mm, source, dest, flags);
1021                if (err > 0)
1022                        busy += err;
1023                if (err < 0)
1024                        break;
1025        }
1026out:
1027        up_read(&mm->mmap_sem);
1028        if (err < 0)
1029                return err;
1030        return busy;
1031
1032}
1033
1034/*
1035 * Allocate a new page for page migration based on vma policy.
1036 * Start assuming that page is mapped by vma pointed to by @private.
1037 * Search forward from there, if not.  N.B., this assumes that the
1038 * list of pages handed to migrate_pages()--which is how we get here--
1039 * is in virtual address order.
1040 */
1041static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1042{
1043        struct vm_area_struct *vma = (struct vm_area_struct *)private;
1044        unsigned long uninitialized_var(address);
1045
1046        while (vma) {
1047                address = page_address_in_vma(page, vma);
1048                if (address != -EFAULT)
1049                        break;
1050                vma = vma->vm_next;
1051        }
1052
1053        /*
1054         * if !vma, alloc_page_vma() will use task or system default policy
1055         */
1056        return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1057}
1058#else
1059
1060static void migrate_page_add(struct page *page, struct list_head *pagelist,
1061                                unsigned long flags)
1062{
1063}
1064
1065int do_migrate_pages(struct mm_struct *mm,
1066        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
1067{
1068        return -ENOSYS;
1069}
1070
1071static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
1072{
1073        return NULL;
1074}
1075#endif
1076
1077static long do_mbind(unsigned long start, unsigned long len,
1078                     unsigned short mode, unsigned short mode_flags,
1079                     nodemask_t *nmask, unsigned long flags)
1080{
1081        struct vm_area_struct *vma;
1082        struct mm_struct *mm = current->mm;
1083        struct mempolicy *new;
1084        unsigned long end;
1085        int err;
1086        LIST_HEAD(pagelist);
1087
1088        if (flags & ~(unsigned long)(MPOL_MF_STRICT |
1089                                     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1090                return -EINVAL;
1091        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1092                return -EPERM;
1093
1094        if (start & ~PAGE_MASK)
1095                return -EINVAL;
1096
1097        if (mode == MPOL_DEFAULT)
1098                flags &= ~MPOL_MF_STRICT;
1099
1100        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1101        end = start + len;
1102
1103        if (end < start)
1104                return -EINVAL;
1105        if (end == start)
1106                return 0;
1107
1108        new = mpol_new(mode, mode_flags, nmask);
1109        if (IS_ERR(new))
1110                return PTR_ERR(new);
1111
1112        /*
1113         * If we are using the default policy then operation
1114         * on discontinuous address spaces is okay after all
1115         */
1116        if (!new)
1117                flags |= MPOL_MF_DISCONTIG_OK;
1118
1119        pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1120                 start, start + len, mode, mode_flags,
1121                 nmask ? nodes_addr(*nmask)[0] : -1);
1122
1123        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1124
1125                err = migrate_prep();
1126                if (err)
1127                        goto mpol_out;
1128        }
1129        {
1130                NODEMASK_SCRATCH(scratch);
1131                if (scratch) {
1132                        down_write(&mm->mmap_sem);
1133                        task_lock(current);
1134                        err = mpol_set_nodemask(new, nmask, scratch);
1135                        task_unlock(current);
1136                        if (err)
1137                                up_write(&mm->mmap_sem);
1138                } else
1139                        err = -ENOMEM;
1140                NODEMASK_SCRATCH_FREE(scratch);
1141        }
1142        if (err)
1143                goto mpol_out;
1144
1145        vma = check_range(mm, start, end, nmask,
1146                          flags | MPOL_MF_INVERT, &pagelist);
1147
1148        err = PTR_ERR(vma);
1149        if (!IS_ERR(vma)) {
1150                int nr_failed = 0;
1151
1152                err = mbind_range(mm, start, end, new);
1153
1154                if (!list_empty(&pagelist)) {
1155                        nr_failed = migrate_pages(&pagelist, new_vma_page,
1156                                                (unsigned long)vma,
1157                                                false, true);
1158                        if (nr_failed)
1159                                putback_lru_pages(&pagelist);
1160                }
1161
1162                if (!err && nr_failed && (flags & MPOL_MF_STRICT))
1163                        err = -EIO;
1164        } else
1165                putback_lru_pages(&pagelist);
1166
1167        up_write(&mm->mmap_sem);
1168 mpol_out:
1169        mpol_put(new);
1170        return err;
1171}
1172
1173/*
1174 * User space interface with variable sized bitmaps for nodelists.
1175 */
1176
1177/* Copy a node mask from user space. */
1178static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1179                     unsigned long maxnode)
1180{
1181        unsigned long k;
1182        unsigned long nlongs;
1183        unsigned long endmask;
1184
1185        --maxnode;
1186        nodes_clear(*nodes);
1187        if (maxnode == 0 || !nmask)
1188                return 0;
1189        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1190                return -EINVAL;
1191
1192        nlongs = BITS_TO_LONGS(maxnode);
1193        if ((maxnode % BITS_PER_LONG) == 0)
1194                endmask = ~0UL;
1195        else
1196                endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1197
 1198        /* When the user specified more nodes than supported, just check
 1199           that the unsupported part is all zero. */
1200        if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1201                if (nlongs > PAGE_SIZE/sizeof(long))
1202                        return -EINVAL;
1203                for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1204                        unsigned long t;
1205                        if (get_user(t, nmask + k))
1206                                return -EFAULT;
1207                        if (k == nlongs - 1) {
1208                                if (t & endmask)
1209                                        return -EINVAL;
1210                        } else if (t)
1211                                return -EINVAL;
1212                }
1213                nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1214                endmask = ~0UL;
1215        }
1216
1217        if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1218                return -EFAULT;
1219        nodes_addr(*nodes)[nlongs-1] &= endmask;
1220        return 0;
1221}
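/*
 * Illustrative example (hypothetical values): a caller asking for nodes 0
 * and 2 sets bits 0 and 2 in the first unsigned long of the bitmap and
 * passes a maxnode comfortably larger than the highest node of interest,
 * e.g. sizeof(unsigned long) * 8.  If the user mask is wider than the
 * kernel's MAX_NUMNODES, the excess part must be all zero, as checked above.
 */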
1222
1223/* Copy a kernel node mask to user space */
1224static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1225                              nodemask_t *nodes)
1226{
1227        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1228        const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1229
1230        if (copy > nbytes) {
1231                if (copy > PAGE_SIZE)
1232                        return -EINVAL;
1233                if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1234                        return -EFAULT;
1235                copy = nbytes;
1236        }
1237        return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1238}
1239
1240SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1241                unsigned long, mode, unsigned long __user *, nmask,
1242                unsigned long, maxnode, unsigned, flags)
1243{
1244        nodemask_t nodes;
1245        int err;
1246        unsigned short mode_flags;
1247
1248        mode_flags = mode & MPOL_MODE_FLAGS;
1249        mode &= ~MPOL_MODE_FLAGS;
1250        if (mode >= MPOL_MAX)
1251                return -EINVAL;
1252        if ((mode_flags & MPOL_F_STATIC_NODES) &&
1253            (mode_flags & MPOL_F_RELATIVE_NODES))
1254                return -EINVAL;
1255        err = get_nodes(&nodes, nmask, maxnode);
1256        if (err)
1257                return err;
1258        return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1259}
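/*
 * Illustrative sketch (not part of the kernel): binding a freshly mapped
 * region to node 1 via the mbind() wrapper from libnuma's <numaif.h>.
 * The length and flags are made-up examples.
 *
 *	#include <sys/mman.h>
 *	#include <numaif.h>
 *
 *	size_t len = 16 * 4096;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	unsigned long node1 = 1UL << 1;
 *
 *	mbind(buf, len, MPOL_BIND, &node1, sizeof(node1) * 8,
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */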
1260
1261/* Set the process memory policy */
1262SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
1263                unsigned long, maxnode)
1264{
1265        int err;
1266        nodemask_t nodes;
1267        unsigned short flags;
1268
1269        flags = mode & MPOL_MODE_FLAGS;
1270        mode &= ~MPOL_MODE_FLAGS;
1271        if ((unsigned int)mode >= MPOL_MAX)
1272                return -EINVAL;
1273        if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1274                return -EINVAL;
1275        err = get_nodes(&nodes, nmask, maxnode);
1276        if (err)
1277                return err;
1278        return do_set_mempolicy(mode, flags, &nodes);
1279}
1280
1281SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1282                const unsigned long __user *, old_nodes,
1283                const unsigned long __user *, new_nodes)
1284{
1285        const struct cred *cred = current_cred(), *tcred;
1286        struct mm_struct *mm = NULL;
1287        struct task_struct *task;
1288        nodemask_t task_nodes;
1289        int err;
1290        nodemask_t *old;
1291        nodemask_t *new;
1292        NODEMASK_SCRATCH(scratch);
1293
1294        if (!scratch)
1295                return -ENOMEM;
1296
1297        old = &scratch->mask1;
1298        new = &scratch->mask2;
1299
1300        err = get_nodes(old, old_nodes, maxnode);
1301        if (err)
1302                goto out;
1303
1304        err = get_nodes(new, new_nodes, maxnode);
1305        if (err)
1306                goto out;
1307
1308        /* Find the mm_struct */
1309        rcu_read_lock();
1310        task = pid ? find_task_by_vpid(pid) : current;
1311        if (!task) {
1312                rcu_read_unlock();
1313                err = -ESRCH;
1314                goto out;
1315        }
1316        mm = get_task_mm(task);
1317        rcu_read_unlock();
1318
1319        err = -EINVAL;
1320        if (!mm)
1321                goto out;
1322
1323        /*
1324         * Check if this process has the right to modify the specified
1325         * process. The right exists if the process has administrative
1326         * capabilities, superuser privileges or the same
1327         * userid as the target process.
1328         */
1329        rcu_read_lock();
1330        tcred = __task_cred(task);
1331        if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
1332            cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
1333            !capable(CAP_SYS_NICE)) {
1334                rcu_read_unlock();
1335                err = -EPERM;
1336                goto out;
1337        }
1338        rcu_read_unlock();
1339
1340        task_nodes = cpuset_mems_allowed(task);
1341        /* Is the user allowed to access the target nodes? */
1342        if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1343                err = -EPERM;
1344                goto out;
1345        }
1346
1347        if (!nodes_subset(*new, node_states[N_HIGH_MEMORY])) {
1348                err = -EINVAL;
1349                goto out;
1350        }
1351
1352        err = security_task_movememory(task);
1353        if (err)
1354                goto out;
1355
1356        err = do_migrate_pages(mm, old, new,
1357                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1358out:
1359        if (mm)
1360                mmput(mm);
1361        NODEMASK_SCRATCH_FREE(scratch);
1362
1363        return err;
1364}
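/*
 * Illustrative sketch (not part of the kernel): moving a process' pages
 * from node 0 to node 1 with the migrate_pages() wrapper from libnuma's
 * <numaif.h>.  The pid and node numbers are made-up.
 *
 *	#include <numaif.h>
 *
 *	unsigned long from = 1UL << 0;
 *	unsigned long to   = 1UL << 1;
 *	long left = migrate_pages(pid, sizeof(from) * 8, &from, &to);
 *
 *	A positive return value is the number of pages that could not be
 *	moved.
 */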
1365
1366
1367/* Retrieve NUMA policy */
1368SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1369                unsigned long __user *, nmask, unsigned long, maxnode,
1370                unsigned long, addr, unsigned long, flags)
1371{
1372        int err;
1373        int uninitialized_var(pval);
1374        nodemask_t nodes;
1375
1376        if (nmask != NULL && maxnode < MAX_NUMNODES)
1377                return -EINVAL;
1378
1379        err = do_get_mempolicy(&pval, &nodes, addr, flags);
1380
1381        if (err)
1382                return err;
1383
1384        if (policy && put_user(pval, policy))
1385                return -EFAULT;
1386
1387        if (nmask)
1388                err = copy_nodes_to_user(nmask, maxnode, &nodes);
1389
1390        return err;
1391}
1392
1393#ifdef CONFIG_COMPAT
1394
1395asmlinkage long compat_sys_get_mempolicy(int __user *policy,
1396                                     compat_ulong_t __user *nmask,
1397                                     compat_ulong_t maxnode,
1398                                     compat_ulong_t addr, compat_ulong_t flags)
1399{
1400        long err;
1401        unsigned long __user *nm = NULL;
1402        unsigned long nr_bits, alloc_size;
1403        DECLARE_BITMAP(bm, MAX_NUMNODES);
1404
1405        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1406        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1407
1408        if (nmask)
1409                nm = compat_alloc_user_space(alloc_size);
1410
1411        err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1412
1413        if (!err && nmask) {
1414                unsigned long copy_size;
1415                copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1416                err = copy_from_user(bm, nm, copy_size);
1417                /* ensure entire bitmap is zeroed */
1418                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1419                err |= compat_put_bitmap(nmask, bm, nr_bits);
1420        }
1421
1422        return err;
1423}
1424
1425asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
1426                                     compat_ulong_t maxnode)
1427{
1428        long err = 0;
1429        unsigned long __user *nm = NULL;
1430        unsigned long nr_bits, alloc_size;
1431        DECLARE_BITMAP(bm, MAX_NUMNODES);
1432
1433        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1434        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1435
1436        if (nmask) {
1437                err = compat_get_bitmap(bm, nmask, nr_bits);
1438                nm = compat_alloc_user_space(alloc_size);
1439                err |= copy_to_user(nm, bm, alloc_size);
1440        }
1441
1442        if (err)
1443                return -EFAULT;
1444
1445        return sys_set_mempolicy(mode, nm, nr_bits+1);
1446}
1447
1448asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
1449                             compat_ulong_t mode, compat_ulong_t __user *nmask,
1450                             compat_ulong_t maxnode, compat_ulong_t flags)
1451{
1452        long err = 0;
1453        unsigned long __user *nm = NULL;
1454        unsigned long nr_bits, alloc_size;
1455        nodemask_t bm;
1456
1457        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1458        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1459
1460        if (nmask) {
1461                err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1462                nm = compat_alloc_user_space(alloc_size);
1463                err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1464        }
1465
1466        if (err)
1467                return -EFAULT;
1468
1469        return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1470}
1471
1472#endif
1473
1474/*
1475 * get_vma_policy(@task, @vma, @addr)
1476 * @task - task for fallback if vma policy == default
1477 * @vma   - virtual memory area whose policy is sought
1478 * @addr  - address in @vma for shared policy lookup
1479 *
1480 * Returns effective policy for a VMA at specified address.
1481 * Falls back to @task or system default policy, as necessary.
1482 * Current or other task's task mempolicy and non-shared vma policies
1483 * are protected by the task's mmap_sem, which must be held for read by
1484 * the caller.
1485 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1486 * count--added by the get_policy() vm_op, as appropriate--to protect against
1487 * freeing by another task.  It is the caller's responsibility to free the
1488 * extra reference for shared policies.
1489 */
1490struct mempolicy *get_vma_policy(struct task_struct *task,
1491                struct vm_area_struct *vma, unsigned long addr)
1492{
1493        struct mempolicy *pol = task->mempolicy;
1494
1495        if (vma) {
1496                if (vma->vm_ops && vma->vm_ops->get_policy) {
1497                        struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
1498                                                                        addr);
1499                        if (vpol)
1500                                pol = vpol;
1501                } else if (vma->vm_policy)
1502                        pol = vma->vm_policy;
1503        }
1504        if (!pol)
1505                pol = &default_policy;
1506        return pol;
1507}
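/*
 * Illustrative usage sketch (not part of the original source): a typical
 * caller pairs get_vma_policy() with mpol_cond_put() so that the extra
 * reference taken for shared [MPOL_F_SHARED] policies is dropped again:
 *
 *	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 *	... use pol to pick a node or zonelist ...
 *	mpol_cond_put(pol);	(no-op unless pol is a shared policy)
 */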
1508
1509/*
1510 * Return a nodemask representing a mempolicy for filtering nodes for
1511 * page allocation
1512 */
1513static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1514{
1515        /* Lower zones don't get a nodemask applied for MPOL_BIND */
1516        if (unlikely(policy->mode == MPOL_BIND) &&
1517                        gfp_zone(gfp) >= policy_zone &&
1518                        cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1519                return &policy->v.nodes;
1520
1521        return NULL;
1522}
1523
1524/* Return a zonelist indicated by gfp for node representing a mempolicy */
1525static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1526        int nd)
1527{
1528        switch (policy->mode) {
1529        case MPOL_PREFERRED:
1530                if (!(policy->flags & MPOL_F_LOCAL))
1531                        nd = policy->v.preferred_node;
1532                break;
1533        case MPOL_BIND:
1534                /*
1535                 * Normally, MPOL_BIND allocations are node-local within the
1536                 * allowed nodemask.  However, if __GFP_THISNODE is set and the
1537                 * current node isn't part of the mask, we use the zonelist for
1538                 * the first node in the mask instead.
1539                 */
1540                if (unlikely(gfp & __GFP_THISNODE) &&
1541                                unlikely(!node_isset(nd, policy->v.nodes)))
1542                        nd = first_node(policy->v.nodes);
1543                break;
1544        default:
1545                BUG();
1546        }
1547        return node_zonelist(nd, gfp);
1548}
1549
1550/* Do dynamic interleaving for a process */
1551static unsigned interleave_nodes(struct mempolicy *policy)
1552{
1553        unsigned nid, next;
1554        struct task_struct *me = current;
1555
1556        nid = me->il_next;
1557        next = next_node(nid, policy->v.nodes);
1558        if (next >= MAX_NUMNODES)
1559                next = first_node(policy->v.nodes);
1560        if (next < MAX_NUMNODES)
1561                me->il_next = next;
1562        return nid;
1563}
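/*
 * Worked example (illustrative only): with policy->v.nodes = {0,2,3} and
 * me->il_next == 2, interleave_nodes() returns 2 and advances il_next to 3;
 * the next call returns 3 and wraps il_next back around to node 0.
 */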
1564
1565/*
1566 * Depending on the memory policy provide a node from which to allocate the
1567 * next slab entry.
1568 * @policy must be protected from freeing by the caller.  If @policy is
1569 * the current task's mempolicy, this protection is implicit, as only the
1570 * task can change its policy.  The system default policy requires no
1571 * such protection.
1572 */
1573unsigned slab_node(struct mempolicy *policy)
1574{
1575        if (!policy || policy->flags & MPOL_F_LOCAL)
1576                return numa_node_id();
1577
1578        switch (policy->mode) {
1579        case MPOL_PREFERRED:
1580                /*
1581                 * handled MPOL_F_LOCAL above
1582                 */
1583                return policy->v.preferred_node;
1584
1585        case MPOL_INTERLEAVE:
1586                return interleave_nodes(policy);
1587
1588        case MPOL_BIND: {
1589                /*
1590                 * Follow bind policy behavior and start allocation at the
1591                 * first node.
1592                 */
1593                struct zonelist *zonelist;
1594                struct zone *zone;
1595                enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1596                zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
1597                (void)first_zones_zonelist(zonelist, highest_zoneidx,
1598                                                        &policy->v.nodes,
1599                                                        &zone);
1600                return zone ? zone->node : numa_node_id();
1601        }
1602
1603        default:
1604                BUG();
1605        }
1606}
1607
1608/* Do static interleaving for a VMA with known offset. */
1609static unsigned offset_il_node(struct mempolicy *pol,
1610                struct vm_area_struct *vma, unsigned long off)
1611{
1612        unsigned nnodes = nodes_weight(pol->v.nodes);
1613        unsigned target;
1614        int c;
1615        int nid = -1;
1616
1617        if (!nnodes)
1618                return numa_node_id();
1619        target = (unsigned int)off % nnodes;
1620        c = 0;
1621        do {
1622                nid = next_node(nid, pol->v.nodes);
1623                c++;
1624        } while (c <= target);
1625        return nid;
1626}
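/*
 * Worked example (illustrative only): with pol->v.nodes = {1,3,5} and
 * off == 7, nnodes == 3 and target == 7 % 3 == 1.  The loop visits node 1
 * (c == 1), then node 3 (c == 2), exits since c > target, and returns
 * nid == 3, i.e. the node at index 1 of the mask.
 */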
1627
1628/* Determine a node number for interleave */
1629static inline unsigned interleave_nid(struct mempolicy *pol,
1630                 struct vm_area_struct *vma, unsigned long addr, int shift)
1631{
1632        if (vma) {
1633                unsigned long off;
1634
1635                /*
1636                 * for small pages, there is no difference between
1637                 * shift and PAGE_SHIFT, so the bit-shift is safe.
1638                 * for huge pages, since vm_pgoff is in units of small
1639                 * pages, we need to shift off the always 0 bits to get
1640                 * a useful offset.
1641                 */
1642                BUG_ON(shift < PAGE_SHIFT);
1643                off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1644                off += (addr - vma->vm_start) >> shift;
1645                return offset_il_node(pol, vma, off);
1646        } else
1647                return interleave_nodes(pol);
1648}
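/*
 * Worked example (illustrative only): for a 2MB huge page mapping with
 * shift == 21 and PAGE_SHIFT == 12, a vm_pgoff of 1024 small pages (4MB
 * into the object) gives off = 1024 >> 9 == 2 huge pages; the huge-page
 * index of addr within the VMA is then added, and that offset is handed
 * to offset_il_node() to pick the interleave node.
 */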
1649
1650/*
1651 * Return the bit number of a random bit set in the nodemask.
1652 * (returns -1 if nodemask is empty)
1653 */
1654int node_random(const nodemask_t *maskp)
1655{
1656        int w, bit = -1;
1657
1658        w = nodes_weight(*maskp);
1659        if (w)
1660                bit = bitmap_ord_to_pos(maskp->bits,
1661                        get_random_int() % w, MAX_NUMNODES);
1662        return bit;
1663}
1664
1665#ifdef CONFIG_HUGETLBFS
1666/*
1667 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1668 * @vma = virtual memory area whose policy is sought
1669 * @addr = address in @vma for shared policy lookup and interleave policy
1670 * @gfp_flags = for requested zone
1671 * @mpol = pointer to mempolicy pointer for reference counted mempolicy
1672 * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
1673 *
1674 * Returns a zonelist suitable for a huge page allocation and a pointer
1675 * to the struct mempolicy for conditional unref after allocation.
1676 * If the effective policy is MPOL_BIND, returns a pointer to the mempolicy's
1677 * @nodemask for filtering the zonelist.
1678 *
1679 * Must be protected by get_mems_allowed()
1680 */
1681struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1682                                gfp_t gfp_flags, struct mempolicy **mpol,
1683                                nodemask_t **nodemask)
1684{
1685        struct zonelist *zl;
1686
1687        *mpol = get_vma_policy(current, vma, addr);
1688        *nodemask = NULL;       /* assume !MPOL_BIND */
1689
1690        if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1691                zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1692                                huge_page_shift(hstate_vma(vma))), gfp_flags);
1693        } else {
1694                zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1695                if ((*mpol)->mode == MPOL_BIND)
1696                        *nodemask = &(*mpol)->v.nodes;
1697        }
1698        return zl;
1699}
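/*
 * Illustrative usage sketch (not from the original source): a caller
 * looks up the zonelist and policy, allocates, then conditionally
 * unrefs the policy:
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	struct zonelist *zl;
 *
 *	zl = huge_zonelist(vma, addr, gfp_mask, &mpol, &nodemask);
 *	... walk zl, restricted to *nodemask when it is non-NULL ...
 *	mpol_cond_put(mpol);
 */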
1700
1701/*
1702 * init_nodemask_of_mempolicy
1703 *
1704 * If the current task's mempolicy is "default" [NULL], return 'false'
1705 * to indicate default policy.  Otherwise, extract the policy nodemask
1706 * for 'bind' or 'interleave' policy into the argument nodemask, or
1707 * initialize the argument nodemask to contain the single node for
1708 * 'preferred' or 'local' policy and return 'true' to indicate presence
1709 * of non-default mempolicy.
1710 *
1711 * We don't bother with reference counting the mempolicy [mpol_get/put]
1712 * because the current task is examining its own mempolicy and a task's
1713 * mempolicy is only ever changed by the task itself.
1714 *
1715 * N.B., it is the caller's responsibility to free a returned nodemask.
1716 */
1717bool init_nodemask_of_mempolicy(nodemask_t *mask)
1718{
1719        struct mempolicy *mempolicy;
1720        int nid;
1721
1722        if (!(mask && current->mempolicy))
1723                return false;
1724
1725        task_lock(current);
1726        mempolicy = current->mempolicy;
1727        switch (mempolicy->mode) {
1728        case MPOL_PREFERRED:
1729                if (mempolicy->flags & MPOL_F_LOCAL)
1730                        nid = numa_node_id();
1731                else
1732                        nid = mempolicy->v.preferred_node;
1733                init_nodemask_of_node(mask, nid);
1734                break;
1735
1736        case MPOL_BIND:
1737                /* Fall through */
1738        case MPOL_INTERLEAVE:
1739                *mask =  mempolicy->v.nodes;
1740                break;
1741
1742        default:
1743                BUG();
1744        }
1745        task_unlock(current);
1746
1747        return true;
1748}
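/*
 * Illustrative usage sketch (not from the original source): a hugetlb
 * pool-resizing handler might use this to obey the task's mempolicy,
 * falling back to all nodes with memory when the policy is default:
 *
 *	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
 *	if (!nodes_allowed || !init_nodemask_of_mempolicy(nodes_allowed)) {
 *		NODEMASK_FREE(nodes_allowed);
 *		nodes_allowed = &node_states[N_HIGH_MEMORY];
 *	}
 */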
1749#endif
1750
1751/*
1752 * mempolicy_nodemask_intersects
1753 *
1754 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1755 * policy.  Otherwise, check for intersection between mask and the policy
1756 * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
1757 * policy, always return true since it may allocate elsewhere on fallback.
1758 *
1759 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1760 */
1761bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1762                                        const nodemask_t *mask)
1763{
1764        struct mempolicy *mempolicy;
1765        bool ret = true;
1766
1767        if (!mask)
1768                return ret;
1769        task_lock(tsk);
1770        mempolicy = tsk->mempolicy;
1771        if (!mempolicy)
1772                goto out;
1773
1774        switch (mempolicy->mode) {
1775        case MPOL_PREFERRED:
1776                /*
1777                 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes to
1778                 * allocate from; the task may fall back to other nodes when oom.
1779                 * Thus, it's possible for tsk to have allocated memory from
1780                 * nodes in mask.
1781                 */
1782                break;
1783        case MPOL_BIND:
1784        case MPOL_INTERLEAVE:
1785                ret = nodes_intersects(mempolicy->v.nodes, *mask);
1786                break;
1787        default:
1788                BUG();
1789        }
1790out:
1791        task_unlock(tsk);
1792        return ret;
1793}
1794
1795/* Allocate a page in interleaved policy.
1796   Own path because it needs to do special accounting. */
1797static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1798                                        unsigned nid)
1799{
1800        struct zonelist *zl;
1801        struct page *page;
1802
1803        zl = node_zonelist(nid, gfp);
1804        page = __alloc_pages(gfp, order, zl);
1805        if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1806                inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1807        return page;
1808}
1809
1810/**
1811 *      alloc_pages_vma - Allocate a page for a VMA.
1812 *
1813 *      @gfp:
1814 *      %GFP_USER    user allocation.
1815 *      %GFP_KERNEL  kernel allocations,
1816 *      %GFP_HIGHMEM highmem/user allocations,
1817 *      %GFP_FS      allocation should not call back into a file system.
1818 *      %GFP_ATOMIC  don't sleep.
1819 *
1820 *      @order:Order of the GFP allocation.
1821 *      @vma:  Pointer to VMA or NULL if not available.
1822 *      @addr: Virtual Address of the allocation. Must be inside the VMA.
1823 *
1824 *      This function allocates a page from the kernel page pool and applies
1825 *      a NUMA policy associated with the VMA or the current process.
1826 *      When VMA is not NULL caller must hold down_read on the mmap_sem of the
1827 *      mm_struct of the VMA to prevent it from going away. Should be used for
1828 *      all allocations for pages that will be mapped into
1829 *      user space. Returns NULL when no page can be allocated.
1830 *
1831 *      Should be called with the mmap_sem of the vma's mm held.
1832 */
1833struct page *
1834alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1835                unsigned long addr, int node)
1836{
1837        struct mempolicy *pol = get_vma_policy(current, vma, addr);
1838        struct zonelist *zl;
1839        struct page *page;
1840
1841        get_mems_allowed();
1842        if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1843                unsigned nid;
1844
1845                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1846                mpol_cond_put(pol);
1847                page = alloc_page_interleave(gfp, order, nid);
1848                put_mems_allowed();
1849                return page;
1850        }
1851        zl = policy_zonelist(gfp, pol, node);
1852        if (unlikely(mpol_needs_cond_ref(pol))) {
1853                /*
1854                 * slow path: ref counted shared policy
1855                 */
1856                struct page *page =  __alloc_pages_nodemask(gfp, order,
1857                                                zl, policy_nodemask(gfp, pol));
1858                __mpol_put(pol);
1859                put_mems_allowed();
1860                return page;
1861        }
1862        /*
1863         * fast path:  default or task policy
1864         */
1865        page = __alloc_pages_nodemask(gfp, order, zl,
1866                                      policy_nodemask(gfp, pol));
1867        put_mems_allowed();
1868        return page;
1869}
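/*
 * Illustrative sketch (not from the original source): anonymous fault
 * handlers typically reach this through a wrapper along the lines of
 *
 *	#define alloc_page_vma(gfp, vma, addr)			\
 *		alloc_pages_vma(gfp, 0, vma, addr, numa_node_id())
 *
 * so a zero-order GFP_HIGHUSER_MOVABLE page is placed according to the
 * VMA's (or the task's) mempolicy.
 */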
1870
1871/**
1872 *      alloc_pages_current - Allocate pages.
1873 *
1874 *      @gfp:
1875 *              %GFP_USER   user allocation,
1876 *              %GFP_KERNEL kernel allocation,
1877 *              %GFP_HIGHMEM highmem allocation,
1878 *              %GFP_FS     don't call back into a file system.
1879 *              %GFP_ATOMIC don't sleep.
1880 *      @order: Power of two of allocation size in pages. 0 is a single page.
1881 *
1882 *      Allocate a page from the kernel page pool.  When not in
1883 *      interrupt context, apply the current process' NUMA policy.
1884 *      Returns NULL when no page can be allocated.
1885 *
1886 *      Don't call cpuset_update_task_memory_state() unless
1887 *      1) it's ok to take cpuset_sem (can WAIT), and
1888 *      2) allocating for current task (not interrupt).
1889 */
1890struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1891{
1892        struct mempolicy *pol = current->mempolicy;
1893        struct page *page;
1894
1895        if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
1896                pol = &default_policy;
1897
1898        get_mems_allowed();
1899        /*
1900         * No reference counting needed for current->mempolicy
1901         * nor system default_policy
1902         */
1903        if (pol->mode == MPOL_INTERLEAVE)
1904                page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1905        else
1906                page = __alloc_pages_nodemask(gfp, order,
1907                                policy_zonelist(gfp, pol, numa_node_id()),
1908                                policy_nodemask(gfp, pol));
1909        put_mems_allowed();
1910        return page;
1911}
1912EXPORT_SYMBOL(alloc_pages_current);
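/*
 * Illustrative note (not from the original source): on CONFIG_NUMA kernels
 * the generic allocation entry point funnels through here, roughly:
 *
 *	static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
 *	{
 *		return alloc_pages_current(gfp_mask, order);
 *	}
 *
 * so an ordinary alloc_page(GFP_KERNEL) in process context already honours
 * the caller's task mempolicy.
 */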
1913
1914/*
1915 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
1916 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
1917 * with the mems_allowed returned by cpuset_mems_allowed().  This
1918 * keeps mempolicies cpuset relative after its cpuset moves.  See
1919 * further kernel/cpuset.c update_nodemask().
1920 *
1921 * current's mempolicy may be rebound by another task (the task that changes
1922 * the cpuset's mems), so we needn't do rebind work for the current task.
1923 */
1924
1925/* Slow path of a mempolicy duplicate */
1926struct mempolicy *__mpol_dup(struct mempolicy *old)
1927{
1928        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
1929
1930        if (!new)
1931                return ERR_PTR(-ENOMEM);
1932
1933        /* task's mempolicy is protected by alloc_lock */
1934        if (old == current->mempolicy) {
1935                task_lock(current);
1936                *new = *old;
1937                task_unlock(current);
1938        } else
1939                *new = *old;
1940
1941        rcu_read_lock();
1942        if (current_cpuset_is_being_rebound()) {
1943                nodemask_t mems = cpuset_mems_allowed(current);
1944                if (new->flags & MPOL_F_REBINDING)
1945                        mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
1946                else
1947                        mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
1948        }
1949        rcu_read_unlock();
1950        atomic_set(&new->refcnt, 1);
1951        return new;
1952}
1953
1954/*
1955 * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
1956 * eliminate the MPOL_F_* flags that require conditional ref and
1957 * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
1958 * after return.  Use the returned value.
1959 *
1960 * Allows use of a mempolicy for, e.g., multiple allocations with a single
1961 * policy lookup, even if the policy needs/has extra ref on lookup.
1962 * shmem_readahead needs this.
1963 */
1964struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
1965                                                struct mempolicy *frompol)
1966{
1967        if (!mpol_needs_cond_ref(frompol))
1968                return frompol;
1969
1970        *tompol = *frompol;
1971        tompol->flags &= ~MPOL_F_SHARED;        /* copy doesn't need unref */
1972        __mpol_put(frompol);
1973        return tompol;
1974}
1975
1976/* Slow path of a mempolicy comparison */
1977int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
1978{
1979        if (!a || !b)
1980                return 0;
1981        if (a->mode != b->mode)
1982                return 0;
1983        if (a->flags != b->flags)
1984                return 0;
1985        if (mpol_store_user_nodemask(a))
1986                if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
1987                        return 0;
1988
1989        switch (a->mode) {
1990        case MPOL_BIND:
1991                /* Fall through */
1992        case MPOL_INTERLEAVE:
1993                return nodes_equal(a->v.nodes, b->v.nodes);
1994        case MPOL_PREFERRED:
1995                return a->v.preferred_node == b->v.preferred_node;
1996        default:
1997                BUG();
1998                return 0;
1999        }
2000}
2001
2002/*
2003 * Shared memory backing store policy support.
2004 *
2005 * Remember policies even when nobody has shared memory mapped.
2006 * The policies are kept in a Red-Black tree linked from the inode.
2007 * They are protected by the sp->lock spinlock, which should be held
2008 * for any accesses to the tree.
2009 */
2010
2011/* lookup first element intersecting start-end */
2012/* Caller holds sp->lock */
2013static struct sp_node *
2014sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2015{
2016        struct rb_node *n = sp->root.rb_node;
2017
2018        while (n) {
2019                struct sp_node *p = rb_entry(n, struct sp_node, nd);
2020
2021                if (start >= p->end)
2022                        n = n->rb_right;
2023                else if (end <= p->start)
2024                        n = n->rb_left;
2025                else
2026                        break;
2027        }
2028        if (!n)
2029                return NULL;
2030        for (;;) {
2031                struct sp_node *w = NULL;
2032                struct rb_node *prev = rb_prev(n);
2033                if (!prev)
2034                        break;
2035                w = rb_entry(prev, struct sp_node, nd);
2036                if (w->end <= start)
2037                        break;
2038                n = prev;
2039        }
2040        return rb_entry(n, struct sp_node, nd);
2041}
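/*
 * Worked example (illustrative only): with stored ranges [2,4) and [6,9),
 * sp_lookup(sp, 3, 8) may land on either intersecting node during the
 * binary search; the backward walk over rb_prev() then ensures the first
 * node overlapping [3,8), namely [2,4), is the one returned.
 */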
2042
2043/* Insert a new shared policy into the list. */
2044/* Caller holds sp->lock */
2045static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2046{
2047        struct rb_node **p = &sp->root.rb_node;
2048        struct rb_node *parent = NULL;
2049        struct sp_node *nd;
2050
2051        while (*p) {
2052                parent = *p;
2053                nd = rb_entry(parent, struct sp_node, nd);
2054                if (new->start < nd->start)
2055                        p = &(*p)->rb_left;
2056                else if (new->end > nd->end)
2057                        p = &(*p)->rb_right;
2058                else
2059                        BUG();
2060        }
2061        rb_link_node(&new->nd, parent, p);
2062        rb_insert_color(&new->nd, &sp->root);
2063        pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2064                 new->policy ? new->policy->mode : 0);
2065}
2066
2067/* Find shared policy intersecting idx */
2068struct mempolicy *
2069mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2070{
2071        struct mempolicy *pol = NULL;
2072        struct sp_node *sn;
2073
2074        if (!sp->root.rb_node)
2075                return NULL;
2076        spin_lock(&sp->lock);
2077        sn = sp_lookup(sp, idx, idx+1);
2078        if (sn) {
2079                mpol_get(sn->policy);
2080                pol = sn->policy;
2081        }
2082        spin_unlock(&sp->lock);
2083        return pol;
2084}
2085
2086static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2087{
2088        pr_debug("deleting %lx-%lx\n", n->start, n->end);
2089        rb_erase(&n->nd, &sp->root);
2090        mpol_put(n->policy);
2091        kmem_cache_free(sn_cache, n);
2092}
2093
2094static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2095                                struct mempolicy *pol)
2096{
2097        struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2098
2099        if (!n)
2100                return NULL;
2101        n->start = start;
2102        n->end = end;
2103        mpol_get(pol);
2104        pol->flags |= MPOL_F_SHARED;    /* for unref */
2105        n->policy = pol;
2106        return n;
2107}
2108
2109/* Replace a policy range. */
2110static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2111                                 unsigned long end, struct sp_node *new)
2112{
2113        struct sp_node *n, *new2 = NULL;
2114
2115restart:
2116        spin_lock(&sp->lock);
2117        n = sp_lookup(sp, start, end);
2118        /* Take care of old policies in the same range. */
2119        while (n && n->start < end) {
2120                struct rb_node *next = rb_next(&n->nd);
2121                if (n->start >= start) {
2122                        if (n->end <= end)
2123                                sp_delete(sp, n);
2124                        else
2125                                n->start = end;
2126                } else {
2127                        /* Old policy spanning whole new range. */
2128                        if (n->end > end) {
2129                                if (!new2) {
2130                                        spin_unlock(&sp->lock);
2131                                        new2 = sp_alloc(end, n->end, n->policy);
2132                                        if (!new2)
2133                                                return -ENOMEM;
2134                                        goto restart;
2135                                }
2136                                n->end = start;
2137                                sp_insert(sp, new2);
2138                                new2 = NULL;
2139                                break;
2140                        } else
2141                                n->end = start;
2142                }
2143                if (!next)
2144                        break;
2145                n = rb_entry(next, struct sp_node, nd);
2146        }
2147        if (new)
2148                sp_insert(sp, new);
2149        spin_unlock(&sp->lock);
2150        if (new2) {
2151                mpol_put(new2->policy);
2152                kmem_cache_free(sn_cache, new2);
2153        }
2154        return 0;
2155}
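/*
 * Worked example (illustrative only): if the tree holds a single node
 * covering [0,10) and a new policy is installed for [3,6), the old node
 * is split.  The first pass drops the lock to allocate new2 for [6,10)
 * and restarts; the second pass truncates the old node to [0,3), inserts
 * new2, and finally inserts the new node for [3,6), leaving three
 * disjoint ranges.
 */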
2156
2157/**
2158 * mpol_shared_policy_init - initialize shared policy for inode
2159 * @sp: pointer to inode shared policy
2160 * @mpol:  struct mempolicy to install
2161 *
2162 * Install non-NULL @mpol in inode's shared policy rb-tree.
2163 * On entry, the current task has a reference on a non-NULL @mpol.
2164 * This must be released on exit.
2165 * This is called from get_inode() calls, so we can use GFP_KERNEL.
2166 */
2167void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2168{
2169        int ret;
2170
2171        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
2172        spin_lock_init(&sp->lock);
2173
2174        if (mpol) {
2175                struct vm_area_struct pvma;
2176                struct mempolicy *new;
2177                NODEMASK_SCRATCH(scratch);
2178
2179                if (!scratch)
2180                        goto put_mpol;
2181                /* contextualize the tmpfs mount point mempolicy */
2182                new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2183                if (IS_ERR(new))
2184                        goto free_scratch; /* no valid nodemask intersection */
2185
2186                task_lock(current);
2187                ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2188                task_unlock(current);
2189                if (ret)
2190                        goto put_new;
2191
2192                /* Create pseudo-vma that contains just the policy */
2193                memset(&pvma, 0, sizeof(struct vm_area_struct));
2194                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
2195                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2196
2197put_new:
2198                mpol_put(new);                  /* drop initial ref */
2199free_scratch:
2200                NODEMASK_SCRATCH_FREE(scratch);
2201put_mpol:
2202                mpol_put(mpol); /* drop our incoming ref on sb mpol */
2203        }
2204}
2205
2206int mpol_set_shared_policy(struct shared_policy *info,
2207                        struct vm_area_struct *vma, struct mempolicy *npol)
2208{
2209        int err;
2210        struct sp_node *new = NULL;
2211        unsigned long sz = vma_pages(vma);
2212
2213        pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2214                 vma->vm_pgoff,
2215                 sz, npol ? npol->mode : -1,
2216                 npol ? npol->flags : -1,
2217                 npol ? nodes_addr(npol->v.nodes)[0] : -1);
2218
2219        if (npol) {
2220                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2221                if (!new)
2222                        return -ENOMEM;
2223        }
2224        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2225        if (err && new)
2226                kmem_cache_free(sn_cache, new);
2227        return err;
2228}
2229
2230/* Free a backing policy store on inode delete. */
2231void mpol_free_shared_policy(struct shared_policy *p)
2232{
2233        struct sp_node *n;
2234        struct rb_node *next;
2235
2236        if (!p->root.rb_node)
2237                return;
2238        spin_lock(&p->lock);
2239        next = rb_first(&p->root);
2240        while (next) {
2241                n = rb_entry(next, struct sp_node, nd);
2242                next = rb_next(&n->nd);
2243                rb_erase(&n->nd, &p->root);
2244                mpol_put(n->policy);
2245                kmem_cache_free(sn_cache, n);
2246        }
2247        spin_unlock(&p->lock);
2248}
2249
2250/* assumes fs == KERNEL_DS */
2251void __init numa_policy_init(void)
2252{
2253        nodemask_t interleave_nodes;
2254        unsigned long largest = 0;
2255        int nid, prefer = 0;
2256
2257        policy_cache = kmem_cache_create("numa_policy",
2258                                         sizeof(struct mempolicy),
2259                                         0, SLAB_PANIC, NULL);
2260
2261        sn_cache = kmem_cache_create("shared_policy_node",
2262                                     sizeof(struct sp_node),
2263                                     0, SLAB_PANIC, NULL);
2264
2265        /*
2266         * Set interleaving policy for system init. Interleaving is only
2267         * enabled across suitably sized nodes (default is >= 16MB), or
2268         * fall back to the largest node if they're all smaller.
2269         */
2270        nodes_clear(interleave_nodes);
2271        for_each_node_state(nid, N_HIGH_MEMORY) {
2272                unsigned long total_pages = node_present_pages(nid);
2273
2274                /* Preserve the largest node */
2275                if (largest < total_pages) {
2276                        largest = total_pages;
2277                        prefer = nid;
2278                }
2279
2280                /* Interleave this node? */
2281                if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2282                        node_set(nid, interleave_nodes);
2283        }
2284
2285        /* All too small, use the largest */
2286        if (unlikely(nodes_empty(interleave_nodes)))
2287                node_set(prefer, interleave_nodes);
2288
2289        if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2290                printk(KERN_WARNING "numa_policy_init: interleaving failed\n");
2291}
2292
2293/* Reset policy of current process to default */
2294void numa_default_policy(void)
2295{
2296        do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2297}
2298
2299/*
2300 * Parse and format mempolicy from/to strings
2301 */
2302
2303/*
2304 * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
2305 * Used only for mpol_parse_str() and mpol_to_str()
2306 */
2307#define MPOL_LOCAL MPOL_MAX
2308static const char * const policy_modes[] =
2309{
2310        [MPOL_DEFAULT]    = "default",
2311        [MPOL_PREFERRED]  = "prefer",
2312        [MPOL_BIND]       = "bind",
2313        [MPOL_INTERLEAVE] = "interleave",
2314        [MPOL_LOCAL]      = "local"
2315};
2316
2317
2318#ifdef CONFIG_TMPFS
2319/**
2320 * mpol_parse_str - parse string to mempolicy
2321 * @str:  string containing mempolicy to parse
2322 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2323 * @no_context:  flag whether to "contextualize" the mempolicy
2324 *
2325 * Format of input:
2326 *      <mode>[=<flags>][:<nodelist>]
2327 *
2328 * if @no_context is true, save the input nodemask in w.user_nodemask in
2329 * the returned mempolicy.  This will be used to "clone" the mempolicy in
2330 * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
2331 * mount option.  Note that if 'static' or 'relative' mode flags were
2332 * specified, the input nodemask will already have been saved.  Saving
2333 * it again is redundant, but safe.
2334 *
2335 * On success, returns 0, else 1
2336 */
2337int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
2338{
2339        struct mempolicy *new = NULL;
2340        unsigned short mode;
2341        unsigned short uninitialized_var(mode_flags);
2342        nodemask_t nodes;
2343        char *nodelist = strchr(str, ':');
2344        char *flags = strchr(str, '=');
2345        int err = 1;
2346
2347        if (nodelist) {
2348                /* NUL-terminate mode or flags string */
2349                *nodelist++ = '\0';
2350                if (nodelist_parse(nodelist, nodes))
2351                        goto out;
2352                if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
2353                        goto out;
2354        } else
2355                nodes_clear(nodes);
2356
2357        if (flags)
2358                *flags++ = '\0';        /* terminate mode string */
2359
2360        for (mode = 0; mode <= MPOL_LOCAL; mode++) {
2361                if (!strcmp(str, policy_modes[mode])) {
2362                        break;
2363                }
2364        }
2365        if (mode > MPOL_LOCAL)
2366                goto out;
2367
2368        switch (mode) {
2369        case MPOL_PREFERRED:
2370                /*
2371                 * Insist on a nodelist of one node only
2372                 */
2373                if (nodelist) {
2374                        char *rest = nodelist;
2375                        while (isdigit(*rest))
2376                                rest++;
2377                        if (*rest)
2378                                goto out;
2379                }
2380                break;
2381        case MPOL_INTERLEAVE:
2382                /*
2383                 * Default to online nodes with memory if no nodelist
2384                 */
2385                if (!nodelist)
2386                        nodes = node_states[N_HIGH_MEMORY];
2387                break;
2388        case MPOL_LOCAL:
2389                /*
2390                 * Don't allow a nodelist;  mpol_new() checks flags
2391                 */
2392                if (nodelist)
2393                        goto out;
2394                mode = MPOL_PREFERRED;
2395                break;
2396        case MPOL_DEFAULT:
2397                /*
2398                 * Insist on an empty nodelist
2399                 */
2400                if (!nodelist)
2401                        err = 0;
2402                goto out;
2403        case MPOL_BIND:
2404                /*
2405                 * Insist on a nodelist
2406                 */
2407                if (!nodelist)
2408                        goto out;
2409        }
2410
2411        mode_flags = 0;
2412        if (flags) {
2413                /*
2414                 * Currently, we only support two mutually exclusive
2415                 * mode flags.
2416                 */
2417                if (!strcmp(flags, "static"))
2418                        mode_flags |= MPOL_F_STATIC_NODES;
2419                else if (!strcmp(flags, "relative"))
2420                        mode_flags |= MPOL_F_RELATIVE_NODES;
2421                else
2422                        goto out;
2423        }
2424
2425        new = mpol_new(mode, mode_flags, &nodes);
2426        if (IS_ERR(new))
2427                goto out;
2428
2429        if (no_context) {
2430                /* save for contextualization */
2431                new->w.user_nodemask = nodes;
2432        } else {
2433                int ret;
2434                NODEMASK_SCRATCH(scratch);
2435                if (scratch) {
2436                        task_lock(current);
2437                        ret = mpol_set_nodemask(new, &nodes, scratch);
2438                        task_unlock(current);
2439                } else
2440                        ret = -ENOMEM;
2441                NODEMASK_SCRATCH_FREE(scratch);
2442                if (ret) {
2443                        mpol_put(new);
2444                        goto out;
2445                }
2446        }
2447        err = 0;
2448
2449out:
2450        /* Restore string for error message */
2451        if (nodelist)
2452                *--nodelist = ':';
2453        if (flags)
2454                *--flags = '=';
2455        if (!err)
2456                *mpol = new;
2457        return err;
2458}
2459#endif /* CONFIG_TMPFS */
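/*
 * Illustrative examples (not from the original source) of strings that
 * mpol_parse_str() accepts, e.g. as tmpfs "mpol=" mount options:
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=prefer=static:1 tmpfs /mnt
 *	mount -t tmpfs -o mpol=bind:0,2 tmpfs /mnt
 *	mount -t tmpfs -o mpol=local tmpfs /mnt
 */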
2460
2461/**
2462 * mpol_to_str - format a mempolicy structure for printing
2463 * @buffer:  to contain formatted mempolicy string
2464 * @maxlen:  length of @buffer
2465 * @pol:  pointer to mempolicy to be formatted
2466 * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
2467 *
2468 * Convert a mempolicy into a string.
2469 * Returns the number of characters in buffer (if positive)
2470 * or an error (negative)
2471 */
2472int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
2473{
2474        char *p = buffer;
2475        int l;
2476        nodemask_t nodes;
2477        unsigned short mode;
2478        unsigned short flags = pol ? pol->flags : 0;
2479
2480        /*
2481         * Sanity check:  room for longest mode, flag and some nodes
2482         */
2483        VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
2484
2485        if (!pol || pol == &default_policy)
2486                mode = MPOL_DEFAULT;
2487        else
2488                mode = pol->mode;
2489
2490        switch (mode) {
2491        case MPOL_DEFAULT:
2492                nodes_clear(nodes);
2493                break;
2494
2495        case MPOL_PREFERRED:
2496                nodes_clear(nodes);
2497                if (flags & MPOL_F_LOCAL)
2498                        mode = MPOL_LOCAL;      /* pseudo-policy */
2499                else
2500                        node_set(pol->v.preferred_node, nodes);
2501                break;
2502
2503        case MPOL_BIND:
2504                /* Fall through */
2505        case MPOL_INTERLEAVE:
2506                if (no_context)
2507                        nodes = pol->w.user_nodemask;
2508                else
2509                        nodes = pol->v.nodes;
2510                break;
2511
2512        default:
2513                BUG();
2514        }
2515
2516        l = strlen(policy_modes[mode]);
2517        if (buffer + maxlen < p + l + 1)
2518                return -ENOSPC;
2519
2520        strcpy(p, policy_modes[mode]);
2521        p += l;
2522
2523        if (flags & MPOL_MODE_FLAGS) {
2524                if (buffer + maxlen < p + 2)
2525                        return -ENOSPC;
2526                *p++ = '=';
2527
2528                /*
2529                 * Currently, the only defined flags are mutually exclusive
2530                 */
2531                if (flags & MPOL_F_STATIC_NODES)
2532                        p += snprintf(p, buffer + maxlen - p, "static");
2533                else if (flags & MPOL_F_RELATIVE_NODES)
2534                        p += snprintf(p, buffer + maxlen - p, "relative");
2535        }
2536
2537        if (!nodes_empty(nodes)) {
2538                if (buffer + maxlen < p + 2)
2539                        return -ENOSPC;
2540                *p++ = ':';
2541                p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
2542        }
2543        return p - buffer;
2544}
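/*
 * Illustrative examples (not from the original source) of the strings
 * mpol_to_str() produces, in the same <mode>[=<flags>][:<nodelist>]
 * format that mpol_parse_str() accepts:
 *
 *	"default"
 *	"prefer=static:2"
 *	"interleave:0-3"
 *	"bind=relative:1,3"
 *	"local"
 */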
2545