linux/kernel/cpuset.c
   1/*
   2 *  kernel/cpuset.c
   3 *
   4 *  Processor and Memory placement constraints for sets of tasks.
   5 *
   6 *  Copyright (C) 2003 BULL SA.
   7 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
   8 *  Copyright (C) 2006 Google, Inc
   9 *
  10 *  Portions derived from Patrick Mochel's sysfs code.
  11 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
  12 *
  13 *  2003-10-10 Written by Simon Derr.
  14 *  2003-10-22 Updates by Stephen Hemminger.
  15 *  2004 May-July Rework by Paul Jackson.
  16 *  2006 Rework by Paul Menage to use generic cgroups
  17 *  2008 Rework of the scheduler domains and CPU hotplug handling
  18 *       by Max Krasnyansky
  19 *
  20 *  This file is subject to the terms and conditions of the GNU General Public
  21 *  License.  See the file COPYING in the main directory of the Linux
  22 *  distribution for more details.
  23 */
  24
  25#include <linux/cpu.h>
  26#include <linux/cpumask.h>
  27#include <linux/cpuset.h>
  28#include <linux/err.h>
  29#include <linux/errno.h>
  30#include <linux/file.h>
  31#include <linux/fs.h>
  32#include <linux/init.h>
  33#include <linux/interrupt.h>
  34#include <linux/kernel.h>
  35#include <linux/kmod.h>
  36#include <linux/list.h>
  37#include <linux/mempolicy.h>
  38#include <linux/mm.h>
  39#include <linux/memory.h>
  40#include <linux/export.h>
  41#include <linux/mount.h>
  42#include <linux/namei.h>
  43#include <linux/pagemap.h>
  44#include <linux/proc_fs.h>
  45#include <linux/rcupdate.h>
  46#include <linux/sched.h>
  47#include <linux/seq_file.h>
  48#include <linux/security.h>
  49#include <linux/slab.h>
  50#include <linux/spinlock.h>
  51#include <linux/stat.h>
  52#include <linux/string.h>
  53#include <linux/time.h>
  54#include <linux/backing-dev.h>
  55#include <linux/sort.h>
  56
  57#include <asm/uaccess.h>
  58#include <linux/atomic.h>
  59#include <linux/mutex.h>
  60#include <linux/workqueue.h>
  61#include <linux/cgroup.h>
  62#include <linux/wait.h>
  63
  64/*
  65 * Tracks how many cpusets are currently defined in the system.
  66 * When there is only one cpuset (the root cpuset) we can
  67 * short circuit some hooks.
  68 */
  69int number_of_cpusets __read_mostly;
  70
  71/* Forward declare cgroup structures */
  72struct cgroup_subsys cpuset_subsys;
  73struct cpuset;
  74
  75/* See "Frequency meter" comments, below. */
  76
  77struct fmeter {
  78        int cnt;                /* unprocessed events count */
  79        int val;                /* most recent output value */
  80        time_t time;            /* clock (secs) when val computed */
  81        spinlock_t lock;        /* guards read or write of above */
  82};
  83
  84struct cpuset {
  85        struct cgroup_subsys_state css;
  86
  87        unsigned long flags;            /* "unsigned long" so bitops work */
  88        cpumask_var_t cpus_allowed;     /* CPUs allowed to tasks in cpuset */
  89        nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */
  90
  91        /*
  92         * These are the old Memory Nodes that tasks took on.
  93         *
  94         * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
  95         * - A new cpuset's old_mems_allowed is initialized when some
  96         *   task is moved into it.
  97         * - old_mems_allowed is used in cpuset_migrate_mm() when we change
  98         *   cpuset.mems_allowed and have tasks' nodemask updated, and
  99         *   then old_mems_allowed is updated to mems_allowed.
 100         */
 101        nodemask_t old_mems_allowed;
 102
 103        struct fmeter fmeter;           /* memory_pressure filter */
 104
 105        /*
 106         * Number of tasks being attached to this cpuset.  Used to prevent
 107         * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
 108         */
 109        int attach_in_progress;
 110
 111        /* partition number for rebuild_sched_domains() */
 112        int pn;
 113
 114        /* for custom sched domain */
 115        int relax_domain_level;
 116};
 117
 118/* Retrieve the cpuset for a cgroup */
 119static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
 120{
 121        return container_of(cgroup_subsys_state(cgrp, cpuset_subsys_id),
 122                            struct cpuset, css);
 123}
 124
 125/* Retrieve the cpuset for a task */
 126static inline struct cpuset *task_cs(struct task_struct *task)
 127{
 128        return container_of(task_subsys_state(task, cpuset_subsys_id),
 129                            struct cpuset, css);
 130}
 131
 132static inline struct cpuset *parent_cs(const struct cpuset *cs)
 133{
 134        struct cgroup *pcgrp = cs->css.cgroup->parent;
 135
 136        if (pcgrp)
 137                return cgroup_cs(pcgrp);
 138        return NULL;
 139}
 140
 141#ifdef CONFIG_NUMA
 142static inline bool task_has_mempolicy(struct task_struct *task)
 143{
 144        return task->mempolicy;
 145}
 146#else
 147static inline bool task_has_mempolicy(struct task_struct *task)
 148{
 149        return false;
 150}
 151#endif
 152
 153
 154/* bits in struct cpuset flags field */
 155typedef enum {
 156        CS_ONLINE,
 157        CS_CPU_EXCLUSIVE,
 158        CS_MEM_EXCLUSIVE,
 159        CS_MEM_HARDWALL,
 160        CS_MEMORY_MIGRATE,
 161        CS_SCHED_LOAD_BALANCE,
 162        CS_SPREAD_PAGE,
 163        CS_SPREAD_SLAB,
 164} cpuset_flagbits_t;
 165
 166/* convenient tests for these bits */
 167static inline bool is_cpuset_online(const struct cpuset *cs)
 168{
 169        return test_bit(CS_ONLINE, &cs->flags);
 170}
 171
 172static inline int is_cpu_exclusive(const struct cpuset *cs)
 173{
 174        return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
 175}
 176
 177static inline int is_mem_exclusive(const struct cpuset *cs)
 178{
 179        return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 180}
 181
 182static inline int is_mem_hardwall(const struct cpuset *cs)
 183{
 184        return test_bit(CS_MEM_HARDWALL, &cs->flags);
 185}
 186
 187static inline int is_sched_load_balance(const struct cpuset *cs)
 188{
 189        return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 190}
 191
 192static inline int is_memory_migrate(const struct cpuset *cs)
 193{
 194        return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
 195}
 196
 197static inline int is_spread_page(const struct cpuset *cs)
 198{
 199        return test_bit(CS_SPREAD_PAGE, &cs->flags);
 200}
 201
 202static inline int is_spread_slab(const struct cpuset *cs)
 203{
 204        return test_bit(CS_SPREAD_SLAB, &cs->flags);
 205}
 206
 207static struct cpuset top_cpuset = {
 208        .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
 209                  (1 << CS_MEM_EXCLUSIVE)),
 210};
 211
 212/**
 213 * cpuset_for_each_child - traverse online children of a cpuset
 214 * @child_cs: loop cursor pointing to the current child
 215 * @pos_cgrp: used for iteration
 216 * @parent_cs: target cpuset to walk children of
 217 *
 218 * Walk @child_cs through the online children of @parent_cs.  Must be used
 219 * with RCU read locked.
 220 */
 221#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs)            \
 222        cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup)      \
 223                if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
 224
 225/**
 226 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 227 * @des_cs: loop cursor pointing to the current descendant
 228 * @pos_cgrp: used for iteration
 229 * @root_cs: target cpuset to walk descendants of
 230 *
 231 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 232 * with RCU read locked.  The caller may modify @pos_cgrp by calling
 233 * cgroup_rightmost_descendant() to skip subtree.
 234 */
 235#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs)       \
 236        cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
 237                if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
 238
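/*
 * A minimal usage sketch of cpuset_for_each_child() (illustrative only and
 * not part of the build; the real callers are validate_change() and friends
 * below).  The hypothetical helper just prints each online child's flags
 * while holding the RCU read lock, as the macro requires.
 */
#if 0
static void example_walk_children(struct cpuset *parent)
{
	struct cpuset *child;
	struct cgroup *pos_cgrp;

	rcu_read_lock();
	cpuset_for_each_child(child, pos_cgrp, parent)
		pr_info("child cpuset flags: %#lx\n", child->flags);
	rcu_read_unlock();
}
#endif
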
 239/*
 240 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 241 * and callback_mutex.  The latter may nest inside the former.  We also
 242 * require taking task_lock() when dereferencing a task's cpuset pointer.
 243 * See "The task_lock() exception", at the end of this comment.
 244 *
 245 * A task must hold both mutexes to modify cpusets.  If a task holds
 246 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 247 * is the only task able to also acquire callback_mutex and be able to
 248 * modify cpusets.  It can perform various checks on the cpuset structure
 249 * first, knowing nothing will change.  It can also allocate memory while
 250 * just holding cpuset_mutex.  While it is performing these checks, various
 251 * callback routines can briefly acquire callback_mutex to query cpusets.
 252 * Once it is ready to make the changes, it takes callback_mutex, blocking
 253 * everyone else.
 254 *
 255 * Calls to the kernel memory allocator can not be made while holding
 256 * callback_mutex, as that would risk double tripping on callback_mutex
 257 * from one of the callbacks into the cpuset code from within
 258 * __alloc_pages().
 259 *
 260 * If a task is only holding callback_mutex, then it has read-only
 261 * access to cpusets.
 262 *
 263 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 264 * by another task; we use the alloc_lock in the task_struct to protect
 265 * them.
 266 *
 267 * The cpuset_common_file_read() handlers only hold callback_mutex across
 268 * small pieces of code, such as when reading out possibly multi-word
 269 * cpumasks and nodemasks.
 270 *
 271 * Accessing a task's cpuset should be done in accordance with the
 272 * guidelines for accessing subsystem state in kernel/cgroup.c
 273 */
 274
 275static DEFINE_MUTEX(cpuset_mutex);
 276static DEFINE_MUTEX(callback_mutex);
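
/*
 * A minimal sketch of the update side described above (illustrative only and
 * not part of the build; the real writers are update_cpumask(),
 * update_nodemask() and update_flag() below): validate and allocate while
 * holding only cpuset_mutex, then take callback_mutex just around the brief
 * window in which the new values are published.
 */
#if 0
static void example_cpuset_update(struct cpuset *cs, const struct cpumask *new)
{
	mutex_lock(&cpuset_mutex);		/* exclude all other modifiers */
	/* ... validate the change, allocate any memory needed ... */
	mutex_lock(&callback_mutex);		/* briefly block readers */
	cpumask_copy(cs->cpus_allowed, new);	/* publish the new mask */
	mutex_unlock(&callback_mutex);
	mutex_unlock(&cpuset_mutex);
}
#endif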
 277
 278/*
 279 * CPU / memory hotplug is handled asynchronously.
 280 */
 281static void cpuset_hotplug_workfn(struct work_struct *work);
 282static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
 283
 284static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
 285
 286/*
 287 * This is ugly, but preserves the userspace API for existing cpuset
 288 * users. If someone tries to mount the "cpuset" filesystem, we
 289 * silently switch it to mount "cgroup" instead
 290 */
 291static struct dentry *cpuset_mount(struct file_system_type *fs_type,
 292                         int flags, const char *unused_dev_name, void *data)
 293{
 294        struct file_system_type *cgroup_fs = get_fs_type("cgroup");
 295        struct dentry *ret = ERR_PTR(-ENODEV);
 296        if (cgroup_fs) {
 297                char mountopts[] =
 298                        "cpuset,noprefix,"
 299                        "release_agent=/sbin/cpuset_release_agent";
 300                ret = cgroup_fs->mount(cgroup_fs, flags,
 301                                           unused_dev_name, mountopts);
 302                put_filesystem(cgroup_fs);
 303        }
 304        return ret;
 305}
 306
 307static struct file_system_type cpuset_fs_type = {
 308        .name = "cpuset",
 309        .mount = cpuset_mount,
 310};
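
/*
 * With the remapping above, a userspace request such as
 *
 *	mount -t cpuset cpuset /dev/cpuset
 *
 * ends up behaving as if the caller had asked for
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent \
 *		cpuset /dev/cpuset
 */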
 311
 312/*
 313 * Return in pmask the portion of a cpuset's cpus_allowed that
 314 * are online.  If none are online, walk up the cpuset hierarchy
 315 * until we find one that does have some online cpus.  The top
 316 * cpuset always has some cpus online.
 317 *
 318 * One way or another, we guarantee to return some non-empty subset
 319 * of cpu_online_mask.
 320 *
 321 * Call with callback_mutex held.
 322 */
 323static void guarantee_online_cpus(const struct cpuset *cs,
 324                                  struct cpumask *pmask)
 325{
 326        while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 327                cs = parent_cs(cs);
 328        cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
 329}
 330
 331/*
 332 * Return in *pmask the portion of a cpuset's mems_allowed that
 333 * are online, with memory.  If none are online with memory, walk
 334 * up the cpuset hierarchy until we find one that does have some
 335 * online mems.  The top cpuset always has some mems online.
 336 *
 337 * One way or another, we guarantee to return some non-empty subset
 338 * of node_states[N_MEMORY].
 339 *
 340 * Call with callback_mutex held.
 341 */
 342static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 343{
 344        while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
 345                cs = parent_cs(cs);
 346        nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
 347}
 348
 349/*
 350 * update task's spread flag if cpuset's page/slab spread flag is set
 351 *
 352 * Called with callback_mutex/cpuset_mutex held
 353 */
 354static void cpuset_update_task_spread_flag(struct cpuset *cs,
 355                                        struct task_struct *tsk)
 356{
 357        if (is_spread_page(cs))
 358                tsk->flags |= PF_SPREAD_PAGE;
 359        else
 360                tsk->flags &= ~PF_SPREAD_PAGE;
 361        if (is_spread_slab(cs))
 362                tsk->flags |= PF_SPREAD_SLAB;
 363        else
 364                tsk->flags &= ~PF_SPREAD_SLAB;
 365}
 366
 367/*
 368 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 369 *
 370 * One cpuset is a subset of another if all its allowed CPUs and
 371 * Memory Nodes are a subset of the other, and its exclusive flags
 372 * are only set if the other's are set.  Call holding cpuset_mutex.
 373 */
 374
 375static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 376{
 377        return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 378                nodes_subset(p->mems_allowed, q->mems_allowed) &&
 379                is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 380                is_mem_exclusive(p) <= is_mem_exclusive(q);
 381}
 382
 383/**
 384 * alloc_trial_cpuset - allocate a trial cpuset
 385 * @cs: the cpuset that the trial cpuset duplicates
 386 */
 387static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
 388{
 389        struct cpuset *trial;
 390
 391        trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
 392        if (!trial)
 393                return NULL;
 394
 395        if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
 396                kfree(trial);
 397                return NULL;
 398        }
 399        cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
 400
 401        return trial;
 402}
 403
 404/**
 405 * free_trial_cpuset - free the trial cpuset
 406 * @trial: the trial cpuset to be freed
 407 */
 408static void free_trial_cpuset(struct cpuset *trial)
 409{
 410        free_cpumask_var(trial->cpus_allowed);
 411        kfree(trial);
 412}
 413
 414/*
 415 * validate_change() - Used to validate that any proposed cpuset change
 416 *                     follows the structural rules for cpusets.
 417 *
 418 * If we replaced the flag and mask values of the current cpuset
 419 * (cur) with those values in the trial cpuset (trial), would
 420 * our various subset and exclusive rules still be valid?  Presumes
 421 * cpuset_mutex held.
 422 *
 423 * 'cur' is the address of an actual, in-use cpuset.  Operations
 424 * such as list traversal that depend on the actual address of the
 425 * cpuset in the list must use cur below, not trial.
 426 *
 427 * 'trial' is the address of a bulk structure copy of cur, with
 428 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 429 * or flags changed to new, trial values.
 430 *
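 * For example, if two sibling cpusets are both marked cpu_exclusive, a
 * trial change that would make their cpus_allowed overlap is rejected
 * with -EINVAL, and a change that would empty the cpus or mems of a
 * cpuset that still has tasks is rejected with -ENOSPC.
 *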
 431 * Return 0 if valid, -errno if not.
 432 */
 433
 434static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 435{
 436        struct cgroup *cgrp;
 437        struct cpuset *c, *par;
 438        int ret;
 439
 440        rcu_read_lock();
 441
 442        /* Each of our child cpusets must be a subset of us */
 443        ret = -EBUSY;
 444        cpuset_for_each_child(c, cgrp, cur)
 445                if (!is_cpuset_subset(c, trial))
 446                        goto out;
 447
 448        /* Remaining checks don't apply to root cpuset */
 449        ret = 0;
 450        if (cur == &top_cpuset)
 451                goto out;
 452
 453        par = parent_cs(cur);
 454
 455        /* We must be a subset of our parent cpuset */
 456        ret = -EACCES;
 457        if (!is_cpuset_subset(trial, par))
 458                goto out;
 459
 460        /*
 461         * If either I or some sibling (!= me) is exclusive, we can't
 462         * overlap
 463         */
 464        ret = -EINVAL;
 465        cpuset_for_each_child(c, cgrp, par) {
 466                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 467                    c != cur &&
 468                    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
 469                        goto out;
 470                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 471                    c != cur &&
 472                    nodes_intersects(trial->mems_allowed, c->mems_allowed))
 473                        goto out;
 474        }
 475
 476        /*
 477         * Cpusets with tasks - existing or newly being attached - can't
 478         * be changed to have empty cpus_allowed or mems_allowed.
 479         */
 480        ret = -ENOSPC;
 481        if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
 482                if (!cpumask_empty(cur->cpus_allowed) &&
 483                    cpumask_empty(trial->cpus_allowed))
 484                        goto out;
 485                if (!nodes_empty(cur->mems_allowed) &&
 486                    nodes_empty(trial->mems_allowed))
 487                        goto out;
 488        }
 489
 490        ret = 0;
 491out:
 492        rcu_read_unlock();
 493        return ret;
 494}
 495
 496#ifdef CONFIG_SMP
 497/*
 498 * Helper routine for generate_sched_domains().
 499 * Do cpusets a, b have overlapping cpus_allowed masks?
 500 */
 501static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 502{
 503        return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
 504}
 505
 506static void
 507update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 508{
 509        if (dattr->relax_domain_level < c->relax_domain_level)
 510                dattr->relax_domain_level = c->relax_domain_level;
 511        return;
 512}
 513
 514static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 515                                    struct cpuset *root_cs)
 516{
 517        struct cpuset *cp;
 518        struct cgroup *pos_cgrp;
 519
 520        rcu_read_lock();
 521        cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
 522                /* skip the whole subtree if @cp doesn't have any CPU */
 523                if (cpumask_empty(cp->cpus_allowed)) {
 524                        pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
 525                        continue;
 526                }
 527
 528                if (is_sched_load_balance(cp))
 529                        update_domain_attr(dattr, cp);
 530        }
 531        rcu_read_unlock();
 532}
 533
 534/*
 535 * generate_sched_domains()
 536 *
 537 * This function builds a partial partition of the system's CPUs.
 538 * A 'partial partition' is a set of non-overlapping subsets whose
 539 * union is a subset of that set.
 540 * The output of this function needs to be passed to kernel/sched/core.c
 541 * partition_sched_domains() routine, which will rebuild the scheduler's
 542 * load balancing domains (sched domains) as specified by that partial
 543 * partition.
 544 *
 545 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 546 * for a background explanation of this.
 547 *
 548 * Does not return errors, on the theory that the callers of this
 549 * routine would rather not worry about failures to rebuild sched
 550 * domains when operating in the severe memory shortage situations
 551 * that could cause allocation failures below.
 552 *
 553 * Must be called with cpuset_mutex held.
 554 *
 555 * A top-down scan of all cpusets, done below with
 556 * cpuset_for_each_descendant_pre(), loads a pointer to each cpuset
 557 * marked is_sched_load_balance into the array 'csa'.  For our
 558 * purposes, rebuilding the scheduler's sched domains, we can ignore
 559 * !is_sched_load_balance cpusets.
 560 * The two key local variables below are:
 561 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 562 *         that need to be load balanced, for convenient iterative
 563 *         access by the subsequent code that finds the best partition,
 564 *         i.e the set of domains (subsets) of CPUs such that the
 565 *         cpus_allowed of every cpuset marked is_sched_load_balance
 566 *         is a subset of one of these domains, while there are as
 567 *         many such domains as possible, each as small as possible.
 568 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 569 *         the kernel/sched/core.c routine partition_sched_domains() in a
 570 *         convenient format, that can be easily compared to the prior
 571 *         value to determine what partition elements (sched domains)
 572 *         were changed (added or removed.)
 573 *
 574 * Finding the best partition (set of domains):
 575 *      The triple nested loops below over i, j, k scan over the
 576 *      load balanced cpusets (using the array of cpuset pointers in
 577 *      csa[]) looking for pairs of cpusets that have overlapping
 578 *      cpus_allowed, but which don't have the same 'pn' partition
 579 *      number, and merges them into the same partition.  It keeps
 580 *      looping on the 'restart' label until it can no longer find
 581 *      any such pairs.
 582 *
 583 *      The union of the cpus_allowed masks from the set of
 584 *      all cpusets having the same 'pn' value then form the one
 585 *      element of the partition (one sched domain) to be passed to
 586 *      partition_sched_domains().
 587 */
 588static int generate_sched_domains(cpumask_var_t **domains,
 589                        struct sched_domain_attr **attributes)
 590{
 591        struct cpuset *cp;      /* scans q */
 592        struct cpuset **csa;    /* array of all cpuset ptrs */
 593        int csn;                /* how many cpuset ptrs in csa so far */
 594        int i, j, k;            /* indices for partition finding loops */
 595        cpumask_var_t *doms;    /* resulting partition; i.e. sched domains */
 596        struct sched_domain_attr *dattr;  /* attributes for custom domains */
 597        int ndoms = 0;          /* number of sched domains in result */
 598        int nslot;              /* next empty doms[] struct cpumask slot */
 599        struct cgroup *pos_cgrp;
 600
 601        doms = NULL;
 602        dattr = NULL;
 603        csa = NULL;
 604
 605        /* Special case for the 99% of systems with one, full, sched domain */
 606        if (is_sched_load_balance(&top_cpuset)) {
 607                ndoms = 1;
 608                doms = alloc_sched_domains(ndoms);
 609                if (!doms)
 610                        goto done;
 611
 612                dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 613                if (dattr) {
 614                        *dattr = SD_ATTR_INIT;
 615                        update_domain_attr_tree(dattr, &top_cpuset);
 616                }
 617                cpumask_copy(doms[0], top_cpuset.cpus_allowed);
 618
 619                goto done;
 620        }
 621
 622        csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
 623        if (!csa)
 624                goto done;
 625        csn = 0;
 626
 627        rcu_read_lock();
 628        cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
 629                /*
 630                 * Continue traversing beyond @cp iff @cp has some CPUs and
 631                 * isn't load balancing.  The former is obvious.  The
 632                 * latter: All child cpusets contain a subset of the
 633                 * parent's cpus, so just skip them, and then we call
 634                 * update_domain_attr_tree() to calc relax_domain_level of
 635                 * the corresponding sched domain.
 636                 */
 637                if (!cpumask_empty(cp->cpus_allowed) &&
 638                    !is_sched_load_balance(cp))
 639                        continue;
 640
 641                if (is_sched_load_balance(cp))
 642                        csa[csn++] = cp;
 643
 644                /* skip @cp's subtree */
 645                pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
 646        }
 647        rcu_read_unlock();
 648
 649        for (i = 0; i < csn; i++)
 650                csa[i]->pn = i;
 651        ndoms = csn;
 652
 653restart:
 654        /* Find the best partition (set of sched domains) */
 655        for (i = 0; i < csn; i++) {
 656                struct cpuset *a = csa[i];
 657                int apn = a->pn;
 658
 659                for (j = 0; j < csn; j++) {
 660                        struct cpuset *b = csa[j];
 661                        int bpn = b->pn;
 662
 663                        if (apn != bpn && cpusets_overlap(a, b)) {
 664                                for (k = 0; k < csn; k++) {
 665                                        struct cpuset *c = csa[k];
 666
 667                                        if (c->pn == bpn)
 668                                                c->pn = apn;
 669                                }
 670                                ndoms--;        /* one less element */
 671                                goto restart;
 672                        }
 673                }
 674        }
 675
 676        /*
 677         * Now we know how many domains to create.
 678         * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 679         */
 680        doms = alloc_sched_domains(ndoms);
 681        if (!doms)
 682                goto done;
 683
 684        /*
 685         * The rest of the code, including the scheduler, can deal with
 686         * dattr==NULL case. No need to abort if alloc fails.
 687         */
 688        dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 689
 690        for (nslot = 0, i = 0; i < csn; i++) {
 691                struct cpuset *a = csa[i];
 692                struct cpumask *dp;
 693                int apn = a->pn;
 694
 695                if (apn < 0) {
 696                        /* Skip completed partitions */
 697                        continue;
 698                }
 699
 700                dp = doms[nslot];
 701
 702                if (nslot == ndoms) {
 703                        static int warnings = 10;
 704                        if (warnings) {
 705                                printk(KERN_WARNING
 706                                 "rebuild_sched_domains confused:"
 707                                  " nslot %d, ndoms %d, csn %d, i %d,"
 708                                  " apn %d\n",
 709                                  nslot, ndoms, csn, i, apn);
 710                                warnings--;
 711                        }
 712                        continue;
 713                }
 714
 715                cpumask_clear(dp);
 716                if (dattr)
 717                        *(dattr + nslot) = SD_ATTR_INIT;
 718                for (j = i; j < csn; j++) {
 719                        struct cpuset *b = csa[j];
 720
 721                        if (apn == b->pn) {
 722                                cpumask_or(dp, dp, b->cpus_allowed);
 723                                if (dattr)
 724                                        update_domain_attr_tree(dattr + nslot, b);
 725
 726                                /* Done with this partition */
 727                                b->pn = -1;
 728                        }
 729                }
 730                nslot++;
 731        }
 732        BUG_ON(nslot != ndoms);
 733
 734done:
 735        kfree(csa);
 736
 737        /*
 738         * Fallback to the default domain if kmalloc() failed.
 739         * See comments in partition_sched_domains().
 740         */
 741        if (doms == NULL)
 742                ndoms = 1;
 743
 744        *domains    = doms;
 745        *attributes = dattr;
 746        return ndoms;
 747}
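
/*
 * A small worked example of the partition step above: suppose three load
 * balanced cpusets have cpus_allowed A = {0,1}, B = {1,2} and C = {4,5}.
 * A and B overlap, so the restart loop folds B's 'pn' into A's and ndoms
 * drops from 3 to 2, while C keeps its own 'pn'.  The resulting doms[]
 * then holds the two sched domains {0,1,2} and {4,5}.
 */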
 748
 749/*
 750 * Rebuild scheduler domains.
 751 *
 752 * If the flag 'sched_load_balance' of any cpuset with non-empty
 753 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 754 * which has that flag enabled, or if any cpuset with a non-empty
 755 * 'cpus' is removed, then call this routine to rebuild the
 756 * scheduler's dynamic sched domains.
 757 *
 758 * Call with cpuset_mutex held.  Takes get_online_cpus().
 759 */
 760static void rebuild_sched_domains_locked(void)
 761{
 762        struct sched_domain_attr *attr;
 763        cpumask_var_t *doms;
 764        int ndoms;
 765
 766        lockdep_assert_held(&cpuset_mutex);
 767        get_online_cpus();
 768
 769        /*
 770         * We have raced with CPU hotplug.  Don't do anything, to avoid
 771         * passing doms with an offlined cpu to partition_sched_domains().
 772         * Anyway, the hotplug work item will rebuild the sched domains.
 773         */
 774        if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
 775                goto out;
 776
 777        /* Generate domain masks and attrs */
 778        ndoms = generate_sched_domains(&doms, &attr);
 779
 780        /* Have scheduler rebuild the domains */
 781        partition_sched_domains(ndoms, doms, attr);
 782out:
 783        put_online_cpus();
 784}
 785#else /* !CONFIG_SMP */
 786static void rebuild_sched_domains_locked(void)
 787{
 788}
 789#endif /* CONFIG_SMP */
 790
 791void rebuild_sched_domains(void)
 792{
 793        mutex_lock(&cpuset_mutex);
 794        rebuild_sched_domains_locked();
 795        mutex_unlock(&cpuset_mutex);
 796}
 797
 798/*
 799 * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
 800 * @cs: the cpuset of interest
 801 *
 802 * A cpuset's effective cpumask is the cpumask of the nearest ancestor
 803 * with non-empty cpus. We use effective cpumask whenever:
 804 * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
 805 *   if the cpuset they reside in has no cpus)
 806 * - we want to retrieve task_cs(tsk)'s cpus_allowed.
 807 *
 808 * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
 809 * exception. See comments there.
 810 */
 811static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
 812{
 813        while (cpumask_empty(cs->cpus_allowed))
 814                cs = parent_cs(cs);
 815        return cs;
 816}
 817
 818/*
 819 * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
 820 * @cs: the cpuset of interest
 821 *
 822 * A cpuset's effective nodemask is the nodemask of the nearest ancestor
 823 * with non-empty mems. We use effective nodemask whenever:
 824 * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
 825 *   if the cpuset they reside in has no mems)
 826 * - we want to retrieve task_cs(tsk)'s mems_allowed.
 827 *
 828 * Called with cpuset_mutex held.
 829 */
 830static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
 831{
 832        while (nodes_empty(cs->mems_allowed))
 833                cs = parent_cs(cs);
 834        return cs;
 835}
 836
 837/**
 838 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 839 * @tsk: task to test
 840 * @scan: struct cgroup_scanner containing the cgroup of the task
 841 *
 842 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 843 * cpus_allowed mask needs to be changed.
 844 *
 845 * We don't need to re-check for the cgroup/cpuset membership, since we're
 846 * holding cpuset_mutex at this point.
 847 */
 848static void cpuset_change_cpumask(struct task_struct *tsk,
 849                                  struct cgroup_scanner *scan)
 850{
 851        struct cpuset *cpus_cs;
 852
 853        cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
 854        set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
 855}
 856
 857/**
 858 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 859 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 860 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 861 *
 862 * Called with cpuset_mutex held
 863 *
 864 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 865 * calling callback functions for each.
 866 *
 867 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 868 * if @heap != NULL.
 869 */
 870static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 871{
 872        struct cgroup_scanner scan;
 873
 874        scan.cg = cs->css.cgroup;
 875        scan.test_task = NULL;
 876        scan.process_task = cpuset_change_cpumask;
 877        scan.heap = heap;
 878        cgroup_scan_tasks(&scan);
 879}
 880
 881/*
 882 * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
 883 * @root_cs: the root cpuset of the hierarchy
 884 * @update_root: update root cpuset or not?
 885 * @heap: the heap used by cgroup_scan_tasks()
 886 *
 887 * This will update cpumasks of tasks in @root_cs and all other empty cpusets
 888 * which take on cpumask of @root_cs.
 889 *
 890 * Called with cpuset_mutex held
 891 */
 892static void update_tasks_cpumask_hier(struct cpuset *root_cs,
 893                                      bool update_root, struct ptr_heap *heap)
 894{
 895        struct cpuset *cp;
 896        struct cgroup *pos_cgrp;
 897
 898        if (update_root)
 899                update_tasks_cpumask(root_cs, heap);
 900
 901        rcu_read_lock();
 902        cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
 903                /* skip the whole subtree if @cp has some CPUs */
 904                if (!cpumask_empty(cp->cpus_allowed)) {
 905                        pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
 906                        continue;
 907                }
 908                if (!css_tryget(&cp->css))
 909                        continue;
 910                rcu_read_unlock();
 911
 912                update_tasks_cpumask(cp, heap);
 913
 914                rcu_read_lock();
 915                css_put(&cp->css);
 916        }
 917        rcu_read_unlock();
 918}
 919
 920/**
 921 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 922 * @cs: the cpuset to consider
 923 * @buf: buffer of cpu numbers written to this cpuset
 924 */
 925static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 926                          const char *buf)
 927{
 928        struct ptr_heap heap;
 929        int retval;
 930        int is_load_balanced;
 931
 932        /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
 933        if (cs == &top_cpuset)
 934                return -EACCES;
 935
 936        /*
 937         * An empty cpus_allowed is ok only if the cpuset has no tasks.
 938         * Since cpulist_parse() fails on an empty mask, we special case
 939         * that parsing.  The validate_change() call ensures that cpusets
 940         * with tasks have cpus.
 941         */
 942        if (!*buf) {
 943                cpumask_clear(trialcs->cpus_allowed);
 944        } else {
 945                retval = cpulist_parse(buf, trialcs->cpus_allowed);
 946                if (retval < 0)
 947                        return retval;
 948
 949                if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
 950                        return -EINVAL;
 951        }
 952
 953        /* Nothing to do if the cpus didn't change */
 954        if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
 955                return 0;
 956
 957        retval = validate_change(cs, trialcs);
 958        if (retval < 0)
 959                return retval;
 960
 961        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
 962        if (retval)
 963                return retval;
 964
 965        is_load_balanced = is_sched_load_balance(trialcs);
 966
 967        mutex_lock(&callback_mutex);
 968        cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 969        mutex_unlock(&callback_mutex);
 970
 971        update_tasks_cpumask_hier(cs, true, &heap);
 972
 973        heap_free(&heap);
 974
 975        if (is_load_balanced)
 976                rebuild_sched_domains_locked();
 977        return 0;
 978}
 979
 980/*
 981 * cpuset_migrate_mm
 982 *
 983 *    Migrate memory region from one set of nodes to another.
 984 *
 985 *    Temporarily set the task's mems_allowed to the target nodes of migration,
 986 *    so that the migration code can allocate pages on these nodes.
 987 *
 988 *    Call holding cpuset_mutex, so current's cpuset won't change
 989 *    during this call, as cpuset_mutex holds off any cpuset_attach()
 990 *    calls.  Therefore we don't need to take task_lock around the
 991 *    call to guarantee_online_mems(), as we know no one is changing
 992 *    our task's cpuset.
 993 *
 994 *    While the mm_struct we are migrating is typically from some
 995 *    other task, the task_struct mems_allowed that we are hacking
 996 *    is for our current task, which must allocate new pages for that
 997 *    migrating memory region.
 998 */
 999
1000static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1001                                                        const nodemask_t *to)
1002{
1003        struct task_struct *tsk = current;
1004        struct cpuset *mems_cs;
1005
1006        tsk->mems_allowed = *to;
1007
1008        do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
1009
1010        mems_cs = effective_nodemask_cpuset(task_cs(tsk));
1011        guarantee_online_mems(mems_cs, &tsk->mems_allowed);
1012}
1013
1014/*
1015 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1016 * @tsk: the task to change
1017 * @newmems: the new nodes that the task will be allowed to use
1018 *
1019 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
1020 * we structure updates as setting all new allowed nodes, then clearing newly
1021 * disallowed ones.
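 *
 * For example, going from mems_allowed = {0} to newmems = {1}: the union
 * step below first yields {0,1}, and only then is node 0 cleared, so a
 * concurrent reader never observes an empty nodemask.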
1022 */
1023static void cpuset_change_task_nodemask(struct task_struct *tsk,
1024                                        nodemask_t *newmems)
1025{
1026        bool need_loop;
1027
1028        /*
1029         * Allow tasks that have access to memory reserves because they have
1030         * been OOM killed to get memory anywhere.
1031         */
1032        if (unlikely(test_thread_flag(TIF_MEMDIE)))
1033                return;
1034        if (current->flags & PF_EXITING) /* Let dying task have memory */
1035                return;
1036
1037        task_lock(tsk);
1038        /*
1039         * Determine if a loop is necessary if another thread is doing
1040         * get_mems_allowed().  If at least one node remains unchanged and
1041         * tsk does not have a mempolicy, then an empty nodemask will not be
1042         * possible when mems_allowed is larger than a word.
1043         */
1044        need_loop = task_has_mempolicy(tsk) ||
1045                        !nodes_intersects(*newmems, tsk->mems_allowed);
1046
1047        if (need_loop)
1048                write_seqcount_begin(&tsk->mems_allowed_seq);
1049
1050        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1051        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
1052
1053        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1054        tsk->mems_allowed = *newmems;
1055
1056        if (need_loop)
1057                write_seqcount_end(&tsk->mems_allowed_seq);
1058
1059        task_unlock(tsk);
1060}
1061
1062/*
1063 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
1064 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
1065 * memory_migrate flag is set. Called with cpuset_mutex held.
1066 */
1067static void cpuset_change_nodemask(struct task_struct *p,
1068                                   struct cgroup_scanner *scan)
1069{
1070        struct cpuset *cs = cgroup_cs(scan->cg);
1071        struct mm_struct *mm;
1072        int migrate;
1073        nodemask_t *newmems = scan->data;
1074
1075        cpuset_change_task_nodemask(p, newmems);
1076
1077        mm = get_task_mm(p);
1078        if (!mm)
1079                return;
1080
1081        migrate = is_memory_migrate(cs);
1082
1083        mpol_rebind_mm(mm, &cs->mems_allowed);
1084        if (migrate)
1085                cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
1086        mmput(mm);
1087}
1088
1089static void *cpuset_being_rebound;
1090
1091/**
1092 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1093 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1094 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1095 *
1096 * Called with cpuset_mutex held
1097 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1098 * if @heap != NULL.
1099 */
1100static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
1101{
1102        static nodemask_t newmems;      /* protected by cpuset_mutex */
1103        struct cgroup_scanner scan;
1104        struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
1105
1106        cpuset_being_rebound = cs;              /* causes mpol_dup() rebind */
1107
1108        guarantee_online_mems(mems_cs, &newmems);
1109
1110        scan.cg = cs->css.cgroup;
1111        scan.test_task = NULL;
1112        scan.process_task = cpuset_change_nodemask;
1113        scan.heap = heap;
1114        scan.data = &newmems;
1115
1116        /*
1117         * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1118         * take while holding tasklist_lock.  Forks can happen - the
1119         * mpol_dup() cpuset_being_rebound check will catch such forks,
1120         * and rebind their vma mempolicies too.  Because we still hold
1121         * the global cpuset_mutex, we know that no other rebind effort
1122         * will be contending for the global variable cpuset_being_rebound.
1123         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1124         * is idempotent.  Also migrate pages in each mm to new nodes.
1125         */
1126        cgroup_scan_tasks(&scan);
1127
1128        /*
1129         * All the tasks' nodemasks have been updated, update
1130         * cs->old_mems_allowed.
1131         */
1132        cs->old_mems_allowed = newmems;
1133
1134        /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1135        cpuset_being_rebound = NULL;
1136}
1137
1138/*
1139 * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
1140 * @root_cs: the root cpuset of the hierarchy
1141 * @update_root: update the root cpuset or not?
1142 * @heap: the heap used by cgroup_scan_tasks()
1143 *
1144 * This will update nodemasks of tasks in @root_cs and all other empty cpusets
1145 * which take on nodemask of @root_cs.
1146 *
1147 * Called with cpuset_mutex held
1148 */
1149static void update_tasks_nodemask_hier(struct cpuset *root_cs,
1150                                       bool update_root, struct ptr_heap *heap)
1151{
1152        struct cpuset *cp;
1153        struct cgroup *pos_cgrp;
1154
1155        if (update_root)
1156                update_tasks_nodemask(root_cs, heap);
1157
1158        rcu_read_lock();
1159        cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
1160                /* skip the whole subtree if @cp has some mems */
1161                if (!nodes_empty(cp->mems_allowed)) {
1162                        pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
1163                        continue;
1164                }
1165                if (!css_tryget(&cp->css))
1166                        continue;
1167                rcu_read_unlock();
1168
1169                update_tasks_nodemask(cp, heap);
1170
1171                rcu_read_lock();
1172                css_put(&cp->css);
1173        }
1174        rcu_read_unlock();
1175}
1176
1177/*
1178 * Handle user request to change the 'mems' memory placement
1179 * of a cpuset.  Needs to validate the request, update the
1180 * cpuset's mems_allowed, and for each task in the cpuset,
1181 * update mems_allowed and rebind the task's mempolicy and any vma
1182 * mempolicies, and if the cpuset is marked 'memory_migrate',
1183 * migrate the task's pages to the new memory.
1184 *
1185 * Call with cpuset_mutex held.  May take callback_mutex during call.
1186 * Will take tasklist_lock, scan the tasklist for tasks in cpuset cs,
1187 * lock each such task's mm->mmap_sem, scan its vmas and rebind
1188 * their mempolicies to the cpuset's new mems_allowed.
1189 */
1190static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1191                           const char *buf)
1192{
1193        int retval;
1194        struct ptr_heap heap;
1195
1196        /*
1197         * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
1198         * it's read-only
1199         */
1200        if (cs == &top_cpuset) {
1201                retval = -EACCES;
1202                goto done;
1203        }
1204
1205        /*
1206         * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1207         * Since nodelist_parse() fails on an empty mask, we special case
1208         * that parsing.  The validate_change() call ensures that cpusets
1209         * with tasks have memory.
1210         */
1211        if (!*buf) {
1212                nodes_clear(trialcs->mems_allowed);
1213        } else {
1214                retval = nodelist_parse(buf, trialcs->mems_allowed);
1215                if (retval < 0)
1216                        goto done;
1217
1218                if (!nodes_subset(trialcs->mems_allowed,
1219                                node_states[N_MEMORY])) {
1220                        retval =  -EINVAL;
1221                        goto done;
1222                }
1223        }
1224
1225        if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1226                retval = 0;             /* Too easy - nothing to do */
1227                goto done;
1228        }
1229        retval = validate_change(cs, trialcs);
1230        if (retval < 0)
1231                goto done;
1232
1233        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1234        if (retval < 0)
1235                goto done;
1236
1237        mutex_lock(&callback_mutex);
1238        cs->mems_allowed = trialcs->mems_allowed;
1239        mutex_unlock(&callback_mutex);
1240
1241        update_tasks_nodemask_hier(cs, true, &heap);
1242
1243        heap_free(&heap);
1244done:
1245        return retval;
1246}
1247
1248int current_cpuset_is_being_rebound(void)
1249{
1250        return task_cs(current) == cpuset_being_rebound;
1251}
1252
1253static int update_relax_domain_level(struct cpuset *cs, s64 val)
1254{
1255#ifdef CONFIG_SMP
1256        if (val < -1 || val >= sched_domain_level_max)
1257                return -EINVAL;
1258#endif
1259
1260        if (val != cs->relax_domain_level) {
1261                cs->relax_domain_level = val;
1262                if (!cpumask_empty(cs->cpus_allowed) &&
1263                    is_sched_load_balance(cs))
1264                        rebuild_sched_domains_locked();
1265        }
1266
1267        return 0;
1268}
1269
1270/*
1271 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1272 * @tsk: task to be updated
1273 * @scan: struct cgroup_scanner containing the cgroup of the task
1274 *
1275 * Called by cgroup_scan_tasks() for each task in a cgroup.
1276 *
1277 * We don't need to re-check for the cgroup/cpuset membership, since we're
1278 * holding cpuset_mutex at this point.
1279 */
1280static void cpuset_change_flag(struct task_struct *tsk,
1281                                struct cgroup_scanner *scan)
1282{
1283        cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
1284}
1285
1286/*
1287 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1288 * @cs: the cpuset in which each task's spread flags needs to be changed
1289 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1290 *
1291 * Called with cpuset_mutex held
1292 *
1293 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1294 * calling callback functions for each.
1295 *
1296 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1297 * if @heap != NULL.
1298 */
1299static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1300{
1301        struct cgroup_scanner scan;
1302
1303        scan.cg = cs->css.cgroup;
1304        scan.test_task = NULL;
1305        scan.process_task = cpuset_change_flag;
1306        scan.heap = heap;
1307        cgroup_scan_tasks(&scan);
1308}
1309
1310/*
1311 * update_flag - read a 0 or a 1 in a file and update associated flag
1312 * bit:         the bit to update (see cpuset_flagbits_t)
1313 * cs:          the cpuset to update
1314 * turning_on:  whether the flag is being set or cleared
1315 *
1316 * Call with cpuset_mutex held.
1317 */
1318
1319static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1320                       int turning_on)
1321{
1322        struct cpuset *trialcs;
1323        int balance_flag_changed;
1324        int spread_flag_changed;
1325        struct ptr_heap heap;
1326        int err;
1327
1328        trialcs = alloc_trial_cpuset(cs);
1329        if (!trialcs)
1330                return -ENOMEM;
1331
1332        if (turning_on)
1333                set_bit(bit, &trialcs->flags);
1334        else
1335                clear_bit(bit, &trialcs->flags);
1336
1337        err = validate_change(cs, trialcs);
1338        if (err < 0)
1339                goto out;
1340
1341        err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1342        if (err < 0)
1343                goto out;
1344
1345        balance_flag_changed = (is_sched_load_balance(cs) !=
1346                                is_sched_load_balance(trialcs));
1347
1348        spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1349                        || (is_spread_page(cs) != is_spread_page(trialcs)));
1350
1351        mutex_lock(&callback_mutex);
1352        cs->flags = trialcs->flags;
1353        mutex_unlock(&callback_mutex);
1354
1355        if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1356                rebuild_sched_domains_locked();
1357
1358        if (spread_flag_changed)
1359                update_tasks_flags(cs, &heap);
1360        heap_free(&heap);
1361out:
1362        free_trial_cpuset(trialcs);
1363        return err;
1364}
1365
1366/*
1367 * Frequency meter - How fast is some event occurring?
1368 *
1369 * These routines manage a digitally filtered, constant time based,
1370 * event frequency meter.  There are four routines:
1371 *   fmeter_init() - initialize a frequency meter.
1372 *   fmeter_markevent() - called each time the event happens.
1373 *   fmeter_getrate() - returns the recent rate of such events.
1374 *   fmeter_update() - internal routine used to update fmeter.
1375 *
1376 * A common data structure is passed to each of these routines,
1377 * which is used to keep track of the state required to manage the
1378 * frequency meter and its digital filter.
1379 *
1380 * The filter works on the number of events marked per unit time.
1381 * The filter is single-pole low-pass recursive (IIR).  The time unit
1382 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1383 * simulate 3 decimal digits of precision (multiplied by 1000).
1384 *
1385 * With an FM_COEF of 933, and a time base of 1 second, the filter
1386 * has a half-life of 10 seconds, meaning that if the events quit
1387 * happening, then the rate returned from the fmeter_getrate()
1388 * will be cut in half each 10 seconds, until it converges to zero.
1389 *
1390 * It is not worth doing a real infinitely recursive filter.  If more
1391 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1392 * just compute FM_MAXTICKS ticks worth, by which point the level
1393 * will be stable.
1394 *
1395 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1396 * arithmetic overflow in the fmeter_update() routine.
1397 *
1398 * Given the simple 32 bit integer arithmetic used, this meter works
1399 * best for reporting rates between one per millisecond (msec) and
1400 * one per 32 (approx) seconds.  At constant rates faster than one
1401 * per msec it maxes out at values just under 1,000,000.  At constant
1402 * rates between one per msec, and one per second it will stabilize
1403 * to a value N*1000, where N is the rate of events per second.
1404 * At constant rates between one per second and one per 32 seconds,
1405 * it will be choppy, moving up on the seconds that have an event,
1406 * and then decaying until the next event.  At rates slower than
1407 * about one in 32 seconds, it decays all the way back to zero between
1408 * each event.
1409 */
1410
1411#define FM_COEF 933             /* coefficient for half-life of 10 secs */
1412#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1413#define FM_MAXCNT 1000000       /* limit cnt to avoid overflow */
1414#define FM_SCALE 1000           /* faux fixed point scale */
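
/*
 * A rough numeric illustration of the constants above (illustrative only and
 * not part of the build): each one-second tick with no new events multiplies
 * val by FM_COEF/FM_SCALE = 0.933, and ten such ticks multiply it by about
 * 0.933^10 ~= 0.5, which is where the 10 second half-life quoted above
 * comes from.
 */
#if 0
static int fmeter_halflife_example(int val)
{
	int i;

	for (i = 0; i < 10; i++)	/* ten one-second ticks, no new events */
		val = (FM_COEF * val) / FM_SCALE;
	return val;			/* roughly val / 2 */
}
#endif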
1415
1416/* Initialize a frequency meter */
1417static void fmeter_init(struct fmeter *fmp)
1418{
1419        fmp->cnt = 0;
1420        fmp->val = 0;
1421        fmp->time = 0;
1422        spin_lock_init(&fmp->lock);
1423}
1424
1425/* Internal meter update - process cnt events and update value */
1426static void fmeter_update(struct fmeter *fmp)
1427{
1428        time_t now = get_seconds();
1429        time_t ticks = now - fmp->time;
1430
1431        if (ticks == 0)
1432                return;
1433
1434        ticks = min(FM_MAXTICKS, ticks);
1435        while (ticks-- > 0)
1436                fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1437        fmp->time = now;
1438
1439        fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1440        fmp->cnt = 0;
1441}
1442
1443/* Process any previous ticks, then bump cnt by one (times scale). */
1444static void fmeter_markevent(struct fmeter *fmp)
1445{
1446        spin_lock(&fmp->lock);
1447        fmeter_update(fmp);
1448        fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1449        spin_unlock(&fmp->lock);
1450}
1451
1452/* Process any previous ticks, then return current value. */
1453static int fmeter_getrate(struct fmeter *fmp)
1454{
1455        int val;
1456
1457        spin_lock(&fmp->lock);
1458        fmeter_update(fmp);
1459        val = fmp->val;
1460        spin_unlock(&fmp->lock);
1461        return val;
1462}
1463
1464/* Called by cgroups to determine if a cpuset is usable; takes cpuset_mutex */
1465static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1466{
1467        struct cpuset *cs = cgroup_cs(cgrp);
1468        struct task_struct *task;
1469        int ret;
1470
1471        mutex_lock(&cpuset_mutex);
1472
1473        /*
1474         * We allow moving tasks into an empty cpuset if the sane_behavior
1475         * flag is set.
1476         */
1477        ret = -ENOSPC;
1478        if (!cgroup_sane_behavior(cgrp) &&
1479            (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1480                goto out_unlock;
1481
1482        cgroup_taskset_for_each(task, cgrp, tset) {
1483                /*
1484                 * Kthreads which disallow setaffinity shouldn't be moved
1485                 * to a new cpuset; we don't want to change their cpu
1486                 * affinity and isolating such threads by their set of
1487                 * allowed nodes is unnecessary.  Thus, cpusets are not
1488                 * applicable for such threads.  This prevents checking for
1489                 * success of set_cpus_allowed_ptr() on all attached tasks
1490                 * before cpus_allowed may be changed.
1491                 */
1492                ret = -EINVAL;
1493                if (task->flags & PF_NO_SETAFFINITY)
1494                        goto out_unlock;
1495                ret = security_task_setscheduler(task);
1496                if (ret)
1497                        goto out_unlock;
1498        }
1499
1500        /*
1501         * Mark attach is in progress.  This makes validate_change() fail
1502         * changes which zero cpus/mems_allowed.
1503         */
1504        cs->attach_in_progress++;
1505        ret = 0;
1506out_unlock:
1507        mutex_unlock(&cpuset_mutex);
1508        return ret;
1509}
1510
1511static void cpuset_cancel_attach(struct cgroup *cgrp,
1512                                 struct cgroup_taskset *tset)
1513{
1514        mutex_lock(&cpuset_mutex);
1515        cgroup_cs(cgrp)->attach_in_progress--;
1516        mutex_unlock(&cpuset_mutex);
1517}
1518
1519/*
1520 * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
1521 * but we can't allocate it dynamically there.  Define it global and
1522 * allocate from cpuset_init().
1523 */
1524static cpumask_var_t cpus_attach;
1525
1526static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1527{
1528        /* static buf protected by cpuset_mutex */
1529        static nodemask_t cpuset_attach_nodemask_to;
1530        struct mm_struct *mm;
1531        struct task_struct *task;
1532        struct task_struct *leader = cgroup_taskset_first(tset);
1533        struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
1534        struct cpuset *cs = cgroup_cs(cgrp);
1535        struct cpuset *oldcs = cgroup_cs(oldcgrp);
1536        struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
1537        struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
1538
1539        mutex_lock(&cpuset_mutex);
1540
1541        /* prepare for attach */
1542        if (cs == &top_cpuset)
1543                cpumask_copy(cpus_attach, cpu_possible_mask);
1544        else
1545                guarantee_online_cpus(cpus_cs, cpus_attach);
1546
1547        guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
1548
1549        cgroup_taskset_for_each(task, cgrp, tset) {
1550                /*
1551                 * can_attach beforehand should guarantee that this doesn't
1552                 * fail.  TODO: have a better way to handle failure here
1553                 */
1554                WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1555
1556                cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1557                cpuset_update_task_spread_flag(cs, task);
1558        }
1559
1560        /*
1561         * Change mm, possibly for multiple threads in a threadgroup. This is
1562         * expensive and may sleep.
1563         */
1564        cpuset_attach_nodemask_to = cs->mems_allowed;
1565        mm = get_task_mm(leader);
1566        if (mm) {
1567                struct cpuset *mems_oldcs = effective_nodemask_cpuset(oldcs);
1568
1569                mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1570
1571                /*
1572                 * old_mems_allowed is the same as mems_allowed here, except
1573                 * if this task is being moved automatically due to hotplug.
1574                 * In that case @mems_allowed has been updated and is empty,
1575                 * so @old_mems_allowed is the right nodemask to migrate the
1576                 * mm from.
1577                 */
1578                if (is_memory_migrate(cs)) {
1579                        cpuset_migrate_mm(mm, &mems_oldcs->old_mems_allowed,
1580                                          &cpuset_attach_nodemask_to);
1581                }
1582                mmput(mm);
1583        }
1584
1585        cs->old_mems_allowed = cpuset_attach_nodemask_to;
1586
1587        cs->attach_in_progress--;
1588        if (!cs->attach_in_progress)
1589                wake_up(&cpuset_attach_wq);
1590
1591        mutex_unlock(&cpuset_mutex);
1592}
1593
1594/* The various types of files and directories in a cpuset file system */
1595
1596typedef enum {
1597        FILE_MEMORY_MIGRATE,
1598        FILE_CPULIST,
1599        FILE_MEMLIST,
1600        FILE_CPU_EXCLUSIVE,
1601        FILE_MEM_EXCLUSIVE,
1602        FILE_MEM_HARDWALL,
1603        FILE_SCHED_LOAD_BALANCE,
1604        FILE_SCHED_RELAX_DOMAIN_LEVEL,
1605        FILE_MEMORY_PRESSURE_ENABLED,
1606        FILE_MEMORY_PRESSURE,
1607        FILE_SPREAD_PAGE,
1608        FILE_SPREAD_SLAB,
1609} cpuset_filetype_t;
1610
1611static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1612{
1613        struct cpuset *cs = cgroup_cs(cgrp);
1614        cpuset_filetype_t type = cft->private;
1615        int retval = 0;
1616
1617        mutex_lock(&cpuset_mutex);
1618        if (!is_cpuset_online(cs)) {
1619                retval = -ENODEV;
1620                goto out_unlock;
1621        }
1622
1623        switch (type) {
1624        case FILE_CPU_EXCLUSIVE:
1625                retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1626                break;
1627        case FILE_MEM_EXCLUSIVE:
1628                retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1629                break;
1630        case FILE_MEM_HARDWALL:
1631                retval = update_flag(CS_MEM_HARDWALL, cs, val);
1632                break;
1633        case FILE_SCHED_LOAD_BALANCE:
1634                retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1635                break;
1636        case FILE_MEMORY_MIGRATE:
1637                retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1638                break;
1639        case FILE_MEMORY_PRESSURE_ENABLED:
1640                cpuset_memory_pressure_enabled = !!val;
1641                break;
1642        case FILE_MEMORY_PRESSURE:
1643                retval = -EACCES;
1644                break;
1645        case FILE_SPREAD_PAGE:
1646                retval = update_flag(CS_SPREAD_PAGE, cs, val);
1647                break;
1648        case FILE_SPREAD_SLAB:
1649                retval = update_flag(CS_SPREAD_SLAB, cs, val);
1650                break;
1651        default:
1652                retval = -EINVAL;
1653                break;
1654        }
1655out_unlock:
1656        mutex_unlock(&cpuset_mutex);
1657        return retval;
1658}
1659
1660static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1661{
1662        struct cpuset *cs = cgroup_cs(cgrp);
1663        cpuset_filetype_t type = cft->private;
1664        int retval = -ENODEV;
1665
1666        mutex_lock(&cpuset_mutex);
1667        if (!is_cpuset_online(cs))
1668                goto out_unlock;
1669
1670        switch (type) {
1671        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1672                retval = update_relax_domain_level(cs, val);
1673                break;
1674        default:
1675                retval = -EINVAL;
1676                break;
1677        }
1678out_unlock:
1679        mutex_unlock(&cpuset_mutex);
1680        return retval;
1681}
1682
1683/*
1684 * Common handling for a write to a "cpus" or "mems" file.
1685 */
1686static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1687                                const char *buf)
1688{
1689        struct cpuset *cs = cgroup_cs(cgrp);
1690        struct cpuset *trialcs;
1691        int retval = -ENODEV;
1692
1693        /*
1694         * CPU or memory hotunplug may leave @cs w/o any execution
1695         * resources, in which case the hotplug code asynchronously updates
1696         * configuration and transfers all tasks to the nearest ancestor
1697         * which can execute.
1698         *
1699         * As writes to "cpus" or "mems" may restore @cs's execution
1700         * resources, wait for the previously scheduled operations before
1701         * proceeding, so that we don't keep removing tasks added
1702         * after execution capability is restored.
1703         */
1704        flush_work(&cpuset_hotplug_work);
1705
1706        mutex_lock(&cpuset_mutex);
1707        if (!is_cpuset_online(cs))
1708                goto out_unlock;
1709
1710        trialcs = alloc_trial_cpuset(cs);
1711        if (!trialcs) {
1712                retval = -ENOMEM;
1713                goto out_unlock;
1714        }
1715
1716        switch (cft->private) {
1717        case FILE_CPULIST:
1718                retval = update_cpumask(cs, trialcs, buf);
1719                break;
1720        case FILE_MEMLIST:
1721                retval = update_nodemask(cs, trialcs, buf);
1722                break;
1723        default:
1724                retval = -EINVAL;
1725                break;
1726        }
1727
1728        free_trial_cpuset(trialcs);
1729out_unlock:
1730        mutex_unlock(&cpuset_mutex);
1731        return retval;
1732}
1733
1734/*
1735 * These ascii lists should be read in a single call, by using a user
1736 * buffer large enough to hold the entire map.  If read in smaller
1737 * chunks, there is no guarantee of atomicity.  Since the display format
1738 * used, list of ranges of sequential numbers, is variable length,
1739 * and since these maps can change value dynamically, one could read
1740 * gibberish by doing partial reads while a list was changing.
1741 * A single large read to a buffer that crosses a page boundary is
1742 * ok, because the result being copied to user land is not recomputed
1743 * across a page fault.
1744 */
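/*
 * For example, reading the "cpus" file of a cpuset allowed CPUs 0-5,
 * 8, 10 and 11 returns the single line "0-5,8,10-11\n", and reading
 * "mems" for nodes 0 and 2 returns "0,2\n" - ascending, comma
 * separated ranges produced by cpulist_scnprintf() and
 * nodelist_scnprintf() below.
 */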
1745
1746static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1747{
1748        size_t count;
1749
1750        mutex_lock(&callback_mutex);
1751        count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
1752        mutex_unlock(&callback_mutex);
1753
1754        return count;
1755}
1756
1757static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1758{
1759        size_t count;
1760
1761        mutex_lock(&callback_mutex);
1762        count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
1763        mutex_unlock(&callback_mutex);
1764
1765        return count;
1766}
1767
1768static ssize_t cpuset_common_file_read(struct cgroup *cgrp,
1769                                       struct cftype *cft,
1770                                       struct file *file,
1771                                       char __user *buf,
1772                                       size_t nbytes, loff_t *ppos)
1773{
1774        struct cpuset *cs = cgroup_cs(cgrp);
1775        cpuset_filetype_t type = cft->private;
1776        char *page;
1777        ssize_t retval = 0;
1778        char *s;
1779
1780        if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
1781                return -ENOMEM;
1782
1783        s = page;
1784
1785        switch (type) {
1786        case FILE_CPULIST:
1787                s += cpuset_sprintf_cpulist(s, cs);
1788                break;
1789        case FILE_MEMLIST:
1790                s += cpuset_sprintf_memlist(s, cs);
1791                break;
1792        default:
1793                retval = -EINVAL;
1794                goto out;
1795        }
1796        *s++ = '\n';
1797
1798        retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1799out:
1800        free_page((unsigned long)page);
1801        return retval;
1802}
1803
1804static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
1805{
1806        struct cpuset *cs = cgroup_cs(cgrp);
1807        cpuset_filetype_t type = cft->private;
1808        switch (type) {
1809        case FILE_CPU_EXCLUSIVE:
1810                return is_cpu_exclusive(cs);
1811        case FILE_MEM_EXCLUSIVE:
1812                return is_mem_exclusive(cs);
1813        case FILE_MEM_HARDWALL:
1814                return is_mem_hardwall(cs);
1815        case FILE_SCHED_LOAD_BALANCE:
1816                return is_sched_load_balance(cs);
1817        case FILE_MEMORY_MIGRATE:
1818                return is_memory_migrate(cs);
1819        case FILE_MEMORY_PRESSURE_ENABLED:
1820                return cpuset_memory_pressure_enabled;
1821        case FILE_MEMORY_PRESSURE:
1822                return fmeter_getrate(&cs->fmeter);
1823        case FILE_SPREAD_PAGE:
1824                return is_spread_page(cs);
1825        case FILE_SPREAD_SLAB:
1826                return is_spread_slab(cs);
1827        default:
1828                BUG();
1829        }
1830
1831        /* Unreachable but makes gcc happy */
1832        return 0;
1833}
1834
1835static s64 cpuset_read_s64(struct cgroup *cgrp, struct cftype *cft)
1836{
1837        struct cpuset *cs = cgroup_cs(cgrp);
1838        cpuset_filetype_t type = cft->private;
1839        switch (type) {
1840        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1841                return cs->relax_domain_level;
1842        default:
1843                BUG();
1844        }
1845
1846        /* Unreachable but makes gcc happy */
1847        return 0;
1848}
1849
1850
1851/*
1852 * for the common functions, 'private' gives the type of file
1853 */
1854
1855static struct cftype files[] = {
1856        {
1857                .name = "cpus",
1858                .read = cpuset_common_file_read,
1859                .write_string = cpuset_write_resmask,
1860                .max_write_len = (100U + 6 * NR_CPUS),
1861                .private = FILE_CPULIST,
1862        },
1863
1864        {
1865                .name = "mems",
1866                .read = cpuset_common_file_read,
1867                .write_string = cpuset_write_resmask,
1868                .max_write_len = (100U + 6 * MAX_NUMNODES),
1869                .private = FILE_MEMLIST,
1870        },
1871
1872        {
1873                .name = "cpu_exclusive",
1874                .read_u64 = cpuset_read_u64,
1875                .write_u64 = cpuset_write_u64,
1876                .private = FILE_CPU_EXCLUSIVE,
1877        },
1878
1879        {
1880                .name = "mem_exclusive",
1881                .read_u64 = cpuset_read_u64,
1882                .write_u64 = cpuset_write_u64,
1883                .private = FILE_MEM_EXCLUSIVE,
1884        },
1885
1886        {
1887                .name = "mem_hardwall",
1888                .read_u64 = cpuset_read_u64,
1889                .write_u64 = cpuset_write_u64,
1890                .private = FILE_MEM_HARDWALL,
1891        },
1892
1893        {
1894                .name = "sched_load_balance",
1895                .read_u64 = cpuset_read_u64,
1896                .write_u64 = cpuset_write_u64,
1897                .private = FILE_SCHED_LOAD_BALANCE,
1898        },
1899
1900        {
1901                .name = "sched_relax_domain_level",
1902                .read_s64 = cpuset_read_s64,
1903                .write_s64 = cpuset_write_s64,
1904                .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1905        },
1906
1907        {
1908                .name = "memory_migrate",
1909                .read_u64 = cpuset_read_u64,
1910                .write_u64 = cpuset_write_u64,
1911                .private = FILE_MEMORY_MIGRATE,
1912        },
1913
1914        {
1915                .name = "memory_pressure",
1916                .read_u64 = cpuset_read_u64,
1917                .write_u64 = cpuset_write_u64,
1918                .private = FILE_MEMORY_PRESSURE,
1919                .mode = S_IRUGO,
1920        },
1921
1922        {
1923                .name = "memory_spread_page",
1924                .read_u64 = cpuset_read_u64,
1925                .write_u64 = cpuset_write_u64,
1926                .private = FILE_SPREAD_PAGE,
1927        },
1928
1929        {
1930                .name = "memory_spread_slab",
1931                .read_u64 = cpuset_read_u64,
1932                .write_u64 = cpuset_write_u64,
1933                .private = FILE_SPREAD_SLAB,
1934        },
1935
1936        {
1937                .name = "memory_pressure_enabled",
1938                .flags = CFTYPE_ONLY_ON_ROOT,
1939                .read_u64 = cpuset_read_u64,
1940                .write_u64 = cpuset_write_u64,
1941                .private = FILE_MEMORY_PRESSURE_ENABLED,
1942        },
1943
1944        { }     /* terminate */
1945};
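/*
 * Rough sketch of how the control files above are driven from user
 * space, assuming the cpuset hierarchy is mounted at
 * /sys/fs/cgroup/cpuset (the mount point, and whether the "cpuset."
 * prefix appears, depend on how the hierarchy was mounted):
 *
 *	mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset
 *	mkdir /sys/fs/cgroup/cpuset/mygroup
 *	echo 2-3 > /sys/fs/cgroup/cpuset/mygroup/cpuset.cpus
 *	echo 0   > /sys/fs/cgroup/cpuset/mygroup/cpuset.mems
 *	echo $$  > /sys/fs/cgroup/cpuset/mygroup/tasks
 *
 * The "cpus"/"mems" writes are handled by cpuset_write_resmask(), the
 * boolean flag files by cpuset_write_u64(), and reads come back via
 * cpuset_common_file_read() and cpuset_read_u64().
 */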
1946
1947/*
1948 *      cpuset_css_alloc - allocate a cpuset css
1949 *      cgrp:   control group that the new cpuset will be part of
1950 */
1951
1952static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
1953{
1954        struct cpuset *cs;
1955
1956        if (!cgrp->parent)
1957                return &top_cpuset.css;
1958
1959        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
1960        if (!cs)
1961                return ERR_PTR(-ENOMEM);
1962        if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1963                kfree(cs);
1964                return ERR_PTR(-ENOMEM);
1965        }
1966
1967        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1968        cpumask_clear(cs->cpus_allowed);
1969        nodes_clear(cs->mems_allowed);
1970        fmeter_init(&cs->fmeter);
1971        cs->relax_domain_level = -1;
1972
1973        return &cs->css;
1974}
1975
1976static int cpuset_css_online(struct cgroup *cgrp)
1977{
1978        struct cpuset *cs = cgroup_cs(cgrp);
1979        struct cpuset *parent = parent_cs(cs);
1980        struct cpuset *tmp_cs;
1981        struct cgroup *pos_cg;
1982
1983        if (!parent)
1984                return 0;
1985
1986        mutex_lock(&cpuset_mutex);
1987
1988        set_bit(CS_ONLINE, &cs->flags);
1989        if (is_spread_page(parent))
1990                set_bit(CS_SPREAD_PAGE, &cs->flags);
1991        if (is_spread_slab(parent))
1992                set_bit(CS_SPREAD_SLAB, &cs->flags);
1993
1994        number_of_cpusets++;
1995
1996        if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
1997                goto out_unlock;
1998
1999        /*
2000         * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
2001         * set.  This flag handling is implemented in cgroup core for
2002         * historical reasons - the flag may be specified during mount.
2003         *
2004         * Currently, if any sibling cpusets have exclusive cpus or mem, we
2005         * refuse to clone the configuration - thereby refusing the task to
2006         * be entered, and as a result refusing the sys_unshare() or
2007         * clone() which initiated it.  If this becomes a problem for some
2008         * users who wish to allow that scenario, then this could be
2009         * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2010         * (and likewise for mems) to the new cgroup.
2011         */
2012        rcu_read_lock();
2013        cpuset_for_each_child(tmp_cs, pos_cg, parent) {
2014                if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2015                        rcu_read_unlock();
2016                        goto out_unlock;
2017                }
2018        }
2019        rcu_read_unlock();
2020
2021        mutex_lock(&callback_mutex);
2022        cs->mems_allowed = parent->mems_allowed;
2023        cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2024        mutex_unlock(&callback_mutex);
2025out_unlock:
2026        mutex_unlock(&cpuset_mutex);
2027        return 0;
2028}
2029
2030static void cpuset_css_offline(struct cgroup *cgrp)
2031{
2032        struct cpuset *cs = cgroup_cs(cgrp);
2033
2034        mutex_lock(&cpuset_mutex);
2035
2036        if (is_sched_load_balance(cs))
2037                update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2038
2039        number_of_cpusets--;
2040        clear_bit(CS_ONLINE, &cs->flags);
2041
2042        mutex_unlock(&cpuset_mutex);
2043}
2044
2045/*
2046 * If the cpuset being removed has its flag 'sched_load_balance'
2047 * enabled, then simulate turning sched_load_balance off, which
2048 * will call rebuild_sched_domains_locked().
2049 */
2050
2051static void cpuset_css_free(struct cgroup *cgrp)
2052{
2053        struct cpuset *cs = cgroup_cs(cgrp);
2054
2055        free_cpumask_var(cs->cpus_allowed);
2056        kfree(cs);
2057}
2058
2059struct cgroup_subsys cpuset_subsys = {
2060        .name = "cpuset",
2061        .css_alloc = cpuset_css_alloc,
2062        .css_online = cpuset_css_online,
2063        .css_offline = cpuset_css_offline,
2064        .css_free = cpuset_css_free,
2065        .can_attach = cpuset_can_attach,
2066        .cancel_attach = cpuset_cancel_attach,
2067        .attach = cpuset_attach,
2068        .subsys_id = cpuset_subsys_id,
2069        .base_cftypes = files,
2070        .early_init = 1,
2071};
2072
2073/**
2074 * cpuset_init - initialize cpusets at system boot
2075 *
2076 * Description: Initialize top_cpuset and the cpuset internal file system,
2077 * Description: Initialize top_cpuset and the cpuset internal file system.
2078
2079int __init cpuset_init(void)
2080{
2081        int err = 0;
2082
2083        if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
2084                BUG();
2085
2086        cpumask_setall(top_cpuset.cpus_allowed);
2087        nodes_setall(top_cpuset.mems_allowed);
2088
2089        fmeter_init(&top_cpuset.fmeter);
2090        set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
2091        top_cpuset.relax_domain_level = -1;
2092
2093        err = register_filesystem(&cpuset_fs_type);
2094        if (err < 0)
2095                return err;
2096
2097        if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
2098                BUG();
2099
2100        number_of_cpusets = 1;
2101        return 0;
2102}
2103
2104/*
2105 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
2106 * or memory nodes, we need to walk over the cpuset hierarchy,
2107 * removing that CPU or node from all cpusets.  If this removes the
2108 * last CPU or node from a cpuset, then move the tasks in the empty
2109 * cpuset to its next-highest non-empty parent.
2110 */
2111static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2112{
2113        struct cpuset *parent;
2114
2115        /*
2116         * Find its next-highest non-empty parent (the top cpuset
2117         * has online cpus, so it can't be empty).
2118         */
2119        parent = parent_cs(cs);
2120        while (cpumask_empty(parent->cpus_allowed) ||
2121                        nodes_empty(parent->mems_allowed))
2122                parent = parent_cs(parent);
2123
2124        if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2125                rcu_read_lock();
2126                printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n",
2127                       cgroup_name(cs->css.cgroup));
2128                rcu_read_unlock();
2129        }
2130}
2131
2132/**
2133 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
2134 * @cs: cpuset of interest
2135 *
2136 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2137 * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
2138 * all its tasks are moved to the nearest ancestor with both resources.
2139 */
2140static void cpuset_hotplug_update_tasks(struct cpuset *cs)
2141{
2142        static cpumask_t off_cpus;
2143        static nodemask_t off_mems;
2144        bool is_empty;
2145        bool sane = cgroup_sane_behavior(cs->css.cgroup);
2146
2147retry:
2148        wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
2149
2150        mutex_lock(&cpuset_mutex);
2151
2152        /*
2153         * We have raced with task attaching. We wait until attaching
2154         * is finished, so we won't attach a task to an empty cpuset.
2155         */
2156        if (cs->attach_in_progress) {
2157                mutex_unlock(&cpuset_mutex);
2158                goto retry;
2159        }
2160
2161        cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
2162        nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
2163
2164        mutex_lock(&callback_mutex);
2165        cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
2166        mutex_unlock(&callback_mutex);
2167
2168        /*
2169         * If sane_behavior flag is set, we need to update tasks' cpumask
2170         * for empty cpuset to take on ancestor's cpumask. Otherwise, don't
2171         * call update_tasks_cpumask() if the cpuset becomes empty, as
2172         * the tasks in it will be migrated to an ancestor.
2173         */
2174        if ((sane && cpumask_empty(cs->cpus_allowed)) ||
2175            (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
2176                update_tasks_cpumask(cs, NULL);
2177
2178        mutex_lock(&callback_mutex);
2179        nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
2180        mutex_unlock(&callback_mutex);
2181
2182        /*
2183         * If sane_behavior flag is set, we need to update tasks' nodemask
2184         * for empty cpuset to take on ancestor's nodemask. Otherwise, don't
2185         * call update_tasks_nodemask() if the cpuset becomes empty, as
2186         * the tasks in it will be migrated to an ancestor.
2187         */
2188        if ((sane && nodes_empty(cs->mems_allowed)) ||
2189            (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
2190                update_tasks_nodemask(cs, NULL);
2191
2192        is_empty = cpumask_empty(cs->cpus_allowed) ||
2193                nodes_empty(cs->mems_allowed);
2194
2195        mutex_unlock(&cpuset_mutex);
2196
2197        /*
2198         * If sane_behavior flag is set, we'll keep tasks in empty cpusets.
2199         *
2200         * Otherwise move tasks to the nearest ancestor with execution
2201         * resources.  This is a full cgroup operation which will
2202         * also call back into cpuset.  Should be done outside any lock.
2203         */
2204        if (!sane && is_empty)
2205                remove_tasks_in_empty_cpuset(cs);
2206}
2207
2208/**
2209 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
2210 *
2211 * This function is called after either CPU or memory configuration has
2212 * changed and updates cpuset accordingly.  The top_cpuset is always
2213 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
2214 * order to make cpusets transparent (of no effect) on systems that are
2215 * actively using CPU hotplug but making no active use of cpusets.
2216 *
2217 * Non-root cpusets are only affected by offlining.  If any CPUs or memory
2218 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
2219 * all descendants.
2220 *
2221 * Note that CPU offlining during suspend is ignored.  We don't modify
2222 * cpusets across suspend/resume cycles at all.
2223 */
2224static void cpuset_hotplug_workfn(struct work_struct *work)
2225{
2226        static cpumask_t new_cpus;
2227        static nodemask_t new_mems;
2228        bool cpus_updated, mems_updated;
2229
2230        mutex_lock(&cpuset_mutex);
2231
2232        /* fetch the available cpus/mems and find out which changed how */
2233        cpumask_copy(&new_cpus, cpu_active_mask);
2234        new_mems = node_states[N_MEMORY];
2235
2236        cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
2237        mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
2238
2239        /* synchronize cpus_allowed to cpu_active_mask */
2240        if (cpus_updated) {
2241                mutex_lock(&callback_mutex);
2242                cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2243                mutex_unlock(&callback_mutex);
2244                /* we don't mess with cpumasks of tasks in top_cpuset */
2245        }
2246
2247        /* synchronize mems_allowed to N_MEMORY */
2248        if (mems_updated) {
2249                mutex_lock(&callback_mutex);
2250                top_cpuset.mems_allowed = new_mems;
2251                mutex_unlock(&callback_mutex);
2252                update_tasks_nodemask(&top_cpuset, NULL);
2253        }
2254
2255        mutex_unlock(&cpuset_mutex);
2256
2257        /* if cpus or mems changed, we need to propagate to descendants */
2258        if (cpus_updated || mems_updated) {
2259                struct cpuset *cs;
2260                struct cgroup *pos_cgrp;
2261
2262                rcu_read_lock();
2263                cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) {
2264                        if (!css_tryget(&cs->css))
2265                                continue;
2266                        rcu_read_unlock();
2267
2268                        cpuset_hotplug_update_tasks(cs);
2269
2270                        rcu_read_lock();
2271                        css_put(&cs->css);
2272                }
2273                rcu_read_unlock();
2274        }
2275
2276        /* rebuild sched domains if cpus_allowed has changed */
2277        if (cpus_updated)
2278                rebuild_sched_domains();
2279}
2280
2281void cpuset_update_active_cpus(bool cpu_online)
2282{
2283        /*
2284         * We're inside cpu hotplug critical region which usually nests
2285         * inside cgroup synchronization.  Bounce actual hotplug processing
2286         * to a work item to avoid reverse locking order.
2287         *
2288         * We still need to do partition_sched_domains() synchronously;
2289         * otherwise, the scheduler will get confused and put tasks on the
2290         * dead CPU.  Fall back to the default single domain.
2291         * cpuset_hotplug_workfn() will rebuild it as necessary.
2292         */
2293        partition_sched_domains(1, NULL, NULL);
2294        schedule_work(&cpuset_hotplug_work);
2295}
2296
2297/*
2298 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2299 * Call this routine anytime after node_states[N_MEMORY] changes.
2300 * See cpuset_update_active_cpus() for CPU hotplug handling.
2301 */
2302static int cpuset_track_online_nodes(struct notifier_block *self,
2303                                unsigned long action, void *arg)
2304{
2305        schedule_work(&cpuset_hotplug_work);
2306        return NOTIFY_OK;
2307}
2308
2309static struct notifier_block cpuset_track_online_nodes_nb = {
2310        .notifier_call = cpuset_track_online_nodes,
2311        .priority = 10,         /* ??! */
2312};
2313
2314/**
2315 * cpuset_init_smp - initialize cpus_allowed
2316 *
2317 * Description: Finish top cpuset after cpu, node maps are initialized
2318 */
2319void __init cpuset_init_smp(void)
2320{
2321        cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2322        top_cpuset.mems_allowed = node_states[N_MEMORY];
2323        top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
2324
2325        register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
2326}
2327
2328/**
2329 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
2330 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2331 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2332 *
2333 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2334 * attached to the specified @tsk.  Guaranteed to return some non-empty
2335 * subset of cpu_online_mask, even if this means going outside the
2336 * tasks cpuset.
2337 **/
2338
2339void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2340{
2341        struct cpuset *cpus_cs;
2342
2343        mutex_lock(&callback_mutex);
2344        task_lock(tsk);
2345        cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2346        guarantee_online_cpus(cpus_cs, pmask);
2347        task_unlock(tsk);
2348        mutex_unlock(&callback_mutex);
2349}
2350
2351void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2352{
2353        const struct cpuset *cpus_cs;
2354
2355        rcu_read_lock();
2356        cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2357        do_set_cpus_allowed(tsk, cpus_cs->cpus_allowed);
2358        rcu_read_unlock();
2359
2360        /*
2361         * We own tsk->cpus_allowed, nobody can change it under us.
2362         *
2363         * But we used cs && cs->cpus_allowed lockless and thus can
2364         * race with cgroup_attach_task() or update_cpumask() and get
2365         * the wrong tsk->cpus_allowed. However, both cases imply the
2366         * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2367         * which takes task_rq_lock().
2368         *
2369         * If we are called after it dropped the lock we must see all
2370         * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
2371         * set any mask even if it is not right from task_cs() pov,
2372         * the pending set_cpus_allowed_ptr() will fix things.
2373         *
2374         * select_fallback_rq() will fix things up and set cpu_possible_mask
2375         * if required.
2376         */
2377}
2378
2379void cpuset_init_current_mems_allowed(void)
2380{
2381        nodes_setall(current->mems_allowed);
2382}
2383
2384/**
2385 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
2386 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2387 *
2388 * Description: Returns the nodemask_t mems_allowed of the cpuset
2389 * attached to the specified @tsk.  Guaranteed to return some non-empty
2390 * subset of node_states[N_MEMORY], even if this means going outside the
2391 * tasks cpuset.
2392 **/
2393
2394nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2395{
2396        struct cpuset *mems_cs;
2397        nodemask_t mask;
2398
2399        mutex_lock(&callback_mutex);
2400        task_lock(tsk);
2401        mems_cs = effective_nodemask_cpuset(task_cs(tsk));
2402        guarantee_online_mems(mems_cs, &mask);
2403        task_unlock(tsk);
2404        mutex_unlock(&callback_mutex);
2405
2406        return mask;
2407}
2408
2409/**
2410 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2411 * @nodemask: the nodemask to be checked
2412 *
2413 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2414 */
2415int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2416{
2417        return nodes_intersects(*nodemask, current->mems_allowed);
2418}
2419
2420/*
2421 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2422 * mem_hardwall ancestor to the specified cpuset.  Call holding
2423 * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
2424 * (an unusual configuration), then returns the root cpuset.
2425 */
2426static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2427{
2428        while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
2429                cs = parent_cs(cs);
2430        return cs;
2431}
2432
2433/**
2434 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2435 * @node: is this an allowed node?
2436 * @gfp_mask: memory allocation flags
2437 *
2438 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2439 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2440 * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
2441 * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
2442 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2443 * flag, yes.
2444 * Otherwise, no.
2445 *
2446 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2447 * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
2448 * might sleep, and might allow a node from an enclosing cpuset.
2449 *
2450 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2451 * cpusets, and never sleeps.
2452 *
2453 * The __GFP_THISNODE placement logic is really handled elsewhere,
2454 * by forcibly using a zonelist starting at a specified node, and by
2455 * (in get_page_from_freelist()) refusing to consider the zones for
2456 * any node on the zonelist except the first.  By the time any such
2457 * calls get to this routine, we should just shut up and say 'yes'.
2458 *
2459 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2460 * and do not allow allocations outside the current tasks cpuset
2461 * unless the task has been OOM killed and is marked TIF_MEMDIE.
2462 * GFP_KERNEL allocations are not so marked, so can escape to the
2463 * nearest enclosing hardwalled ancestor cpuset.
2464 *
2465 * Scanning up parent cpusets requires callback_mutex.  The
2466 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2467 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2468 * current tasks mems_allowed came up empty on the first pass over
2469 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2470 * cpuset are short of memory, might require taking the callback_mutex
2471 * mutex.
2472 *
2473 * The first call here from mm/page_alloc:get_page_from_freelist()
2474 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2475 * so no allocation on a node outside the cpuset is allowed (unless
2476 * in interrupt, of course).
2477 *
2478 * The second pass through get_page_from_freelist() doesn't even call
2479 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2480 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2481 * in alloc_flags.  That logic and the checks below have the combined
2482 * effect that:
2483 *      in_interrupt - any node ok (current task context irrelevant)
2484 *      GFP_ATOMIC   - any node ok
2485 *      TIF_MEMDIE   - any node ok
2486 *      GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2487 *      GFP_USER     - only nodes in current tasks mems allowed ok.
2488 *
2489 * Rule:
2490 *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
2491 *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2492 *    the code that might scan up ancestor cpusets and sleep.
2493 */
2494int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2495{
2496        const struct cpuset *cs;        /* current cpuset ancestors */
2497        int allowed;                    /* is allocation in zone z allowed? */
2498
2499        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2500                return 1;
2501        might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2502        if (node_isset(node, current->mems_allowed))
2503                return 1;
2504        /*
2505         * Allow tasks that have access to memory reserves because they have
2506         * been OOM killed to get memory anywhere.
2507         */
2508        if (unlikely(test_thread_flag(TIF_MEMDIE)))
2509                return 1;
2510        if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
2511                return 0;
2512
2513        if (current->flags & PF_EXITING) /* Let dying task have memory */
2514                return 1;
2515
2516        /* Not hardwall and node outside mems_allowed: scan up cpusets */
2517        mutex_lock(&callback_mutex);
2518
2519        task_lock(current);
2520        cs = nearest_hardwall_ancestor(task_cs(current));
2521        task_unlock(current);
2522
2523        allowed = node_isset(node, cs->mems_allowed);
2524        mutex_unlock(&callback_mutex);
2525        return allowed;
2526}
2527
2528/*
2529 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2530 * @node: is this an allowed node?
2531 * @gfp_mask: memory allocation flags
2532 *
2533 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2534 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2535 * yes.  If the task has been OOM killed and has access to memory reserves as
2536 * specified by the TIF_MEMDIE flag, yes.
2537 * Otherwise, no.
2538 *
2539 * The __GFP_THISNODE placement logic is really handled elsewhere,
2540 * by forcibly using a zonelist starting at a specified node, and by
2541 * (in get_page_from_freelist()) refusing to consider the zones for
2542 * any node on the zonelist except the first.  By the time any such
2543 * calls get to this routine, we should just shut up and say 'yes'.
2544 *
2545 * Unlike the cpuset_node_allowed_softwall() variant, above,
2546 * this variant requires that the node be in the current task's
2547 * mems_allowed or that we're in interrupt.  It does not scan up the
2548 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2549 * It never sleeps.
2550 */
2551int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2552{
2553        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2554                return 1;
2555        if (node_isset(node, current->mems_allowed))
2556                return 1;
2557        /*
2558         * Allow tasks that have access to memory reserves because they have
2559         * been OOM killed to get memory anywhere.
2560         */
2561        if (unlikely(test_thread_flag(TIF_MEMDIE)))
2562                return 1;
2563        return 0;
2564}
2565
2566/**
2567 * cpuset_mem_spread_node() - On which node to begin search for a file page
2568 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2569 *
2570 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2571 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2572 * and if the memory allocation used cpuset_mem_spread_node()
2573 * to determine on which node to start looking, as it will for
2574 * certain page cache or slab cache pages such as used for file
2575 * system buffers and inode caches, then instead of starting on the
2576 * local node to look for a free page, rather spread the starting
2577 * node around the tasks mems_allowed nodes.
2578 *
2579 * We don't have to worry about the returned node being offline
2580 * because "it can't happen", and even if it did, it would be ok.
2581 *
2582 * The routines calling guarantee_online_mems() are careful to
2583 * only set nodes in task->mems_allowed that are online.  So it
2584 * should not be possible for the following code to return an
2585 * offline node.  But if it did, that would be ok, as this routine
2586 * is not returning the node where the allocation must be, only
2587 * the node where the search should start.  The zonelist passed to
2588 * __alloc_pages() will include all nodes.  If the slab allocator
2589 * is passed an offline node, it will fall back to the local node.
2590 * See kmem_cache_alloc_node().
2591 */
2592
2593static int cpuset_spread_node(int *rotor)
2594{
2595        int node;
2596
2597        node = next_node(*rotor, current->mems_allowed);
2598        if (node == MAX_NUMNODES)
2599                node = first_node(current->mems_allowed);
2600        *rotor = node;
2601        return node;
2602}
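/*
 * For example, with current->mems_allowed = { 0, 2, 5 } and the rotor
 * sitting at node 0, successive calls return 2, 5, 0, 2, 5, ...:
 * next_node() steps forward through the allowed mask and first_node()
 * wraps the rotor back to the lowest allowed node once MAX_NUMNODES
 * is reached.
 */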
2603
2604int cpuset_mem_spread_node(void)
2605{
2606        if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2607                current->cpuset_mem_spread_rotor =
2608                        node_random(&current->mems_allowed);
2609
2610        return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2611}
2612
2613int cpuset_slab_spread_node(void)
2614{
2615        if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2616                current->cpuset_slab_spread_rotor =
2617                        node_random(&current->mems_allowed);
2618
2619        return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2620}
2621
2622EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2623
2624/**
2625 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2626 * @tsk1: pointer to task_struct of some task.
2627 * @tsk2: pointer to task_struct of some other task.
2628 *
2629 * Description: Return true if @tsk1's mems_allowed intersects the
2630 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2631 * one of the task's memory usage might impact the memory available
2632 * to the other.
2633 **/
2634
2635int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2636                                   const struct task_struct *tsk2)
2637{
2638        return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2639}
2640
2641#define CPUSET_NODELIST_LEN     (256)
2642
2643/**
2644 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2645 * @tsk: pointer to task_struct of some task.
2646 *
2647 * Description: Prints @tsk's name, cpuset name, and cached copy of its
2648 * mems_allowed to the kernel log.  Must hold task_lock(tsk) to allow
2649 * dereferencing task_cs(tsk).
2650 */
2651void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2652{
2653         /* Statically allocated to prevent using excess stack. */
2654        static char cpuset_nodelist[CPUSET_NODELIST_LEN];
2655        static DEFINE_SPINLOCK(cpuset_buffer_lock);
2656
2657        struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
2658
2659        rcu_read_lock();
2660        spin_lock(&cpuset_buffer_lock);
2661
2662        nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2663                           tsk->mems_allowed);
2664        printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2665               tsk->comm, cgroup_name(cgrp), cpuset_nodelist);
2666
2667        spin_unlock(&cpuset_buffer_lock);
2668        rcu_read_unlock();
2669}
2670
2671/*
2672 * Collection of memory_pressure is suppressed unless
2673 * this flag is enabled by writing "1" to the special
2674 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2675 */
2676
2677int cpuset_memory_pressure_enabled __read_mostly;
2678
2679/**
2680 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2681 *
2682 * Keep a running average of the rate of synchronous (direct)
2683 * page reclaim efforts initiated by tasks in each cpuset.
2684 *
2685 * This represents the rate at which some task in the cpuset
2686 * ran low on memory on all nodes it was allowed to use, and
2687 * had to enter the kernels page reclaim code in an effort to
2688 * create more free memory by tossing clean pages or swapping
2689 * or writing dirty pages.
2690 *
2691 * Display to user space in the per-cpuset read-only file
2692 * "memory_pressure".  Value displayed is an integer
2693 * representing the recent rate of entry into the synchronous
2694 * (direct) page reclaim by any task attached to the cpuset.
2695 **/
2696
2697void __cpuset_memory_pressure_bump(void)
2698{
2699        task_lock(current);
2700        fmeter_markevent(&task_cs(current)->fmeter);
2701        task_unlock(current);
2702}
2703
2704#ifdef CONFIG_PROC_PID_CPUSET
2705/*
2706 * proc_cpuset_show()
2707 *  - Print tasks cpuset path into seq_file.
2708 *  - Used for /proc/<pid>/cpuset.
2709 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2710 *    doesn't really matter if tsk->cpuset changes after we read it,
2711 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
2712 *    anyway.
2713 */
2714int proc_cpuset_show(struct seq_file *m, void *unused_v)
2715{
2716        struct pid *pid;
2717        struct task_struct *tsk;
2718        char *buf;
2719        struct cgroup_subsys_state *css;
2720        int retval;
2721
2722        retval = -ENOMEM;
2723        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2724        if (!buf)
2725                goto out;
2726
2727        retval = -ESRCH;
2728        pid = m->private;
2729        tsk = get_pid_task(pid, PIDTYPE_PID);
2730        if (!tsk)
2731                goto out_free;
2732
2733        rcu_read_lock();
2734        css = task_subsys_state(tsk, cpuset_subsys_id);
2735        retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
2736        rcu_read_unlock();
2737        if (retval < 0)
2738                goto out_put_task;
2739        seq_puts(m, buf);
2740        seq_putc(m, '\n');
2741out_put_task:
2742        put_task_struct(tsk);
2743out_free:
2744        kfree(buf);
2745out:
2746        return retval;
2747}
2748#endif /* CONFIG_PROC_PID_CPUSET */
2749
2750/* Display task mems_allowed in /proc/<pid>/status file. */
2751void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2752{
2753        seq_printf(m, "Mems_allowed:\t");
2754        seq_nodemask(m, &task->mems_allowed);
2755        seq_printf(m, "\n");
2756        seq_printf(m, "Mems_allowed_list:\t");
2757        seq_nodemask_list(m, &task->mems_allowed);
2758        seq_printf(m, "\n");
2759}
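/*
 * For a task allowed memory nodes 0 and 1, the two lines above come
 * out roughly as follows (the width of the hex mask depends on
 * MAX_NUMNODES):
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 *
 * seq_nodemask() prints the raw bitmap in hex words, while
 * seq_nodemask_list() prints the same set as a range list.
 */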
2760