   1/*
   2 *  kernel/cpuset.c
   3 *
   4 *  Processor and Memory placement constraints for sets of tasks.
   5 *
   6 *  Copyright (C) 2003 BULL SA.
   7 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
   8 *  Copyright (C) 2006 Google, Inc
   9 *
  10 *  Portions derived from Patrick Mochel's sysfs code.
  11 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
  12 *
  13 *  2003-10-10 Written by Simon Derr.
  14 *  2003-10-22 Updates by Stephen Hemminger.
  15 *  2004 May-July Rework by Paul Jackson.
  16 *  2006 Rework by Paul Menage to use generic cgroups
  17 *  2008 Rework of the scheduler domains and CPU hotplug handling
  18 *       by Max Krasnyansky
  19 *
  20 *  This file is subject to the terms and conditions of the GNU General Public
  21 *  License.  See the file COPYING in the main directory of the Linux
  22 *  distribution for more details.
  23 */
  24
  25#include <linux/cpu.h>
  26#include <linux/cpumask.h>
  27#include <linux/cpuset.h>
  28#include <linux/err.h>
  29#include <linux/errno.h>
  30#include <linux/file.h>
  31#include <linux/fs.h>
  32#include <linux/init.h>
  33#include <linux/interrupt.h>
  34#include <linux/kernel.h>
  35#include <linux/kmod.h>
  36#include <linux/list.h>
  37#include <linux/mempolicy.h>
  38#include <linux/mm.h>
  39#include <linux/memory.h>
  40#include <linux/export.h>
  41#include <linux/mount.h>
  42#include <linux/namei.h>
  43#include <linux/pagemap.h>
  44#include <linux/proc_fs.h>
  45#include <linux/rcupdate.h>
  46#include <linux/sched.h>
  47#include <linux/seq_file.h>
  48#include <linux/security.h>
  49#include <linux/slab.h>
  50#include <linux/spinlock.h>
  51#include <linux/stat.h>
  52#include <linux/string.h>
  53#include <linux/time.h>
  54#include <linux/backing-dev.h>
  55#include <linux/sort.h>
  56
  57#include <asm/uaccess.h>
  58#include <linux/atomic.h>
  59#include <linux/mutex.h>
  60#include <linux/workqueue.h>
  61#include <linux/cgroup.h>
  62
  63/*
  64 * Workqueue for cpuset related tasks.
  65 *
   66 * Using the kevent workqueue may cause a deadlock when memory_migrate
   67 * is set, so we create a separate workqueue thread for cpuset.
  68 */
  69static struct workqueue_struct *cpuset_wq;
  70
  71/*
   72 * Tracks how many cpusets are currently defined in the system.
  73 * When there is only one cpuset (the root cpuset) we can
  74 * short circuit some hooks.
  75 */
  76int number_of_cpusets __read_mostly;
  77
  78/* Forward declare cgroup structures */
  79struct cgroup_subsys cpuset_subsys;
  80struct cpuset;
  81
  82/* See "Frequency meter" comments, below. */
  83
  84struct fmeter {
  85        int cnt;                /* unprocessed events count */
  86        int val;                /* most recent output value */
  87        time_t time;            /* clock (secs) when val computed */
  88        spinlock_t lock;        /* guards read or write of above */
  89};
  90
  91struct cpuset {
  92        struct cgroup_subsys_state css;
  93
  94        unsigned long flags;            /* "unsigned long" so bitops work */
  95        cpumask_var_t cpus_allowed;     /* CPUs allowed to tasks in cpuset */
  96        nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */
  97
  98        struct cpuset *parent;          /* my parent */
  99
 100        struct fmeter fmeter;           /* memory_pressure filter */
 101
 102        /* partition number for rebuild_sched_domains() */
 103        int pn;
 104
 105        /* for custom sched domain */
 106        int relax_domain_level;
 107
 108        /* used for walking a cpuset hierarchy */
 109        struct list_head stack_list;
 110};
 111
 112/* Retrieve the cpuset for a cgroup */
 113static inline struct cpuset *cgroup_cs(struct cgroup *cont)
 114{
 115        return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
 116                            struct cpuset, css);
 117}
 118
 119/* Retrieve the cpuset for a task */
 120static inline struct cpuset *task_cs(struct task_struct *task)
 121{
 122        return container_of(task_subsys_state(task, cpuset_subsys_id),
 123                            struct cpuset, css);
 124}
 125
 126#ifdef CONFIG_NUMA
 127static inline bool task_has_mempolicy(struct task_struct *task)
 128{
 129        return task->mempolicy;
 130}
 131#else
 132static inline bool task_has_mempolicy(struct task_struct *task)
 133{
 134        return false;
 135}
 136#endif
 137
 138
 139/* bits in struct cpuset flags field */
 140typedef enum {
 141        CS_CPU_EXCLUSIVE,
 142        CS_MEM_EXCLUSIVE,
 143        CS_MEM_HARDWALL,
 144        CS_MEMORY_MIGRATE,
 145        CS_SCHED_LOAD_BALANCE,
 146        CS_SPREAD_PAGE,
 147        CS_SPREAD_SLAB,
 148} cpuset_flagbits_t;
 149
 150/* convenient tests for these bits */
 151static inline int is_cpu_exclusive(const struct cpuset *cs)
 152{
 153        return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
 154}
 155
 156static inline int is_mem_exclusive(const struct cpuset *cs)
 157{
 158        return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 159}
 160
 161static inline int is_mem_hardwall(const struct cpuset *cs)
 162{
 163        return test_bit(CS_MEM_HARDWALL, &cs->flags);
 164}
 165
 166static inline int is_sched_load_balance(const struct cpuset *cs)
 167{
 168        return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 169}
 170
 171static inline int is_memory_migrate(const struct cpuset *cs)
 172{
 173        return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
 174}
 175
 176static inline int is_spread_page(const struct cpuset *cs)
 177{
 178        return test_bit(CS_SPREAD_PAGE, &cs->flags);
 179}
 180
 181static inline int is_spread_slab(const struct cpuset *cs)
 182{
 183        return test_bit(CS_SPREAD_SLAB, &cs->flags);
 184}
 185
 186static struct cpuset top_cpuset = {
 187        .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
 188};
 189
 190/*
 191 * There are two global mutexes guarding cpuset structures.  The first
 192 * is the main control groups cgroup_mutex, accessed via
 193 * cgroup_lock()/cgroup_unlock().  The second is the cpuset-specific
 194 * callback_mutex, below. They can nest.  It is ok to first take
 195 * cgroup_mutex, then nest callback_mutex.  We also require taking
 196 * task_lock() when dereferencing a task's cpuset pointer.  See "The
 197 * task_lock() exception", at the end of this comment.
 198 *
 199 * A task must hold both mutexes to modify cpusets.  If a task
 200 * holds cgroup_mutex, then it blocks others wanting that mutex,
 201 * ensuring that it is the only task able to also acquire callback_mutex
 202 * and be able to modify cpusets.  It can perform various checks on
 203 * the cpuset structure first, knowing nothing will change.  It can
 204 * also allocate memory while just holding cgroup_mutex.  While it is
 205 * performing these checks, various callback routines can briefly
 206 * acquire callback_mutex to query cpusets.  Once it is ready to make
 207 * the changes, it takes callback_mutex, blocking everyone else.
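 *
 * A minimal sketch of that nesting (an illustration of the rule above,
 * not a code path copied from this file):
 *
 *	cgroup_lock();                   take cgroup_mutex, validate, allocate
 *	mutex_lock(&callback_mutex);     publish the modification
 *	  ...update cpus_allowed, mems_allowed or flags...
 *	mutex_unlock(&callback_mutex);
 *	cgroup_unlock();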
 208 *
 209 * Calls to the kernel memory allocator can not be made while holding
 210 * callback_mutex, as that would risk double tripping on callback_mutex
 211 * from one of the callbacks into the cpuset code from within
 212 * __alloc_pages().
 213 *
 214 * If a task is only holding callback_mutex, then it has read-only
 215 * access to cpusets.
 216 *
  217 * The task_struct fields mems_allowed and mempolicy may be changed
  218 * by another task, so we use alloc_lock in the task_struct to protect
  219 * them.
 220 *
 221 * The cpuset_common_file_read() handlers only hold callback_mutex across
 222 * small pieces of code, such as when reading out possibly multi-word
 223 * cpumasks and nodemasks.
 224 *
 225 * Accessing a task's cpuset should be done in accordance with the
 226 * guidelines for accessing subsystem state in kernel/cgroup.c
 227 */
 228
 229static DEFINE_MUTEX(callback_mutex);
 230
 231/*
 232 * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
 233 * buffers.  They are statically allocated to prevent using excess stack
 234 * when calling cpuset_print_task_mems_allowed().
 235 */
 236#define CPUSET_NAME_LEN         (128)
 237#define CPUSET_NODELIST_LEN     (256)
 238static char cpuset_name[CPUSET_NAME_LEN];
 239static char cpuset_nodelist[CPUSET_NODELIST_LEN];
 240static DEFINE_SPINLOCK(cpuset_buffer_lock);
 241
 242/*
 243 * This is ugly, but preserves the userspace API for existing cpuset
 244 * users. If someone tries to mount the "cpuset" filesystem, we
 245 * silently switch it to mount "cgroup" instead
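 *
 * For example (an illustrative invocation; the mount point is arbitrary):
 *
 *	mount -t cpuset cpuset /dev/cpuset
 *
 * behaves like
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent cgroup /dev/cpuset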
 246 */
 247static struct dentry *cpuset_mount(struct file_system_type *fs_type,
 248                         int flags, const char *unused_dev_name, void *data)
 249{
 250        struct file_system_type *cgroup_fs = get_fs_type("cgroup");
 251        struct dentry *ret = ERR_PTR(-ENODEV);
 252        if (cgroup_fs) {
 253                char mountopts[] =
 254                        "cpuset,noprefix,"
 255                        "release_agent=/sbin/cpuset_release_agent";
 256                ret = cgroup_fs->mount(cgroup_fs, flags,
 257                                           unused_dev_name, mountopts);
 258                put_filesystem(cgroup_fs);
 259        }
 260        return ret;
 261}
 262
 263static struct file_system_type cpuset_fs_type = {
 264        .name = "cpuset",
 265        .mount = cpuset_mount,
 266};
 267
 268/*
  269 * Return in pmask the portion of a cpuset's cpus_allowed that
 270 * are online.  If none are online, walk up the cpuset hierarchy
 271 * until we find one that does have some online cpus.  If we get
 272 * all the way to the top and still haven't found any online cpus,
 273 * return cpu_online_mask.  Or if passed a NULL cs from an exit'ing
 274 * task, return cpu_online_mask.
 275 *
 276 * One way or another, we guarantee to return some non-empty subset
 277 * of cpu_online_mask.
 278 *
 279 * Call with callback_mutex held.
 280 */
 281
 282static void guarantee_online_cpus(const struct cpuset *cs,
 283                                  struct cpumask *pmask)
 284{
 285        while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 286                cs = cs->parent;
 287        if (cs)
 288                cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
 289        else
 290                cpumask_copy(pmask, cpu_online_mask);
 291        BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
 292}
 293
 294/*
  295 * Return in *pmask the portion of a cpuset's mems_allowed that
 296 * are online, with memory.  If none are online with memory, walk
 297 * up the cpuset hierarchy until we find one that does have some
 298 * online mems.  If we get all the way to the top and still haven't
 299 * found any online mems, return node_states[N_HIGH_MEMORY].
 300 *
 301 * One way or another, we guarantee to return some non-empty subset
 302 * of node_states[N_HIGH_MEMORY].
 303 *
 304 * Call with callback_mutex held.
 305 */
 306
 307static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 308{
 309        while (cs && !nodes_intersects(cs->mems_allowed,
 310                                        node_states[N_HIGH_MEMORY]))
 311                cs = cs->parent;
 312        if (cs)
 313                nodes_and(*pmask, cs->mems_allowed,
 314                                        node_states[N_HIGH_MEMORY]);
 315        else
 316                *pmask = node_states[N_HIGH_MEMORY];
 317        BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
 318}
 319
 320/*
 321 * update task's spread flag if cpuset's page/slab spread flag is set
 322 *
 323 * Called with callback_mutex/cgroup_mutex held
 324 */
 325static void cpuset_update_task_spread_flag(struct cpuset *cs,
 326                                        struct task_struct *tsk)
 327{
 328        if (is_spread_page(cs))
 329                tsk->flags |= PF_SPREAD_PAGE;
 330        else
 331                tsk->flags &= ~PF_SPREAD_PAGE;
 332        if (is_spread_slab(cs))
 333                tsk->flags |= PF_SPREAD_SLAB;
 334        else
 335                tsk->flags &= ~PF_SPREAD_SLAB;
 336}
 337
 338/*
 339 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 340 *
 341 * One cpuset is a subset of another if all its allowed CPUs and
 342 * Memory Nodes are a subset of the other, and its exclusive flags
 343 * are only set if the other's are set.  Call holding cgroup_mutex.
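 *
 * For example (hypothetical masks): a cpuset with cpus 0-1 and mem 0 is a
 * subset of one with cpus 0-3 and mems 0-1, provided it does not claim
 * cpu_exclusive or mem_exclusive while the larger cpuset leaves them clear.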
 344 */
 345
 346static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 347{
 348        return  cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 349                nodes_subset(p->mems_allowed, q->mems_allowed) &&
 350                is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 351                is_mem_exclusive(p) <= is_mem_exclusive(q);
 352}
 353
 354/**
 355 * alloc_trial_cpuset - allocate a trial cpuset
 356 * @cs: the cpuset that the trial cpuset duplicates
 357 */
 358static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
 359{
 360        struct cpuset *trial;
 361
 362        trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
 363        if (!trial)
 364                return NULL;
 365
 366        if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
 367                kfree(trial);
 368                return NULL;
 369        }
 370        cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
 371
 372        return trial;
 373}
 374
 375/**
 376 * free_trial_cpuset - free the trial cpuset
 377 * @trial: the trial cpuset to be freed
 378 */
 379static void free_trial_cpuset(struct cpuset *trial)
 380{
 381        free_cpumask_var(trial->cpus_allowed);
 382        kfree(trial);
 383}
 384
 385/*
 386 * validate_change() - Used to validate that any proposed cpuset change
 387 *                     follows the structural rules for cpusets.
 388 *
 389 * If we replaced the flag and mask values of the current cpuset
 390 * (cur) with those values in the trial cpuset (trial), would
 391 * our various subset and exclusive rules still be valid?  Presumes
 392 * cgroup_mutex held.
 393 *
 394 * 'cur' is the address of an actual, in-use cpuset.  Operations
 395 * such as list traversal that depend on the actual address of the
 396 * cpuset in the list must use cur below, not trial.
 397 *
  398 * 'trial' is the address of a bulk structure copy of cur, with
 399 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 400 * or flags changed to new, trial values.
 401 *
 402 * Return 0 if valid, -errno if not.
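 *
 * For example (illustrative cases of the checks below): emptying the
 * cpus_allowed of a cpuset that still contains tasks is rejected with
 * -ENOSPC, and a trial mask that is not a subset of the parent's fails
 * with -EACCES.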
 403 */
 404
 405static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 406{
 407        struct cgroup *cont;
 408        struct cpuset *c, *par;
 409
 410        /* Each of our child cpusets must be a subset of us */
 411        list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
 412                if (!is_cpuset_subset(cgroup_cs(cont), trial))
 413                        return -EBUSY;
 414        }
 415
 416        /* Remaining checks don't apply to root cpuset */
 417        if (cur == &top_cpuset)
 418                return 0;
 419
 420        par = cur->parent;
 421
 422        /* We must be a subset of our parent cpuset */
 423        if (!is_cpuset_subset(trial, par))
 424                return -EACCES;
 425
 426        /*
 427         * If either I or some sibling (!= me) is exclusive, we can't
 428         * overlap
 429         */
 430        list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
 431                c = cgroup_cs(cont);
 432                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 433                    c != cur &&
 434                    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
 435                        return -EINVAL;
 436                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 437                    c != cur &&
 438                    nodes_intersects(trial->mems_allowed, c->mems_allowed))
 439                        return -EINVAL;
 440        }
 441
 442        /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
 443        if (cgroup_task_count(cur->css.cgroup)) {
 444                if (cpumask_empty(trial->cpus_allowed) ||
 445                    nodes_empty(trial->mems_allowed)) {
 446                        return -ENOSPC;
 447                }
 448        }
 449
 450        return 0;
 451}
 452
 453#ifdef CONFIG_SMP
 454/*
 455 * Helper routine for generate_sched_domains().
 456 * Do cpusets a, b have overlapping cpus_allowed masks?
 457 */
 458static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 459{
 460        return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
 461}
 462
 463static void
 464update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 465{
 466        if (dattr->relax_domain_level < c->relax_domain_level)
 467                dattr->relax_domain_level = c->relax_domain_level;
 468        return;
 469}
 470
 471static void
 472update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
 473{
 474        LIST_HEAD(q);
 475
 476        list_add(&c->stack_list, &q);
 477        while (!list_empty(&q)) {
 478                struct cpuset *cp;
 479                struct cgroup *cont;
 480                struct cpuset *child;
 481
 482                cp = list_first_entry(&q, struct cpuset, stack_list);
 483                list_del(q.next);
 484
 485                if (cpumask_empty(cp->cpus_allowed))
 486                        continue;
 487
 488                if (is_sched_load_balance(cp))
 489                        update_domain_attr(dattr, cp);
 490
 491                list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 492                        child = cgroup_cs(cont);
 493                        list_add_tail(&child->stack_list, &q);
 494                }
 495        }
 496}
 497
 498/*
 499 * generate_sched_domains()
 500 *
  501 * This function builds a partial partition of the system's CPUs.
 502 * A 'partial partition' is a set of non-overlapping subsets whose
 503 * union is a subset of that set.
 504 * The output of this function needs to be passed to kernel/sched.c
 505 * partition_sched_domains() routine, which will rebuild the scheduler's
 506 * load balancing domains (sched domains) as specified by that partial
 507 * partition.
 508 *
 509 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 510 * for a background explanation of this.
 511 *
 512 * Does not return errors, on the theory that the callers of this
 513 * routine would rather not worry about failures to rebuild sched
 514 * domains when operating in the severe memory shortage situations
 515 * that could cause allocation failures below.
 516 *
 517 * Must be called with cgroup_lock held.
 518 *
 519 * The three key local variables below are:
 520 *    q  - a linked-list queue of cpuset pointers, used to implement a
 521 *         top-down scan of all cpusets.  This scan loads a pointer
 522 *         to each cpuset marked is_sched_load_balance into the
 523 *         array 'csa'.  For our purposes, rebuilding the schedulers
  524 *         array 'csa'.  For our purposes, rebuilding the scheduler's
 525 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 526 *         that need to be load balanced, for convenient iterative
 527 *         access by the subsequent code that finds the best partition,
  528 *         i.e. the set of domains (subsets) of CPUs such that the
 529 *         cpus_allowed of every cpuset marked is_sched_load_balance
 530 *         is a subset of one of these domains, while there are as
 531 *         many such domains as possible, each as small as possible.
 532 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 533 *         the kernel/sched.c routine partition_sched_domains() in a
 534 *         convenient format, that can be easily compared to the prior
 535 *         value to determine what partition elements (sched domains)
 536 *         were changed (added or removed.)
 537 *
 538 * Finding the best partition (set of domains):
 539 *      The triple nested loops below over i, j, k scan over the
 540 *      load balanced cpusets (using the array of cpuset pointers in
 541 *      csa[]) looking for pairs of cpusets that have overlapping
 542 *      cpus_allowed, but which don't have the same 'pn' partition
  543 *      number, and merge them into the same partition.  It keeps
 544 *      looping on the 'restart' label until it can no longer find
 545 *      any such pairs.
 546 *
 547 *      The union of the cpus_allowed masks from the set of
 548 *      all cpusets having the same 'pn' value then form the one
 549 *      element of the partition (one sched domain) to be passed to
 550 *      partition_sched_domains().
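 *
 * Worked example (hypothetical cpusets): with load balancing turned off
 * on the root cpuset, two child cpusets A (cpus 0-3) and B (cpus 4-7)
 * that are both marked sched_load_balance and do not overlap keep
 * distinct 'pn' values, so this function returns ndoms == 2 with the
 * domains {0-3} and {4-7}.  If B instead covered cpus 3-7, the overlap
 * would merge the pair into one domain {0-7} and ndoms would be 1.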
 551 */
 552static int generate_sched_domains(cpumask_var_t **domains,
 553                        struct sched_domain_attr **attributes)
 554{
 555        LIST_HEAD(q);           /* queue of cpusets to be scanned */
 556        struct cpuset *cp;      /* scans q */
 557        struct cpuset **csa;    /* array of all cpuset ptrs */
 558        int csn;                /* how many cpuset ptrs in csa so far */
 559        int i, j, k;            /* indices for partition finding loops */
 560        cpumask_var_t *doms;    /* resulting partition; i.e. sched domains */
 561        struct sched_domain_attr *dattr;  /* attributes for custom domains */
 562        int ndoms = 0;          /* number of sched domains in result */
 563        int nslot;              /* next empty doms[] struct cpumask slot */
 564
 565        doms = NULL;
 566        dattr = NULL;
 567        csa = NULL;
 568
 569        /* Special case for the 99% of systems with one, full, sched domain */
 570        if (is_sched_load_balance(&top_cpuset)) {
 571                ndoms = 1;
 572                doms = alloc_sched_domains(ndoms);
 573                if (!doms)
 574                        goto done;
 575
 576                dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 577                if (dattr) {
 578                        *dattr = SD_ATTR_INIT;
 579                        update_domain_attr_tree(dattr, &top_cpuset);
 580                }
 581                cpumask_copy(doms[0], top_cpuset.cpus_allowed);
 582
 583                goto done;
 584        }
 585
 586        csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
 587        if (!csa)
 588                goto done;
 589        csn = 0;
 590
 591        list_add(&top_cpuset.stack_list, &q);
 592        while (!list_empty(&q)) {
 593                struct cgroup *cont;
 594                struct cpuset *child;   /* scans child cpusets of cp */
 595
 596                cp = list_first_entry(&q, struct cpuset, stack_list);
 597                list_del(q.next);
 598
 599                if (cpumask_empty(cp->cpus_allowed))
 600                        continue;
 601
 602                /*
 603                 * All child cpusets contain a subset of the parent's cpus, so
 604                 * just skip them, and then we call update_domain_attr_tree()
 605                 * to calc relax_domain_level of the corresponding sched
 606                 * domain.
 607                 */
 608                if (is_sched_load_balance(cp)) {
 609                        csa[csn++] = cp;
 610                        continue;
 611                }
 612
 613                list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 614                        child = cgroup_cs(cont);
 615                        list_add_tail(&child->stack_list, &q);
 616                }
 617        }
 618
 619        for (i = 0; i < csn; i++)
 620                csa[i]->pn = i;
 621        ndoms = csn;
 622
 623restart:
 624        /* Find the best partition (set of sched domains) */
 625        for (i = 0; i < csn; i++) {
 626                struct cpuset *a = csa[i];
 627                int apn = a->pn;
 628
 629                for (j = 0; j < csn; j++) {
 630                        struct cpuset *b = csa[j];
 631                        int bpn = b->pn;
 632
 633                        if (apn != bpn && cpusets_overlap(a, b)) {
 634                                for (k = 0; k < csn; k++) {
 635                                        struct cpuset *c = csa[k];
 636
 637                                        if (c->pn == bpn)
 638                                                c->pn = apn;
 639                                }
 640                                ndoms--;        /* one less element */
 641                                goto restart;
 642                        }
 643                }
 644        }
 645
 646        /*
 647         * Now we know how many domains to create.
 648         * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 649         */
 650        doms = alloc_sched_domains(ndoms);
 651        if (!doms)
 652                goto done;
 653
 654        /*
 655         * The rest of the code, including the scheduler, can deal with
 656         * dattr==NULL case. No need to abort if alloc fails.
 657         */
 658        dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 659
 660        for (nslot = 0, i = 0; i < csn; i++) {
 661                struct cpuset *a = csa[i];
 662                struct cpumask *dp;
 663                int apn = a->pn;
 664
 665                if (apn < 0) {
 666                        /* Skip completed partitions */
 667                        continue;
 668                }
 669
 670                dp = doms[nslot];
 671
 672                if (nslot == ndoms) {
 673                        static int warnings = 10;
 674                        if (warnings) {
 675                                printk(KERN_WARNING
 676                                 "rebuild_sched_domains confused:"
 677                                  " nslot %d, ndoms %d, csn %d, i %d,"
 678                                  " apn %d\n",
 679                                  nslot, ndoms, csn, i, apn);
 680                                warnings--;
 681                        }
 682                        continue;
 683                }
 684
 685                cpumask_clear(dp);
 686                if (dattr)
 687                        *(dattr + nslot) = SD_ATTR_INIT;
 688                for (j = i; j < csn; j++) {
 689                        struct cpuset *b = csa[j];
 690
 691                        if (apn == b->pn) {
 692                                cpumask_or(dp, dp, b->cpus_allowed);
 693                                if (dattr)
 694                                        update_domain_attr_tree(dattr + nslot, b);
 695
 696                                /* Done with this partition */
 697                                b->pn = -1;
 698                        }
 699                }
 700                nslot++;
 701        }
 702        BUG_ON(nslot != ndoms);
 703
 704done:
 705        kfree(csa);
 706
 707        /*
 708         * Fallback to the default domain if kmalloc() failed.
 709         * See comments in partition_sched_domains().
 710         */
 711        if (doms == NULL)
 712                ndoms = 1;
 713
 714        *domains    = doms;
 715        *attributes = dattr;
 716        return ndoms;
 717}
 718
 719/*
 720 * Rebuild scheduler domains.
 721 *
 722 * Call with neither cgroup_mutex held nor within get_online_cpus().
 723 * Takes both cgroup_mutex and get_online_cpus().
 724 *
 725 * Cannot be directly called from cpuset code handling changes
 726 * to the cpuset pseudo-filesystem, because it cannot be called
 727 * from code that already holds cgroup_mutex.
 728 */
 729static void do_rebuild_sched_domains(struct work_struct *unused)
 730{
 731        struct sched_domain_attr *attr;
 732        cpumask_var_t *doms;
 733        int ndoms;
 734
 735        get_online_cpus();
 736
 737        /* Generate domain masks and attrs */
 738        cgroup_lock();
 739        ndoms = generate_sched_domains(&doms, &attr);
 740        cgroup_unlock();
 741
 742        /* Have scheduler rebuild the domains */
 743        partition_sched_domains(ndoms, doms, attr);
 744
 745        put_online_cpus();
 746}
 747#else /* !CONFIG_SMP */
 748static void do_rebuild_sched_domains(struct work_struct *unused)
 749{
 750}
 751
 752static int generate_sched_domains(cpumask_var_t **domains,
 753                        struct sched_domain_attr **attributes)
 754{
 755        *domains = NULL;
 756        return 1;
 757}
 758#endif /* CONFIG_SMP */
 759
 760static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
 761
 762/*
 763 * Rebuild scheduler domains, asynchronously via workqueue.
 764 *
 765 * If the flag 'sched_load_balance' of any cpuset with non-empty
 766 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 767 * which has that flag enabled, or if any cpuset with a non-empty
 768 * 'cpus' is removed, then call this routine to rebuild the
 769 * scheduler's dynamic sched domains.
 770 *
 771 * The rebuild_sched_domains() and partition_sched_domains()
 772 * routines must nest cgroup_lock() inside get_online_cpus(),
 773 * but such cpuset changes as these must nest that locking the
 774 * other way, holding cgroup_lock() for much of the code.
 775 *
 776 * So in order to avoid an ABBA deadlock, the cpuset code handling
 777 * these user changes delegates the actual sched domain rebuilding
 778 * to a separate workqueue thread, which ends up processing the
 779 * above do_rebuild_sched_domains() function.
 780 */
 781static void async_rebuild_sched_domains(void)
 782{
 783        queue_work(cpuset_wq, &rebuild_sched_domains_work);
 784}
 785
 786/*
 787 * Accomplishes the same scheduler domain rebuild as the above
 788 * async_rebuild_sched_domains(), however it directly calls the
 789 * rebuild routine synchronously rather than calling it via an
 790 * asynchronous work thread.
 791 *
 792 * This can only be called from code that is not holding
 793 * cgroup_mutex (not nested in a cgroup_lock() call.)
 794 */
 795void rebuild_sched_domains(void)
 796{
 797        do_rebuild_sched_domains(NULL);
 798}
 799
 800/**
 801 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
 802 * @tsk: task to test
 803 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
 804 *
 805 * Call with cgroup_mutex held.  May take callback_mutex during call.
 806 * Called for each task in a cgroup by cgroup_scan_tasks().
  807 * Return nonzero if this task's cpus_allowed mask should be changed (in other
 808 * words, if its mask is not equal to its cpuset's mask).
 809 */
 810static int cpuset_test_cpumask(struct task_struct *tsk,
 811                               struct cgroup_scanner *scan)
 812{
 813        return !cpumask_equal(&tsk->cpus_allowed,
 814                        (cgroup_cs(scan->cg))->cpus_allowed);
 815}
 816
 817/**
 818 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 819 * @tsk: task to test
 820 * @scan: struct cgroup_scanner containing the cgroup of the task
 821 *
 822 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 823 * cpus_allowed mask needs to be changed.
 824 *
 825 * We don't need to re-check for the cgroup/cpuset membership, since we're
 826 * holding cgroup_lock() at this point.
 827 */
 828static void cpuset_change_cpumask(struct task_struct *tsk,
 829                                  struct cgroup_scanner *scan)
 830{
 831        set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
 832}
 833
 834/**
 835 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 836 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 837 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 838 *
 839 * Called with cgroup_mutex held
 840 *
 841 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 842 * calling callback functions for each.
 843 *
 844 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 845 * if @heap != NULL.
 846 */
 847static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 848{
 849        struct cgroup_scanner scan;
 850
 851        scan.cg = cs->css.cgroup;
 852        scan.test_task = cpuset_test_cpumask;
 853        scan.process_task = cpuset_change_cpumask;
 854        scan.heap = heap;
 855        cgroup_scan_tasks(&scan);
 856}
 857
 858/**
 859 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 860 * @cs: the cpuset to consider
 861 * @buf: buffer of cpu numbers written to this cpuset
 862 */
 863static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 864                          const char *buf)
 865{
 866        struct ptr_heap heap;
 867        int retval;
 868        int is_load_balanced;
 869
 870        /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
 871        if (cs == &top_cpuset)
 872                return -EACCES;
 873
 874        /*
 875         * An empty cpus_allowed is ok only if the cpuset has no tasks.
 876         * Since cpulist_parse() fails on an empty mask, we special case
 877         * that parsing.  The validate_change() call ensures that cpusets
 878         * with tasks have cpus.
 879         */
 880        if (!*buf) {
 881                cpumask_clear(trialcs->cpus_allowed);
 882        } else {
 883                retval = cpulist_parse(buf, trialcs->cpus_allowed);
 884                if (retval < 0)
 885                        return retval;
 886
 887                if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
 888                        return -EINVAL;
 889        }
 890        retval = validate_change(cs, trialcs);
 891        if (retval < 0)
 892                return retval;
 893
 894        /* Nothing to do if the cpus didn't change */
 895        if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
 896                return 0;
 897
 898        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
 899        if (retval)
 900                return retval;
 901
 902        is_load_balanced = is_sched_load_balance(trialcs);
 903
 904        mutex_lock(&callback_mutex);
 905        cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 906        mutex_unlock(&callback_mutex);
 907
 908        /*
 909         * Scan tasks in the cpuset, and update the cpumasks of any
 910         * that need an update.
 911         */
 912        update_tasks_cpumask(cs, &heap);
 913
 914        heap_free(&heap);
 915
 916        if (is_load_balanced)
 917                async_rebuild_sched_domains();
 918        return 0;
 919}
 920
 921/*
 922 * cpuset_migrate_mm
 923 *
 924 *    Migrate memory region from one set of nodes to another.
 925 *
  926 *    Temporarily set the task's mems_allowed to the target nodes of migration,
 927 *    so that the migration code can allocate pages on these nodes.
 928 *
 929 *    Call holding cgroup_mutex, so current's cpuset won't change
  930 *    during this call, as cgroup_mutex holds off any cpuset_attach()
 931 *    calls.  Therefore we don't need to take task_lock around the
 932 *    call to guarantee_online_mems(), as we know no one is changing
 933 *    our task's cpuset.
 934 *
 935 *    While the mm_struct we are migrating is typically from some
 936 *    other task, the task_struct mems_allowed that we are hacking
 937 *    is for our current task, which must allocate new pages for that
 938 *    migrating memory region.
 939 */
 940
 941static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 942                                                        const nodemask_t *to)
 943{
 944        struct task_struct *tsk = current;
 945
 946        tsk->mems_allowed = *to;
 947
 948        do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 949
  950        guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
 951}
 952
 953/*
 954 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 955 * @tsk: the task to change
  956 * @newmems: the new nodes to be set for the task
 957 *
 958 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 959 * we structure updates as setting all new allowed nodes, then clearing newly
 960 * disallowed ones.
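 *
 * For example (hypothetical masks): moving a task from mems_allowed = {0}
 * to newmems = {1} first widens mems_allowed to {0,1} and only then
 * narrows it to {1}, so a concurrent allocation never observes an empty
 * nodemask.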
 961 */
 962static void cpuset_change_task_nodemask(struct task_struct *tsk,
 963                                        nodemask_t *newmems)
 964{
 965        bool need_loop;
 966
 967        /*
 968         * Allow tasks that have access to memory reserves because they have
 969         * been OOM killed to get memory anywhere.
 970         */
 971        if (unlikely(test_thread_flag(TIF_MEMDIE)))
 972                return;
 973        if (current->flags & PF_EXITING) /* Let dying task have memory */
 974                return;
 975
 976        task_lock(tsk);
 977        /*
 978         * Determine if a loop is necessary if another thread is doing
 979         * get_mems_allowed().  If at least one node remains unchanged and
 980         * tsk does not have a mempolicy, then an empty nodemask will not be
 981         * possible when mems_allowed is larger than a word.
 982         */
 983        need_loop = task_has_mempolicy(tsk) ||
 984                        !nodes_intersects(*newmems, tsk->mems_allowed);
 985
 986        if (need_loop)
 987                write_seqcount_begin(&tsk->mems_allowed_seq);
 988
 989        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 990        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 991
 992        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
 993        tsk->mems_allowed = *newmems;
 994
 995        if (need_loop)
 996                write_seqcount_end(&tsk->mems_allowed_seq);
 997
 998        task_unlock(tsk);
 999}
1000
1001/*
1002 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
1003 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
1004 * memory_migrate flag is set. Called with cgroup_mutex held.
1005 */
1006static void cpuset_change_nodemask(struct task_struct *p,
1007                                   struct cgroup_scanner *scan)
1008{
1009        struct mm_struct *mm;
1010        struct cpuset *cs;
1011        int migrate;
1012        const nodemask_t *oldmem = scan->data;
1013        static nodemask_t newmems;      /* protected by cgroup_mutex */
1014
1015        cs = cgroup_cs(scan->cg);
1016        guarantee_online_mems(cs, &newmems);
1017
1018        cpuset_change_task_nodemask(p, &newmems);
1019
1020        mm = get_task_mm(p);
1021        if (!mm)
1022                return;
1023
1024        migrate = is_memory_migrate(cs);
1025
1026        mpol_rebind_mm(mm, &cs->mems_allowed);
1027        if (migrate)
1028                cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1029        mmput(mm);
1030}
1031
1032static void *cpuset_being_rebound;
1033
1034/**
1035 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1036 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1037 * @oldmem: old mems_allowed of cpuset cs
1038 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1039 *
1040 * Called with cgroup_mutex held
1041 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1042 * if @heap != NULL.
1043 */
1044static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1045                                 struct ptr_heap *heap)
1046{
1047        struct cgroup_scanner scan;
1048
1049        cpuset_being_rebound = cs;              /* causes mpol_dup() rebind */
1050
1051        scan.cg = cs->css.cgroup;
1052        scan.test_task = NULL;
1053        scan.process_task = cpuset_change_nodemask;
1054        scan.heap = heap;
1055        scan.data = (nodemask_t *)oldmem;
1056
1057        /*
1058         * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1059         * take while holding tasklist_lock.  Forks can happen - the
1060         * mpol_dup() cpuset_being_rebound check will catch such forks,
1061         * and rebind their vma mempolicies too.  Because we still hold
1062         * the global cgroup_mutex, we know that no other rebind effort
1063         * will be contending for the global variable cpuset_being_rebound.
1064         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1065         * is idempotent.  Also migrate pages in each mm to new nodes.
1066         */
1067        cgroup_scan_tasks(&scan);
1068
1069        /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1070        cpuset_being_rebound = NULL;
1071}
1072
1073/*
1074 * Handle user request to change the 'mems' memory placement
1075 * of a cpuset.  Needs to validate the request, update the
1076 * cpusets mems_allowed, and for each task in the cpuset,
1077 * update mems_allowed and rebind task's mempolicy and any vma
1078 * mempolicies and if the cpuset is marked 'memory_migrate',
 1079 * migrate the task's pages to the new memory.
1080 *
1081 * Call with cgroup_mutex held.  May take callback_mutex during call.
1082 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 1083 * lock each such task's mm->mmap_sem, scan its vma's and rebind
 1084 * their mempolicies to the cpuset's new mems_allowed.
1085 */
1086static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1087                           const char *buf)
1088{
1089        NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
1090        int retval;
1091        struct ptr_heap heap;
1092
1093        if (!oldmem)
1094                return -ENOMEM;
1095
1096        /*
 1097         * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
1098         * it's read-only
1099         */
1100        if (cs == &top_cpuset) {
1101                retval = -EACCES;
1102                goto done;
1103        }
1104
1105        /*
1106         * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1107         * Since nodelist_parse() fails on an empty mask, we special case
1108         * that parsing.  The validate_change() call ensures that cpusets
1109         * with tasks have memory.
1110         */
1111        if (!*buf) {
1112                nodes_clear(trialcs->mems_allowed);
1113        } else {
1114                retval = nodelist_parse(buf, trialcs->mems_allowed);
1115                if (retval < 0)
1116                        goto done;
1117
1118                if (!nodes_subset(trialcs->mems_allowed,
1119                                node_states[N_HIGH_MEMORY])) {
1120                        retval =  -EINVAL;
1121                        goto done;
1122                }
1123        }
1124        *oldmem = cs->mems_allowed;
1125        if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
1126                retval = 0;             /* Too easy - nothing to do */
1127                goto done;
1128        }
1129        retval = validate_change(cs, trialcs);
1130        if (retval < 0)
1131                goto done;
1132
1133        retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1134        if (retval < 0)
1135                goto done;
1136
1137        mutex_lock(&callback_mutex);
1138        cs->mems_allowed = trialcs->mems_allowed;
1139        mutex_unlock(&callback_mutex);
1140
1141        update_tasks_nodemask(cs, oldmem, &heap);
1142
1143        heap_free(&heap);
1144done:
1145        NODEMASK_FREE(oldmem);
1146        return retval;
1147}
1148
1149int current_cpuset_is_being_rebound(void)
1150{
1151        return task_cs(current) == cpuset_being_rebound;
1152}
1153
1154static int update_relax_domain_level(struct cpuset *cs, s64 val)
1155{
1156#ifdef CONFIG_SMP
1157        if (val < -1 || val >= sched_domain_level_max)
1158                return -EINVAL;
1159#endif
1160
1161        if (val != cs->relax_domain_level) {
1162                cs->relax_domain_level = val;
1163                if (!cpumask_empty(cs->cpus_allowed) &&
1164                    is_sched_load_balance(cs))
1165                        async_rebuild_sched_domains();
1166        }
1167
1168        return 0;
1169}
1170
1171/*
1172 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1173 * @tsk: task to be updated
1174 * @scan: struct cgroup_scanner containing the cgroup of the task
1175 *
1176 * Called by cgroup_scan_tasks() for each task in a cgroup.
1177 *
1178 * We don't need to re-check for the cgroup/cpuset membership, since we're
1179 * holding cgroup_lock() at this point.
1180 */
1181static void cpuset_change_flag(struct task_struct *tsk,
1182                                struct cgroup_scanner *scan)
1183{
1184        cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
1185}
1186
1187/*
1188 * update_tasks_flags - update the spread flags of tasks in the cpuset.
 1189 * @cs: the cpuset in which each task's spread flags need to be changed
1190 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1191 *
1192 * Called with cgroup_mutex held
1193 *
1194 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1195 * calling callback functions for each.
1196 *
1197 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1198 * if @heap != NULL.
1199 */
1200static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1201{
1202        struct cgroup_scanner scan;
1203
1204        scan.cg = cs->css.cgroup;
1205        scan.test_task = NULL;
1206        scan.process_task = cpuset_change_flag;
1207        scan.heap = heap;
1208        cgroup_scan_tasks(&scan);
1209}
1210
1211/*
1212 * update_flag - read a 0 or a 1 in a file and update associated flag
1213 * bit:         the bit to update (see cpuset_flagbits_t)
1214 * cs:          the cpuset to update
1215 * turning_on:  whether the flag is being set or cleared
1216 *
1217 * Call with cgroup_mutex held.
1218 */
1219
1220static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1221                       int turning_on)
1222{
1223        struct cpuset *trialcs;
1224        int balance_flag_changed;
1225        int spread_flag_changed;
1226        struct ptr_heap heap;
1227        int err;
1228
1229        trialcs = alloc_trial_cpuset(cs);
1230        if (!trialcs)
1231                return -ENOMEM;
1232
1233        if (turning_on)
1234                set_bit(bit, &trialcs->flags);
1235        else
1236                clear_bit(bit, &trialcs->flags);
1237
1238        err = validate_change(cs, trialcs);
1239        if (err < 0)
1240                goto out;
1241
1242        err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1243        if (err < 0)
1244                goto out;
1245
1246        balance_flag_changed = (is_sched_load_balance(cs) !=
1247                                is_sched_load_balance(trialcs));
1248
1249        spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1250                        || (is_spread_page(cs) != is_spread_page(trialcs)));
1251
1252        mutex_lock(&callback_mutex);
1253        cs->flags = trialcs->flags;
1254        mutex_unlock(&callback_mutex);
1255
1256        if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1257                async_rebuild_sched_domains();
1258
1259        if (spread_flag_changed)
1260                update_tasks_flags(cs, &heap);
1261        heap_free(&heap);
1262out:
1263        free_trial_cpuset(trialcs);
1264        return err;
1265}
1266
1267/*
1268 * Frequency meter - How fast is some event occurring?
1269 *
1270 * These routines manage a digitally filtered, constant time based,
1271 * event frequency meter.  There are four routines:
1272 *   fmeter_init() - initialize a frequency meter.
1273 *   fmeter_markevent() - called each time the event happens.
1274 *   fmeter_getrate() - returns the recent rate of such events.
1275 *   fmeter_update() - internal routine used to update fmeter.
1276 *
1277 * A common data structure is passed to each of these routines,
1278 * which is used to keep track of the state required to manage the
1279 * frequency meter and its digital filter.
1280 *
1281 * The filter works on the number of events marked per unit time.
1282 * The filter is single-pole low-pass recursive (IIR).  The time unit
1283 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1284 * simulate 3 decimal digits of precision (multiplied by 1000).
1285 *
1286 * With an FM_COEF of 933, and a time base of 1 second, the filter
1287 * has a half-life of 10 seconds, meaning that if the events quit
1288 * happening, then the rate returned from the fmeter_getrate()
1289 * will be cut in half each 10 seconds, until it converges to zero.
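 *
 * As a quick sanity check of that half-life figure (plain arithmetic, not
 * code from below): each idle second scales the value by FM_COEF/FM_SCALE
 * = 0.933, and 0.933^10 is roughly 0.50, so ten event-free seconds halve
 * the reported rate.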
1290 *
1291 * It is not worth doing a real infinitely recursive filter.  If more
1292 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1293 * just compute FM_MAXTICKS ticks worth, by which point the level
1294 * will be stable.
1295 *
1296 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1297 * arithmetic overflow in the fmeter_update() routine.
1298 *
1299 * Given the simple 32 bit integer arithmetic used, this meter works
1300 * best for reporting rates between one per millisecond (msec) and
1301 * one per 32 (approx) seconds.  At constant rates faster than one
1302 * per msec it maxes out at values just under 1,000,000.  At constant
 1303 * rates between one per msec and one per second, it will stabilize
1304 * to a value N*1000, where N is the rate of events per second.
1305 * At constant rates between one per second and one per 32 seconds,
1306 * it will be choppy, moving up on the seconds that have an event,
1307 * and then decaying until the next event.  At rates slower than
1308 * about one in 32 seconds, it decays all the way back to zero between
1309 * each event.
1310 */
1311
1312#define FM_COEF 933             /* coefficient for half-life of 10 secs */
1313#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1314#define FM_MAXCNT 1000000       /* limit cnt to avoid overflow */
1315#define FM_SCALE 1000           /* faux fixed point scale */
1316
1317/* Initialize a frequency meter */
1318static void fmeter_init(struct fmeter *fmp)
1319{
1320        fmp->cnt = 0;
1321        fmp->val = 0;
1322        fmp->time = 0;
1323        spin_lock_init(&fmp->lock);
1324}
1325
1326/* Internal meter update - process cnt events and update value */
1327static void fmeter_update(struct fmeter *fmp)
1328{
1329        time_t now = get_seconds();
1330        time_t ticks = now - fmp->time;
1331
1332        if (ticks == 0)
1333                return;
1334
1335        ticks = min(FM_MAXTICKS, ticks);
1336        while (ticks-- > 0)
1337                fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1338        fmp->time = now;
1339
1340        fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1341        fmp->cnt = 0;
1342}
1343
1344/* Process any previous ticks, then bump cnt by one (times scale). */
1345static void fmeter_markevent(struct fmeter *fmp)
1346{
1347        spin_lock(&fmp->lock);
1348        fmeter_update(fmp);
1349        fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1350        spin_unlock(&fmp->lock);
1351}
1352
1353/* Process any previous ticks, then return current value. */
1354static int fmeter_getrate(struct fmeter *fmp)
1355{
1356        int val;
1357
1358        spin_lock(&fmp->lock);
1359        fmeter_update(fmp);
1360        val = fmp->val;
1361        spin_unlock(&fmp->lock);
1362        return val;
1363}
1364
1365/*
1366 * Protected by cgroup_lock. The nodemasks must be stored globally because
1367 * dynamically allocating them is not allowed in can_attach, and they must
1368 * persist until attach.
1369 */
1370static cpumask_var_t cpus_attach;
1371static nodemask_t cpuset_attach_nodemask_from;
1372static nodemask_t cpuset_attach_nodemask_to;
1373
1374/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1375static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1376{
1377        struct cpuset *cs = cgroup_cs(cgrp);
1378        struct task_struct *task;
1379        int ret;
1380
1381        if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1382                return -ENOSPC;
1383
1384        cgroup_taskset_for_each(task, cgrp, tset) {
1385                /*
1386                 * Kthreads bound to specific cpus cannot be moved to a new
1387                 * cpuset; we cannot change their cpu affinity and
1388                 * isolating such threads by their set of allowed nodes is
1389                 * unnecessary.  Thus, cpusets are not applicable for such
1390                 * threads.  This prevents checking for success of
1391                 * set_cpus_allowed_ptr() on all attached tasks before
1392                 * cpus_allowed may be changed.
1393                 */
1394                if (task->flags & PF_THREAD_BOUND)
1395                        return -EINVAL;
1396                if ((ret = security_task_setscheduler(task)))
1397                        return ret;
1398        }
1399
1400        /* prepare for attach */
1401        if (cs == &top_cpuset)
1402                cpumask_copy(cpus_attach, cpu_possible_mask);
1403        else
1404                guarantee_online_cpus(cs, cpus_attach);
1405
1406        guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1407
1408        return 0;
1409}
1410
1411static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
1412{
1413        struct mm_struct *mm;
1414        struct task_struct *task;
1415        struct task_struct *leader = cgroup_taskset_first(tset);
1416        struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
1417        struct cpuset *cs = cgroup_cs(cgrp);
1418        struct cpuset *oldcs = cgroup_cs(oldcgrp);
1419
1420        cgroup_taskset_for_each(task, cgrp, tset) {
1421                /*
1422                 * can_attach beforehand should guarantee that this doesn't
1423                 * fail.  TODO: have a better way to handle failure here
1424                 */
1425                WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1426
1427                cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1428                cpuset_update_task_spread_flag(cs, task);
1429        }
1430
1431        /*
1432         * Change mm, possibly for multiple threads in a threadgroup. This is
1433         * expensive and may sleep.
1434         */
1435        cpuset_attach_nodemask_from = oldcs->mems_allowed;
1436        cpuset_attach_nodemask_to = cs->mems_allowed;
1437        mm = get_task_mm(leader);
1438        if (mm) {
1439                mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1440                if (is_memory_migrate(cs))
1441                        cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
1442                                          &cpuset_attach_nodemask_to);
1443                mmput(mm);
1444        }
1445}
1446
1447/* The various types of files and directories in a cpuset file system */
1448
1449typedef enum {
1450        FILE_MEMORY_MIGRATE,
1451        FILE_CPULIST,
1452        FILE_MEMLIST,
1453        FILE_CPU_EXCLUSIVE,
1454        FILE_MEM_EXCLUSIVE,
1455        FILE_MEM_HARDWALL,
1456        FILE_SCHED_LOAD_BALANCE,
1457        FILE_SCHED_RELAX_DOMAIN_LEVEL,
1458        FILE_MEMORY_PRESSURE_ENABLED,
1459        FILE_MEMORY_PRESSURE,
1460        FILE_SPREAD_PAGE,
1461        FILE_SPREAD_SLAB,
1462} cpuset_filetype_t;
1463
1464static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1465{
1466        int retval = 0;
1467        struct cpuset *cs = cgroup_cs(cgrp);
1468        cpuset_filetype_t type = cft->private;
1469
1470        if (!cgroup_lock_live_group(cgrp))
1471                return -ENODEV;
1472
1473        switch (type) {
1474        case FILE_CPU_EXCLUSIVE:
1475                retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1476                break;
1477        case FILE_MEM_EXCLUSIVE:
1478                retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1479                break;
1480        case FILE_MEM_HARDWALL:
1481                retval = update_flag(CS_MEM_HARDWALL, cs, val);
1482                break;
1483        case FILE_SCHED_LOAD_BALANCE:
1484                retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1485                break;
1486        case FILE_MEMORY_MIGRATE:
1487                retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1488                break;
1489        case FILE_MEMORY_PRESSURE_ENABLED:
1490                cpuset_memory_pressure_enabled = !!val;
1491                break;
1492        case FILE_MEMORY_PRESSURE:
1493                retval = -EACCES;
1494                break;
1495        case FILE_SPREAD_PAGE:
1496                retval = update_flag(CS_SPREAD_PAGE, cs, val);
1497                break;
1498        case FILE_SPREAD_SLAB:
1499                retval = update_flag(CS_SPREAD_SLAB, cs, val);
1500                break;
1501        default:
1502                retval = -EINVAL;
1503                break;
1504        }
1505        cgroup_unlock();
1506        return retval;
1507}
1508
1509static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1510{
1511        int retval = 0;
1512        struct cpuset *cs = cgroup_cs(cgrp);
1513        cpuset_filetype_t type = cft->private;
1514
1515        if (!cgroup_lock_live_group(cgrp))
1516                return -ENODEV;
1517
1518        switch (type) {
1519        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1520                retval = update_relax_domain_level(cs, val);
1521                break;
1522        default:
1523                retval = -EINVAL;
1524                break;
1525        }
1526        cgroup_unlock();
1527        return retval;
1528}
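
/*
 * A rough sketch of how the two handlers above are reached.  The file
 * names below are the unprefixed legacy ones; a plain cgroup mount of
 * the cpuset subsystem exposes them with a "cpuset." prefix instead:
 *
 *      # echo 1 > /dev/cpuset/set1/cpu_exclusive
 *              -> cpuset_write_u64(), FILE_CPU_EXCLUSIVE,
 *                 update_flag(CS_CPU_EXCLUSIVE, cs, 1)
 *
 *      # echo -1 > /dev/cpuset/set1/sched_relax_domain_level
 *              -> cpuset_write_s64(), FILE_SCHED_RELAX_DOMAIN_LEVEL,
 *                 update_relax_domain_level(cs, -1)
 */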
1529
1530/*
1531 * Common handling for a write to a "cpus" or "mems" file.
1532 */
1533static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1534                                const char *buf)
1535{
1536        int retval = 0;
1537        struct cpuset *cs = cgroup_cs(cgrp);
1538        struct cpuset *trialcs;
1539
1540        if (!cgroup_lock_live_group(cgrp))
1541                return -ENODEV;
1542
1543        trialcs = alloc_trial_cpuset(cs);
1544        if (!trialcs) {
1545                retval = -ENOMEM;
1546                goto out;
1547        }
1548
1549        switch (cft->private) {
1550        case FILE_CPULIST:
1551                retval = update_cpumask(cs, trialcs, buf);
1552                break;
1553        case FILE_MEMLIST:
1554                retval = update_nodemask(cs, trialcs, buf);
1555                break;
1556        default:
1557                retval = -EINVAL;
1558                break;
1559        }
1560
1561        free_trial_cpuset(trialcs);
1562out:
1563        cgroup_unlock();
1564        return retval;
1565}
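
/*
 * For example (a sketch; the written value uses the usual
 * comma-separated list-of-ranges syntax):
 *
 *      # echo 0-3,8 > /dev/cpuset/set1/cpus   -> update_cpumask()
 *      # echo 0-1 > /dev/cpuset/set1/mems     -> update_nodemask()
 *
 * Both updates are applied to the trial cpuset first, so a request
 * that fails validation (e.g. a mask that is not a subset of the
 * parent's) is rejected before the live cpuset is touched.
 */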
1566
1567/*
1568 * These ascii lists should be read in a single call, by using a user
1569 * buffer large enough to hold the entire map.  If read in smaller
1570 * chunks, there is no guarantee of atomicity.  Since the display format
1571 * used, list of ranges of sequential numbers, is variable length,
1572 * and since these maps can change value dynamically, one could read
1573 * gibberish by doing partial reads while a list was changing.
1574 * A single large read to a buffer that crosses a page boundary is
1575 * ok, because the result being copied to user land is not recomputed
1576 * across a page fault.
1577 */
1578
1579static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1580{
1581        size_t count;
1582
1583        mutex_lock(&callback_mutex);
1584        count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
1585        mutex_unlock(&callback_mutex);
1586
1587        return count;
1588}
1589
1590static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1591{
1592        size_t count;
1593
1594        mutex_lock(&callback_mutex);
1595        count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
1596        mutex_unlock(&callback_mutex);
1597
1598        return count;
1599}
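
/*
 * The output produced by the two helpers above is the list-of-ranges
 * format described in the comment preceding them: for example, a
 * cpuset allowed CPUs 0, 1, 2, 3 and 7 reads back as "0-3,7", and an
 * empty mask reads back as an empty string.
 */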
1600
1601static ssize_t cpuset_common_file_read(struct cgroup *cont,
1602                                       struct cftype *cft,
1603                                       struct file *file,
1604                                       char __user *buf,
1605                                       size_t nbytes, loff_t *ppos)
1606{
1607        struct cpuset *cs = cgroup_cs(cont);
1608        cpuset_filetype_t type = cft->private;
1609        char *page;
1610        ssize_t retval = 0;
1611        char *s;
1612
1613        if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
1614                return -ENOMEM;
1615
1616        s = page;
1617
1618        switch (type) {
1619        case FILE_CPULIST:
1620                s += cpuset_sprintf_cpulist(s, cs);
1621                break;
1622        case FILE_MEMLIST:
1623                s += cpuset_sprintf_memlist(s, cs);
1624                break;
1625        default:
1626                retval = -EINVAL;
1627                goto out;
1628        }
1629        *s++ = '\n';
1630
1631        retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1632out:
1633        free_page((unsigned long)page);
1634        return retval;
1635}
1636
1637static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1638{
1639        struct cpuset *cs = cgroup_cs(cont);
1640        cpuset_filetype_t type = cft->private;
1641        switch (type) {
1642        case FILE_CPU_EXCLUSIVE:
1643                return is_cpu_exclusive(cs);
1644        case FILE_MEM_EXCLUSIVE:
1645                return is_mem_exclusive(cs);
1646        case FILE_MEM_HARDWALL:
1647                return is_mem_hardwall(cs);
1648        case FILE_SCHED_LOAD_BALANCE:
1649                return is_sched_load_balance(cs);
1650        case FILE_MEMORY_MIGRATE:
1651                return is_memory_migrate(cs);
1652        case FILE_MEMORY_PRESSURE_ENABLED:
1653                return cpuset_memory_pressure_enabled;
1654        case FILE_MEMORY_PRESSURE:
1655                return fmeter_getrate(&cs->fmeter);
1656        case FILE_SPREAD_PAGE:
1657                return is_spread_page(cs);
1658        case FILE_SPREAD_SLAB:
1659                return is_spread_slab(cs);
1660        default:
1661                BUG();
1662        }
1663
1664        /* Unreachable but makes gcc happy */
1665        return 0;
1666}
1667
1668static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1669{
1670        struct cpuset *cs = cgroup_cs(cont);
1671        cpuset_filetype_t type = cft->private;
1672        switch (type) {
1673        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1674                return cs->relax_domain_level;
1675        default:
1676                BUG();
1677        }
1678
1679        /* Unreachable but makes gcc happy */
1680        return 0;
1681}
1682
1683
1684/*
1685 * for the common functions, 'private' gives the type of file
1686 */
1687
1688static struct cftype files[] = {
1689        {
1690                .name = "cpus",
1691                .read = cpuset_common_file_read,
1692                .write_string = cpuset_write_resmask,
1693                .max_write_len = (100U + 6 * NR_CPUS),
1694                .private = FILE_CPULIST,
1695        },
1696
1697        {
1698                .name = "mems",
1699                .read = cpuset_common_file_read,
1700                .write_string = cpuset_write_resmask,
1701                .max_write_len = (100U + 6 * MAX_NUMNODES),
1702                .private = FILE_MEMLIST,
1703        },
1704
1705        {
1706                .name = "cpu_exclusive",
1707                .read_u64 = cpuset_read_u64,
1708                .write_u64 = cpuset_write_u64,
1709                .private = FILE_CPU_EXCLUSIVE,
1710        },
1711
1712        {
1713                .name = "mem_exclusive",
1714                .read_u64 = cpuset_read_u64,
1715                .write_u64 = cpuset_write_u64,
1716                .private = FILE_MEM_EXCLUSIVE,
1717        },
1718
1719        {
1720                .name = "mem_hardwall",
1721                .read_u64 = cpuset_read_u64,
1722                .write_u64 = cpuset_write_u64,
1723                .private = FILE_MEM_HARDWALL,
1724        },
1725
1726        {
1727                .name = "sched_load_balance",
1728                .read_u64 = cpuset_read_u64,
1729                .write_u64 = cpuset_write_u64,
1730                .private = FILE_SCHED_LOAD_BALANCE,
1731        },
1732
1733        {
1734                .name = "sched_relax_domain_level",
1735                .read_s64 = cpuset_read_s64,
1736                .write_s64 = cpuset_write_s64,
1737                .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1738        },
1739
1740        {
1741                .name = "memory_migrate",
1742                .read_u64 = cpuset_read_u64,
1743                .write_u64 = cpuset_write_u64,
1744                .private = FILE_MEMORY_MIGRATE,
1745        },
1746
1747        {
1748                .name = "memory_pressure",
1749                .read_u64 = cpuset_read_u64,
1750                .write_u64 = cpuset_write_u64,
1751                .private = FILE_MEMORY_PRESSURE,
1752                .mode = S_IRUGO,
1753        },
1754
1755        {
1756                .name = "memory_spread_page",
1757                .read_u64 = cpuset_read_u64,
1758                .write_u64 = cpuset_write_u64,
1759                .private = FILE_SPREAD_PAGE,
1760        },
1761
1762        {
1763                .name = "memory_spread_slab",
1764                .read_u64 = cpuset_read_u64,
1765                .write_u64 = cpuset_write_u64,
1766                .private = FILE_SPREAD_SLAB,
1767        },
1768
1769        {
1770                .name = "memory_pressure_enabled",
1771                .flags = CFTYPE_ONLY_ON_ROOT,
1772                .read_u64 = cpuset_read_u64,
1773                .write_u64 = cpuset_write_u64,
1774                .private = FILE_MEMORY_PRESSURE_ENABLED,
1775        },
1776
1777        { }     /* terminate */
1778};
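
/*
 * With the table above, every cpuset directory exposes (using the
 * unprefixed legacy names; the cgroup mount prefixes them with
 * "cpuset."):
 *
 *      cpus  mems  cpu_exclusive  mem_exclusive  mem_hardwall
 *      sched_load_balance  sched_relax_domain_level  memory_migrate
 *      memory_pressure  memory_spread_page  memory_spread_slab
 *
 * memory_pressure is read-only (S_IRUGO, and the write handler
 * returns -EACCES), and memory_pressure_enabled appears only in the
 * root cpuset (CFTYPE_ONLY_ON_ROOT).
 */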
1779
1780/*
1781 * post_clone() is called during cgroup_create() when the
1782 * clone_children mount argument was specified.  The cgroup
1783 * can not yet have any tasks.
1784 *
1785 * Currently we refuse to set up the cgroup - thereby
1786 * refusing the task to be entered, and as a result refusing
1787 * the sys_unshare() or clone() which initiated it - if any
1788 * sibling cpusets have exclusive cpus or mem.
1789 *
1790 * If this becomes a problem for some users who wish to
1791 * allow that scenario, then cpuset_post_clone() could be
1792 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1793 * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
1794 * held.
1795 */
1796static void cpuset_post_clone(struct cgroup *cgroup)
1797{
1798        struct cgroup *parent, *child;
1799        struct cpuset *cs, *parent_cs;
1800
1801        parent = cgroup->parent;
1802        list_for_each_entry(child, &parent->children, sibling) {
1803                cs = cgroup_cs(child);
1804                if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
1805                        return;
1806        }
1807        cs = cgroup_cs(cgroup);
1808        parent_cs = cgroup_cs(parent);
1809
1810        mutex_lock(&callback_mutex);
1811        cs->mems_allowed = parent_cs->mems_allowed;
1812        cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
1813        mutex_unlock(&callback_mutex);
1814        return;
1815}
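
/*
 * For example, on a hierarchy mounted with the clone_children option
 * (typically toggled through the cgroup.clone_children file), a
 * freshly mkdir'ed child cpuset starts out with its parent's cpus and
 * mems already copied in by the function above, instead of the empty
 * masks set up in cpuset_create() - unless some sibling is
 * cpu_exclusive or mem_exclusive, in which case nothing is copied.
 */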
1816
1817/*
1818 *      cpuset_create - create a cpuset
1819 *      cont:   control group that the new cpuset will be part of
1820 */
1821
1822static struct cgroup_subsys_state *cpuset_create(struct cgroup *cont)
1823{
1824        struct cpuset *cs;
1825        struct cpuset *parent;
1826
1827        if (!cont->parent) {
1828                return &top_cpuset.css;
1829        }
1830        parent = cgroup_cs(cont->parent);
1831        cs = kmalloc(sizeof(*cs), GFP_KERNEL);
1832        if (!cs)
1833                return ERR_PTR(-ENOMEM);
1834        if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1835                kfree(cs);
1836                return ERR_PTR(-ENOMEM);
1837        }
1838
1839        cs->flags = 0;
1840        if (is_spread_page(parent))
1841                set_bit(CS_SPREAD_PAGE, &cs->flags);
1842        if (is_spread_slab(parent))
1843                set_bit(CS_SPREAD_SLAB, &cs->flags);
1844        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1845        cpumask_clear(cs->cpus_allowed);
1846        nodes_clear(cs->mems_allowed);
1847        fmeter_init(&cs->fmeter);
1848        cs->relax_domain_level = -1;
1849
1850        cs->parent = parent;
1851        number_of_cpusets++;
1852        return &cs->css;
1853}
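
/*
 * A sketch of the usual creation sequence (again assuming a mount at
 * /dev/cpuset):
 *
 *      # mkdir /dev/cpuset/set1        -> cpuset_create()
 *      # echo 0-1 > /dev/cpuset/set1/cpus
 *      # echo 0 > /dev/cpuset/set1/mems
 *
 * The new cpuset starts with empty cpus and mems, so "cpus" and
 * "mems" have to be populated before tasks can be attached;
 * cpuset_can_attach() refuses to attach to a cpuset whose cpus or
 * mems are empty.
 */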
1854
1855/*
1856 * If the cpuset being removed has its flag 'sched_load_balance'
1857 * enabled, then simulate turning sched_load_balance off, which
1858 * will call async_rebuild_sched_domains().
1859 */
1860
1861static void cpuset_destroy(struct cgroup *cont)
1862{
1863        struct cpuset *cs = cgroup_cs(cont);
1864
1865        if (is_sched_load_balance(cs))
1866                update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1867
1868        number_of_cpusets--;
1869        free_cpumask_var(cs->cpus_allowed);
1870        kfree(cs);
1871}
1872
1873struct cgroup_subsys cpuset_subsys = {
1874        .name = "cpuset",
1875        .create = cpuset_create,
1876        .destroy = cpuset_destroy,
1877        .can_attach = cpuset_can_attach,
1878        .attach = cpuset_attach,
1879        .post_clone = cpuset_post_clone,
1880        .subsys_id = cpuset_subsys_id,
1881        .base_cftypes = files,
1882        .early_init = 1,
1883};
1884
1885/**
1886 * cpuset_init - initialize cpusets at system boot
1887 *
1888 * Description: Initialize top_cpuset and the cpuset internal file system,
1889 **/
1890
1891int __init cpuset_init(void)
1892{
1893        int err = 0;
1894
1895        if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
1896                BUG();
1897
1898        cpumask_setall(top_cpuset.cpus_allowed);
1899        nodes_setall(top_cpuset.mems_allowed);
1900
1901        fmeter_init(&top_cpuset.fmeter);
1902        set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1903        top_cpuset.relax_domain_level = -1;
1904
1905        err = register_filesystem(&cpuset_fs_type);
1906        if (err < 0)
1907                return err;
1908
1909        if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1910                BUG();
1911
1912        number_of_cpusets = 1;
1913        return 0;
1914}
1915
1916/**
1917 * cpuset_do_move_task - move a given task to another cpuset
1918 * @tsk: pointer to the task_struct of the task to move
1919 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
1920 *
1921 * Called by cgroup_scan_tasks() for each task in a cgroup.
1922 * Moves @tsk into the cgroup passed via @scan->data.
1923 */
1924static void cpuset_do_move_task(struct task_struct *tsk,
1925                                struct cgroup_scanner *scan)
1926{
1927        struct cgroup *new_cgroup = scan->data;
1928
1929        cgroup_attach_task(new_cgroup, tsk);
1930}
1931
1932/**
1933 * move_member_tasks_to_cpuset - move tasks from one cpuset to another
1934 * @from: cpuset in which the tasks currently reside
1935 * @to: cpuset to which the tasks will be moved
1936 *
1937 * Called with cgroup_mutex held
1938 * callback_mutex must not be held, as cpuset_attach() will take it.
1939 *
1940 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1941 * calling callback functions for each.
1942 */
1943static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1944{
1945        struct cgroup_scanner scan;
1946
1947        scan.cg = from->css.cgroup;
1948        scan.test_task = NULL; /* select all tasks in cgroup */
1949        scan.process_task = cpuset_do_move_task;
1950        scan.heap = NULL;
1951        scan.data = to->css.cgroup;
1952
1953        if (cgroup_scan_tasks(&scan))
1954                printk(KERN_ERR "move_member_tasks_to_cpuset: "
1955                                "cgroup_scan_tasks failed\n");
1956}
1957
1958/*
1959 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
1960 * or memory nodes, we need to walk over the cpuset hierarchy,
1961 * removing that CPU or node from all cpusets.  If this removes the
1962 * last CPU or node from a cpuset, then move the tasks in the empty
1963 * cpuset to its next-highest non-empty parent.
1964 *
1965 * Called with cgroup_mutex held
1966 * callback_mutex must not be held, as cpuset_attach() will take it.
1967 */
1968static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
1969{
1970        struct cpuset *parent;
1971
1972        /*
1973         * The cgroup's css_sets list is in use if there are tasks
1974         * in the cpuset; the list is empty if there are none;
1975         * the cs->css.refcnt always appears to be 0.
1976         */
1977        if (list_empty(&cs->css.cgroup->css_sets))
1978                return;
1979
1980        /*
1981         * Find its next-highest non-empty parent, (top cpuset
1982         * has online cpus, so can't be empty).
1983         */
1984        parent = cs->parent;
1985        while (cpumask_empty(parent->cpus_allowed) ||
1986                        nodes_empty(parent->mems_allowed))
1987                parent = parent->parent;
1988
1989        move_member_tasks_to_cpuset(cs, parent);
1990}
1991
1992/*
1993 * Walk the specified cpuset subtree and look for empty cpusets.
1994 * The tasks of such cpuset must be moved to a parent cpuset.
1995 *
1996 * Called with cgroup_mutex held.  We take callback_mutex to modify
1997 * cpus_allowed and mems_allowed.
1998 *
1999 * This walk processes the tree from top to bottom, completing one layer
2000 * before dropping down to the next.  It always processes a node before
2001 * any of its children.
2002 *
2003 * For now, since we lack memory hot unplug, we'll never see a cpuset
2004 * that has tasks along with an empty 'mems'.  But if we did see such
2005 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
2006 */
2007static void scan_for_empty_cpusets(struct cpuset *root)
2008{
2009        LIST_HEAD(queue);
2010        struct cpuset *cp;      /* scans cpusets being updated */
2011        struct cpuset *child;   /* scans child cpusets of cp */
2012        struct cgroup *cont;
2013        static nodemask_t oldmems;      /* protected by cgroup_mutex */
2014
2015        list_add_tail((struct list_head *)&root->stack_list, &queue);
2016
2017        while (!list_empty(&queue)) {
2018                cp = list_first_entry(&queue, struct cpuset, stack_list);
2019                list_del(queue.next);
2020                list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
2021                        child = cgroup_cs(cont);
2022                        list_add_tail(&child->stack_list, &queue);
2023                }
2024
2025                /* Continue past cpusets with all cpus, mems online */
2026                if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
2027                    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
2028                        continue;
2029
2030                oldmems = cp->mems_allowed;
2031
2032                /* Remove offline cpus and mems from this cpuset. */
2033                mutex_lock(&callback_mutex);
2034                cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
2035                            cpu_active_mask);
2036                nodes_and(cp->mems_allowed, cp->mems_allowed,
2037                                                node_states[N_HIGH_MEMORY]);
2038                mutex_unlock(&callback_mutex);
2039
2040                /* Move tasks from the empty cpuset to a parent */
2041                if (cpumask_empty(cp->cpus_allowed) ||
2042                     nodes_empty(cp->mems_allowed))
2043                        remove_tasks_in_empty_cpuset(cp);
2044                else {
2045                        update_tasks_cpumask(cp, NULL);
2046                        update_tasks_nodemask(cp, &oldmems, NULL);
2047                }
2048        }
2049}
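
/*
 * A worked example of the walk above.  Suppose CPU 3 is unplugged on
 * a hierarchy
 *
 *      top (cpus 0-3) -> A (cpus 2-3) -> A1 (cpus 3)
 *
 * The queue visits top, A, A1 in that order.  A loses CPU 3 but keeps
 * CPU 2, so its tasks are simply retargeted via update_tasks_cpumask().
 * A1 ends up with an empty cpumask, so remove_tasks_in_empty_cpuset()
 * moves its tasks up to A, which by then is the nearest non-empty
 * ancestor.
 */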
2050
2051/*
2052 * The top_cpuset tracks what CPUs and Memory Nodes are online,
2053 * period.  This is necessary in order to make cpusets transparent
2054 * (of no effect) on systems that are actively using CPU hotplug
2055 * but making no active use of cpusets.
2056 *
2057 * The only exception to this is suspend/resume, where we don't
2058 * modify cpusets at all.
2059 *
2060 * This routine ensures that top_cpuset.cpus_allowed tracks
2061 * cpu_active_mask on each CPU hotplug (cpuhp) event.
2062 *
2063 * Called within get_online_cpus().  Needs to call cgroup_lock()
2064 * before calling generate_sched_domains().
2065 */
2066void cpuset_update_active_cpus(void)
2067{
2068        struct sched_domain_attr *attr;
2069        cpumask_var_t *doms;
2070        int ndoms;
2071
2072        cgroup_lock();
2073        mutex_lock(&callback_mutex);
2074        cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2075        mutex_unlock(&callback_mutex);
2076        scan_for_empty_cpusets(&top_cpuset);
2077        ndoms = generate_sched_domains(&doms, &attr);
2078        cgroup_unlock();
2079
2080        /* Have scheduler rebuild the domains */
2081        partition_sched_domains(ndoms, doms, attr);
2082}
2083
2084#ifdef CONFIG_MEMORY_HOTPLUG
2085/*
2086 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
2087 * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
2088 * See also cpuset_update_active_cpus(), above, which does the same for CPUs.
2089 */
2090static int cpuset_track_online_nodes(struct notifier_block *self,
2091                                unsigned long action, void *arg)
2092{
2093        static nodemask_t oldmems;      /* protected by cgroup_mutex */
2094
2095        cgroup_lock();
2096        switch (action) {
2097        case MEM_ONLINE:
2098                oldmems = top_cpuset.mems_allowed;
2099                mutex_lock(&callback_mutex);
2100                top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2101                mutex_unlock(&callback_mutex);
2102                update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
2103                break;
2104        case MEM_OFFLINE:
2105                /*
2106                 * needn't update top_cpuset.mems_allowed explicitly because
2107                 * scan_for_empty_cpusets() will update it.
2108                 */
2109                scan_for_empty_cpusets(&top_cpuset);
2110                break;
2111        default:
2112                break;
2113        }
2114        cgroup_unlock();
2115
2116        return NOTIFY_OK;
2117}
2118#endif
2119
2120/**
2121 * cpuset_init_smp - initialize cpus_allowed
2122 *
2123 * Description: Finish top cpuset after cpu, node maps are initialized
2124 **/
2125
2126void __init cpuset_init_smp(void)
2127{
2128        cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2129        top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2130
2131        hotplug_memory_notifier(cpuset_track_online_nodes, 10);
2132
2133        cpuset_wq = create_singlethread_workqueue("cpuset");
2134        BUG_ON(!cpuset_wq);
2135}
2136
2137/**
2138 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
2139 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2140 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2141 *
2142 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2143 * attached to the specified @tsk.  Guaranteed to return some non-empty
2144 * subset of cpu_online_mask, even if this means going outside the
2145 * tasks cpuset.
2146 **/
2147
2148void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2149{
2150        mutex_lock(&callback_mutex);
2151        task_lock(tsk);
2152        guarantee_online_cpus(task_cs(tsk), pmask);
2153        task_unlock(tsk);
2154        mutex_unlock(&callback_mutex);
2155}
2156
2157void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2158{
2159        const struct cpuset *cs;
2160
2161        rcu_read_lock();
2162        cs = task_cs(tsk);
2163        if (cs)
2164                do_set_cpus_allowed(tsk, cs->cpus_allowed);
2165        rcu_read_unlock();
2166
2167        /*
2168         * We own tsk->cpus_allowed, nobody can change it under us.
2169         *
2170         * But we used cs && cs->cpus_allowed lockless and thus can
2171         * race with cgroup_attach_task() or update_cpumask() and get
2172         * the wrong tsk->cpus_allowed. However, both cases imply the
2173         * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2174         * which takes task_rq_lock().
2175         *
2176         * If we are called after it dropped the lock we must see all
2177         * changes in task_cs()->cpus_allowed.  Otherwise we can temporarily
2178         * set any mask even if it is not right from task_cs()'s pov;
2179         * the pending set_cpus_allowed_ptr() will fix things.
2180         *
2181         * select_fallback_rq() will fix things up and fall back to
2182         * cpu_possible_mask if required.
2183         */
2184}
2185
2186void cpuset_init_current_mems_allowed(void)
2187{
2188        nodes_setall(current->mems_allowed);
2189}
2190
2191/**
2192 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
2193 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2194 *
2195 * Description: Returns the nodemask_t mems_allowed of the cpuset
2196 * attached to the specified @tsk.  Guaranteed to return some non-empty
2197 * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
2198 * tasks cpuset.
2199 **/
2200
2201nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2202{
2203        nodemask_t mask;
2204
2205        mutex_lock(&callback_mutex);
2206        task_lock(tsk);
2207        guarantee_online_mems(task_cs(tsk), &mask);
2208        task_unlock(tsk);
2209        mutex_unlock(&callback_mutex);
2210
2211        return mask;
2212}
2213
2214/**
2215 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2216 * @nodemask: the nodemask to be checked
2217 *
2218 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2219 */
2220int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2221{
2222        return nodes_intersects(*nodemask, current->mems_allowed);
2223}
2224
2225/*
2226 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2227 * mem_hardwall ancestor to the specified cpuset.  Call holding
2228 * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
2229 * (an unusual configuration), then returns the root cpuset.
2230 */
2231static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2232{
2233        while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
2234                cs = cs->parent;
2235        return cs;
2236}
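
/*
 * For example, given
 *
 *      top -> A (mem_hardwall) -> B -> C
 *
 * nearest_hardwall_ancestor(C) walks C -> B -> A and stops at A, so a
 * !__GFP_HARDWALL allocation by a task in C for a node outside C's
 * mems_allowed may still succeed if the node is set in A's
 * mems_allowed (see cpuset_node_allowed_softwall() below).
 */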
2237
2238/**
2239 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2240 * @node: is this an allowed node?
2241 * @gfp_mask: memory allocation flags
2242 *
2243 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2244 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2245 * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
2246 * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
2247 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2248 * flag, yes.
2249 * Otherwise, no.
2250 *
2251 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2252 * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
2253 * might sleep, and might allow a node from an enclosing cpuset.
2254 *
2255 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2256 * cpusets, and never sleeps.
2257 *
2258 * The __GFP_THISNODE placement logic is really handled elsewhere,
2259 * by forcibly using a zonelist starting at a specified node, and by
2260 * (in get_page_from_freelist()) refusing to consider the zones for
2261 * any node on the zonelist except the first.  By the time any such
2262 * calls get to this routine, we should just shut up and say 'yes'.
2263 *
2264 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2265 * and do not allow allocations outside the current tasks cpuset
2266 * unless the task has been OOM killed and is marked TIF_MEMDIE.
2267 * GFP_KERNEL allocations are not so marked, so can escape to the
2268 * nearest enclosing hardwalled ancestor cpuset.
2269 *
2270 * Scanning up parent cpusets requires callback_mutex.  The
2271 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2272 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2273 * current tasks mems_allowed came up empty on the first pass over
2274 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2275 * cpuset are short of memory, might require taking the callback_mutex
2276 * mutex.
2277 *
2278 * The first call here from mm/page_alloc:get_page_from_freelist()
2279 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2280 * so no allocation on a node outside the cpuset is allowed (unless
2281 * in interrupt, of course).
2282 *
2283 * The second pass through get_page_from_freelist() doesn't even call
2284 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2285 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2286 * in alloc_flags.  That logic and the checks below have the combined
2287 * effect that:
2288 *      in_interrupt - any node ok (current task context irrelevant)
2289 *      GFP_ATOMIC   - any node ok
2290 *      TIF_MEMDIE   - any node ok
2291 *      GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2292 *      GFP_USER     - only nodes in current tasks mems allowed ok.
2293 *
2294 * Rule:
2295 *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
2296 *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2297 *    the code that might scan up ancestor cpusets and sleep.
2298 */
2299int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2300{
2301        const struct cpuset *cs;        /* current cpuset ancestors */
2302        int allowed;                    /* is allocation on this node allowed? */
2303
2304        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2305                return 1;
2306        might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2307        if (node_isset(node, current->mems_allowed))
2308                return 1;
2309        /*
2310         * Allow tasks that have access to memory reserves because they have
2311         * been OOM killed to get memory anywhere.
2312         */
2313        if (unlikely(test_thread_flag(TIF_MEMDIE)))
2314                return 1;
2315        if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
2316                return 0;
2317
2318        if (current->flags & PF_EXITING) /* Let dying task have memory */
2319                return 1;
2320
2321        /* Not hardwall and node outside mems_allowed: scan up cpusets */
2322        mutex_lock(&callback_mutex);
2323
2324        task_lock(current);
2325        cs = nearest_hardwall_ancestor(task_cs(current));
2326        task_unlock(current);
2327
2328        allowed = node_isset(node, cs->mems_allowed);
2329        mutex_unlock(&callback_mutex);
2330        return allowed;
2331}
2332
2333/*
2334 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2335 * @node: is this an allowed node?
2336 * @gfp_mask: memory allocation flags
2337 *
2338 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2339 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2340 * yes.  If the task has been OOM killed and has access to memory reserves as
2341 * specified by the TIF_MEMDIE flag, yes.
2342 * Otherwise, no.
2343 *
2344 * The __GFP_THISNODE placement logic is really handled elsewhere,
2345 * by forcibly using a zonelist starting at a specified node, and by
2346 * (in get_page_from_freelist()) refusing to consider the zones for
2347 * any node on the zonelist except the first.  By the time any such
2348 * calls get to this routine, we should just shut up and say 'yes'.
2349 *
2350 * Unlike the cpuset_node_allowed_softwall() variant, above,
2351 * this variant requires that the node be in the current task's
2352 * mems_allowed or that we're in interrupt.  It does not scan up the
2353 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2354 * It never sleeps.
2355 */
2356int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2357{
2358        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2359                return 1;
2360        if (node_isset(node, current->mems_allowed))
2361                return 1;
2362        /*
2363         * Allow tasks that have access to memory reserves because they have
2364         * been OOM killed to get memory anywhere.
2365         */
2366        if (unlikely(test_thread_flag(TIF_MEMDIE)))
2367                return 1;
2368        return 0;
2369}
2370
2371/**
2372 * cpuset_unlock - release lock on cpuset changes
2373 *
2374 * Undo the lock taken in a previous cpuset_lock() call.
2375 */
2376
2377void cpuset_unlock(void)
2378{
2379        mutex_unlock(&callback_mutex);
2380}
2381
2382/**
2383 * cpuset_mem_spread_node() - On which node to begin search for a file page
2384 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2385 *
2386 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2387 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2388 * and if the memory allocation used cpuset_mem_spread_node()
2389 * to determine on which node to start looking, as it will for
2390 * certain page cache or slab cache pages such as used for file
2391 * system buffers and inode caches, then instead of starting on the
2392 * local node to look for a free page, rather spread the starting
2393 * node around the tasks mems_allowed nodes.
2394 *
2395 * We don't have to worry about the returned node being offline
2396 * because "it can't happen", and even if it did, it would be ok.
2397 *
2398 * The routines calling guarantee_online_mems() are careful to
2399 * only set nodes in task->mems_allowed that are online.  So it
2400 * should not be possible for the following code to return an
2401 * offline node.  But if it did, that would be ok, as this routine
2402 * is not returning the node where the allocation must be, only
2403 * the node where the search should start.  The zonelist passed to
2404 * __alloc_pages() will include all nodes.  If the slab allocator
2405 * is passed an offline node, it will fall back to the local node.
2406 * See kmem_cache_alloc_node().
2407 */
2408
2409static int cpuset_spread_node(int *rotor)
2410{
2411        int node;
2412
2413        node = next_node(*rotor, current->mems_allowed);
2414        if (node == MAX_NUMNODES)
2415                node = first_node(current->mems_allowed);
2416        *rotor = node;
2417        return node;
2418}
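
/*
 * A small worked example of the rotor above: if current->mems_allowed
 * is { 1, 3, 5 } and the rotor currently holds 3, successive calls
 * return 5, 1, 3, 5, ... - the starting node for spread allocations
 * simply rotates round-robin over the allowed nodes.
 */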
2419
2420int cpuset_mem_spread_node(void)
2421{
2422        if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2423                current->cpuset_mem_spread_rotor =
2424                        node_random(&current->mems_allowed);
2425
2426        return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2427}
2428
2429int cpuset_slab_spread_node(void)
2430{
2431        if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2432                current->cpuset_slab_spread_rotor =
2433                        node_random(&current->mems_allowed);
2434
2435        return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2436}
2437
2438EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2439
2440/**
2441 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2442 * @tsk1: pointer to task_struct of some task.
2443 * @tsk2: pointer to task_struct of some other task.
2444 *
2445 * Description: Return true if @tsk1's mems_allowed intersects the
2446 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2447 * one of the task's memory usage might impact the memory available
2448 * to the other.
2449 **/
2450
2451int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2452                                   const struct task_struct *tsk2)
2453{
2454        return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2455}
2456
2457/**
2458 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2459 * @tsk: pointer to task_struct of some task.
2460 *
2461 * Description: Prints @task's name, cpuset name, and cached copy of its
2462 * mems_allowed to the kernel log.  Must hold task_lock(tsk) to allow
2463 * dereferencing task_cs(tsk).
2464 */
2465void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2466{
2467        struct dentry *dentry;
2468
2469        dentry = task_cs(tsk)->css.cgroup->dentry;
2470        spin_lock(&cpuset_buffer_lock);
2471        snprintf(cpuset_name, CPUSET_NAME_LEN,
2472                 dentry ? (const char *)dentry->d_name.name : "/");
2473        nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2474                           tsk->mems_allowed);
2475        printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2476               tsk->comm, cpuset_name, cpuset_nodelist);
2477        spin_unlock(&cpuset_buffer_lock);
2478}
2479
2480/*
2481 * Collection of memory_pressure is suppressed unless
2482 * this flag is enabled by writing "1" to the special
2483 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2484 */
2485
2486int cpuset_memory_pressure_enabled __read_mostly;
2487
2488/**
2489 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2490 *
2491 * Keep a running average of the rate of synchronous (direct)
2492 * page reclaim efforts initiated by tasks in each cpuset.
2493 *
2494 * This represents the rate at which some task in the cpuset
2495 * ran low on memory on all nodes it was allowed to use, and
2496 * had to enter the kernels page reclaim code in an effort to
2497 * create more free memory by tossing clean pages or swapping
2498 * or writing dirty pages.
2499 *
2500 * Display to user space in the per-cpuset read-only file
2501 * "memory_pressure".  Value displayed is an integer
2502 * representing the recent rate of entry into the synchronous
2503 * (direct) page reclaim by any task attached to the cpuset.
2504 **/
2505
2506void __cpuset_memory_pressure_bump(void)
2507{
2508        task_lock(current);
2509        fmeter_markevent(&task_cs(current)->fmeter);
2510        task_unlock(current);
2511}
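
/*
 * The rate computed here is read back through each cpuset's read-only
 * "memory_pressure" file (FILE_MEMORY_PRESSURE above); collection is
 * only performed while the root-only knob is enabled, e.g.:
 *
 *      # echo 1 > /dev/cpuset/memory_pressure_enabled
 *      # cat /dev/cpuset/set1/memory_pressure
 */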
2512
2513#ifdef CONFIG_PROC_PID_CPUSET
2514/*
2515 * proc_cpuset_show()
2516 *  - Print tasks cpuset path into seq_file.
2517 *  - Used for /proc/<pid>/cpuset.
2518 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2519 *    doesn't really matter if tsk->cpuset changes after we read it,
2520 *    and we take cgroup_mutex, keeping cpuset_attach() from changing it
2521 *    anyway.
2522 */
2523static int proc_cpuset_show(struct seq_file *m, void *unused_v)
2524{
2525        struct pid *pid;
2526        struct task_struct *tsk;
2527        char *buf;
2528        struct cgroup_subsys_state *css;
2529        int retval;
2530
2531        retval = -ENOMEM;
2532        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2533        if (!buf)
2534                goto out;
2535
2536        retval = -ESRCH;
2537        pid = m->private;
2538        tsk = get_pid_task(pid, PIDTYPE_PID);
2539        if (!tsk)
2540                goto out_free;
2541
2542        retval = -EINVAL;
2543        cgroup_lock();
2544        css = task_subsys_state(tsk, cpuset_subsys_id);
2545        retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
2546        if (retval < 0)
2547                goto out_unlock;
2548        seq_puts(m, buf);
2549        seq_putc(m, '\n');
2550out_unlock:
2551        cgroup_unlock();
2552        put_task_struct(tsk);
2553out_free:
2554        kfree(buf);
2555out:
2556        return retval;
2557}
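
/*
 * For example, for a task attached to /dev/cpuset/set1, the function
 * above makes /proc/<pid>/cpuset read as the cgroup-relative path:
 *
 *      # cat /proc/self/cpuset
 *      /set1
 */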
2558
2559static int cpuset_open(struct inode *inode, struct file *file)
2560{
2561        struct pid *pid = PROC_I(inode)->pid;
2562        return single_open(file, proc_cpuset_show, pid);
2563}
2564
2565const struct file_operations proc_cpuset_operations = {
2566        .open           = cpuset_open,
2567        .read           = seq_read,
2568        .llseek         = seq_lseek,
2569        .release        = single_release,
2570};
2571#endif /* CONFIG_PROC_PID_CPUSET */
2572
2573/* Display task mems_allowed in /proc/<pid>/status file. */
2574void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2575{
2576        seq_printf(m, "Mems_allowed:\t");
2577        seq_nodemask(m, &task->mems_allowed);
2578        seq_printf(m, "\n");
2579        seq_printf(m, "Mems_allowed_list:\t");
2580        seq_nodemask_list(m, &task->mems_allowed);
2581        seq_printf(m, "\n");
2582}
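
/*
 * The function above contributes two lines to /proc/<pid>/status,
 * which for a task confined to nodes 0-1 look something like:
 *
 *      Mems_allowed:   00000000,00000003
 *      Mems_allowed_list:      0-1
 */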
2583