linux/kernel/cpuset.c
   1/*
   2 *  kernel/cpuset.c
   3 *
   4 *  Processor and Memory placement constraints for sets of tasks.
   5 *
   6 *  Copyright (C) 2003 BULL SA.
   7 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
   8 *
   9 *  Portions derived from Patrick Mochel's sysfs code.
  10 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
  11 *
  12 *  2003-10-10 Written by Simon Derr.
  13 *  2003-10-22 Updates by Stephen Hemminger.
  14 *  2004 May-July Rework by Paul Jackson.
  15 *
  16 *  This file is subject to the terms and conditions of the GNU General Public
  17 *  License.  See the file COPYING in the main directory of the Linux
  18 *  distribution for more details.
  19 */
  20
  21#include <linux/cpu.h>
  22#include <linux/cpumask.h>
  23#include <linux/cpuset.h>
  24#include <linux/err.h>
  25#include <linux/errno.h>
  26#include <linux/file.h>
  27#include <linux/fs.h>
  28#include <linux/init.h>
  29#include <linux/interrupt.h>
  30#include <linux/kernel.h>
  31#include <linux/kmod.h>
  32#include <linux/list.h>
  33#include <linux/mempolicy.h>
  34#include <linux/mm.h>
  35#include <linux/module.h>
  36#include <linux/mount.h>
  37#include <linux/namei.h>
  38#include <linux/pagemap.h>
  39#include <linux/proc_fs.h>
  40#include <linux/rcupdate.h>
  41#include <linux/sched.h>
  42#include <linux/seq_file.h>
  43#include <linux/security.h>
  44#include <linux/slab.h>
  45#include <linux/smp_lock.h>
  46#include <linux/spinlock.h>
  47#include <linux/stat.h>
  48#include <linux/string.h>
  49#include <linux/time.h>
  50#include <linux/backing-dev.h>
  51#include <linux/sort.h>
  52
  53#include <asm/uaccess.h>
  54#include <asm/atomic.h>
  55#include <linux/mutex.h>
  56
  57#define CPUSET_SUPER_MAGIC              0x27e0eb
  58
  59/*
   60 * Tracks how many cpusets are currently defined in the system.
  61 * When there is only one cpuset (the root cpuset) we can
  62 * short circuit some hooks.
  63 */
  64int number_of_cpusets __read_mostly;
  65
  66/* See "Frequency meter" comments, below. */
  67
  68struct fmeter {
  69        int cnt;                /* unprocessed events count */
  70        int val;                /* most recent output value */
  71        time_t time;            /* clock (secs) when val computed */
  72        spinlock_t lock;        /* guards read or write of above */
  73};
  74
  75struct cpuset {
  76        unsigned long flags;            /* "unsigned long" so bitops work */
  77        cpumask_t cpus_allowed;         /* CPUs allowed to tasks in cpuset */
  78        nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */
  79
  80        /*
  81         * Count is atomic so can incr (fork) or decr (exit) without a lock.
  82         */
  83        atomic_t count;                 /* count tasks using this cpuset */
  84
  85        /*
   86         * We link our 'sibling' struct into our parent's 'children'.
   87         * Our children link their 'sibling' into our 'children'.
   88         */
   89        struct list_head sibling;       /* my parent's children */
  90        struct list_head children;      /* my children */
  91
  92        struct cpuset *parent;          /* my parent */
  93        struct dentry *dentry;          /* cpuset fs entry */
  94
  95        /*
  96         * Copy of global cpuset_mems_generation as of the most
  97         * recent time this cpuset changed its mems_allowed.
  98         */
  99        int mems_generation;
 100
 101        struct fmeter fmeter;           /* memory_pressure filter */
 102};
 103
 104/* bits in struct cpuset flags field */
 105typedef enum {
 106        CS_CPU_EXCLUSIVE,
 107        CS_MEM_EXCLUSIVE,
 108        CS_MEMORY_MIGRATE,
 109        CS_REMOVED,
 110        CS_NOTIFY_ON_RELEASE,
 111        CS_SPREAD_PAGE,
 112        CS_SPREAD_SLAB,
 113} cpuset_flagbits_t;
 114
 115/* convenient tests for these bits */
 116static inline int is_cpu_exclusive(const struct cpuset *cs)
 117{
 118        return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
 119}
 120
 121static inline int is_mem_exclusive(const struct cpuset *cs)
 122{
 123        return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 124}
 125
 126static inline int is_removed(const struct cpuset *cs)
 127{
 128        return test_bit(CS_REMOVED, &cs->flags);
 129}
 130
 131static inline int notify_on_release(const struct cpuset *cs)
 132{
 133        return test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
 134}
 135
 136static inline int is_memory_migrate(const struct cpuset *cs)
 137{
 138        return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
 139}
 140
 141static inline int is_spread_page(const struct cpuset *cs)
 142{
 143        return test_bit(CS_SPREAD_PAGE, &cs->flags);
 144}
 145
 146static inline int is_spread_slab(const struct cpuset *cs)
 147{
 148        return test_bit(CS_SPREAD_SLAB, &cs->flags);
 149}
 150
 151/*
  152 * Increment this integer every time any cpuset changes its
  153 * mems_allowed value.  Users of cpusets can track this generation
  154 * number, and avoid having to lock and reload mems_allowed unless
  155 * the cpuset they're using changes generation.
  156 *
  157 * A single, global generation is needed because attach_task() could
  158 * reattach a task to a different cpuset, which must not have its
  159 * generation numbers aliased with those of that task's previous cpuset.
  160 *
  161 * Generations are needed for mems_allowed because one task cannot
  162 * modify another's memory placement.  So we must enable every task,
 163 * on every visit to __alloc_pages(), to efficiently check whether
 164 * its current->cpuset->mems_allowed has changed, requiring an update
 165 * of its current->mems_allowed.
 166 *
 167 * Since cpuset_mems_generation is guarded by manage_mutex,
 168 * there is no need to mark it atomic.
 169 */
 170static int cpuset_mems_generation;
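/*
 * For illustration, the generation check that this counter enables is
 * roughly the following (see cpuset_update_task_memory_state() below
 * for the real, locked version):
 *
 *	if (tsk->cpuset->mems_generation != tsk->cpuset_mems_generation) {
 *		guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
 *		tsk->cpuset_mems_generation = tsk->cpuset->mems_generation;
 *	}
 *
 * Only when the generations differ does a task pay the cost of
 * refreshing its mems_allowed.
 */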
 171
 172static struct cpuset top_cpuset = {
 173        .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
 174        .cpus_allowed = CPU_MASK_ALL,
 175        .mems_allowed = NODE_MASK_ALL,
 176        .count = ATOMIC_INIT(0),
 177        .sibling = LIST_HEAD_INIT(top_cpuset.sibling),
 178        .children = LIST_HEAD_INIT(top_cpuset.children),
 179};
 180
 181static struct vfsmount *cpuset_mount;
 182static struct super_block *cpuset_sb;
 183
 184/*
 185 * We have two global cpuset mutexes below.  They can nest.
 186 * It is ok to first take manage_mutex, then nest callback_mutex.  We also
  187 * require taking task_lock() when dereferencing a task's cpuset pointer.
 188 * See "The task_lock() exception", at the end of this comment.
 189 *
 190 * A task must hold both mutexes to modify cpusets.  If a task
 191 * holds manage_mutex, then it blocks others wanting that mutex,
 192 * ensuring that it is the only task able to also acquire callback_mutex
 193 * and be able to modify cpusets.  It can perform various checks on
 194 * the cpuset structure first, knowing nothing will change.  It can
 195 * also allocate memory while just holding manage_mutex.  While it is
 196 * performing these checks, various callback routines can briefly
 197 * acquire callback_mutex to query cpusets.  Once it is ready to make
 198 * the changes, it takes callback_mutex, blocking everyone else.
 199 *
 200 * Calls to the kernel memory allocator can not be made while holding
 201 * callback_mutex, as that would risk double tripping on callback_mutex
 202 * from one of the callbacks into the cpuset code from within
 203 * __alloc_pages().
 204 *
 205 * If a task is only holding callback_mutex, then it has read-only
 206 * access to cpusets.
 207 *
 208 * The task_struct fields mems_allowed and mems_generation may only
 209 * be accessed in the context of that task, so require no locks.
 210 *
 211 * Any task can increment and decrement the count field without lock.
 212 * So in general, code holding manage_mutex or callback_mutex can't rely
 213 * on the count field not changing.  However, if the count goes to
 214 * zero, then only attach_task(), which holds both mutexes, can
  215 * increment it again.  A count of zero means that no tasks are
  216 * currently attached, so there is no way a task attached to that
  217 * cpuset can fork (the other way to increment the count).
 218 * So code holding manage_mutex or callback_mutex can safely assume that
 219 * if the count is zero, it will stay zero.  Similarly, if a task
 220 * holds manage_mutex or callback_mutex on a cpuset with zero count, it
 221 * knows that the cpuset won't be removed, as cpuset_rmdir() needs
 222 * both of those mutexes.
 223 *
 224 * The cpuset_common_file_write handler for operations that modify
 225 * the cpuset hierarchy holds manage_mutex across the entire operation,
 226 * single threading all such cpuset modifications across the system.
 227 *
 228 * The cpuset_common_file_read() handlers only hold callback_mutex across
 229 * small pieces of code, such as when reading out possibly multi-word
 230 * cpumasks and nodemasks.
 231 *
  232 * The fork and exit callbacks, cpuset_fork() and cpuset_exit(), don't
 233 * (usually) take either mutex.  These are the two most performance
 234 * critical pieces of code here.  The exception occurs on cpuset_exit(),
 235 * when a task in a notify_on_release cpuset exits.  Then manage_mutex
  236 * is taken, and if the cpuset count is zero, a usermode call is made
 237 * to /sbin/cpuset_release_agent with the name of the cpuset (path
 238 * relative to the root of cpuset file system) as the argument.
 239 *
 240 * A cpuset can only be deleted if both its 'count' of using tasks
 241 * is zero, and its list of 'children' cpusets is empty.  Since all
 242 * tasks in the system use _some_ cpuset, and since there is always at
  243 * least one task in the system (init), top_cpuset always has
  244 * either child cpusets or using tasks, or both.  So we don't
 245 * need a special hack to ensure that top_cpuset cannot be deleted.
 246 *
  247 * The above "Tale of Two Mutexes" would be complete, but for:
 248 *
 249 *      The task_lock() exception
 250 *
 251 * The need for this exception arises from the action of attach_task(),
  252 * which overwrites one task's cpuset pointer with another.  It does
  253 * so using both mutexes; however, there are several performance
  254 * critical places that need to reference task->cpuset without the
  255 * expense of grabbing a system global mutex.  Therefore except as
  256 * noted below, when dereferencing or, as in attach_task(), modifying
  257 * a task's cpuset pointer we use task_lock(), which acts on a spinlock
  258 * (task->alloc_lock) already in the task_struct routinely used for
  259 * such matters.
  260 *
  261 * P.S.  One more locking exception.  RCU is used to guard the
  262 * update of a task's cpuset pointer by attach_task() and the
 263 * access of task->cpuset->mems_generation via that pointer in
 264 * the routine cpuset_update_task_memory_state().
 265 */
 266
 267static DEFINE_MUTEX(manage_mutex);
 268static DEFINE_MUTEX(callback_mutex);
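/*
 * Sketch of the update protocol described above, as followed by the
 * update_* routines below (simplified; error handling omitted):
 *
 *	mutex_lock(&manage_mutex);
 *	... validate the request, allocate memory, build a trial cpuset ...
 *	mutex_lock(&callback_mutex);
 *	... apply the change to the cpuset ...
 *	mutex_unlock(&callback_mutex);
 *	mutex_unlock(&manage_mutex);
 *
 * Readers such as the common file read handlers take only
 * callback_mutex, and only around short copies of cpumasks and
 * nodemasks.
 */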
 269
 270/*
 271 * A couple of forward declarations required, due to cyclic reference loop:
 272 *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
 273 *  -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
 274 */
 275
 276static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
 277static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);
 278
 279static struct backing_dev_info cpuset_backing_dev_info = {
 280        .ra_pages = 0,          /* No readahead */
 281        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
 282};
 283
 284static struct inode *cpuset_new_inode(mode_t mode)
 285{
 286        struct inode *inode = new_inode(cpuset_sb);
 287
 288        if (inode) {
 289                inode->i_mode = mode;
 290                inode->i_uid = current->fsuid;
 291                inode->i_gid = current->fsgid;
 292                inode->i_blocks = 0;
 293                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 294                inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
 295        }
 296        return inode;
 297}
 298
 299static void cpuset_diput(struct dentry *dentry, struct inode *inode)
 300{
  301        /* is dentry a directory? if so, kfree() associated cpuset */
 302        if (S_ISDIR(inode->i_mode)) {
 303                struct cpuset *cs = dentry->d_fsdata;
 304                BUG_ON(!(is_removed(cs)));
 305                kfree(cs);
 306        }
 307        iput(inode);
 308}
 309
 310static struct dentry_operations cpuset_dops = {
 311        .d_iput = cpuset_diput,
 312};
 313
 314static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
 315{
 316        struct dentry *d = lookup_one_len(name, parent, strlen(name));
 317        if (!IS_ERR(d))
 318                d->d_op = &cpuset_dops;
 319        return d;
 320}
 321
 322static void remove_dir(struct dentry *d)
 323{
 324        struct dentry *parent = dget(d->d_parent);
 325
 326        d_delete(d);
 327        simple_rmdir(parent->d_inode, d);
 328        dput(parent);
 329}
 330
 331/*
  332 * NOTE: the dentry must have been dget()'ed
 333 */
 334static void cpuset_d_remove_dir(struct dentry *dentry)
 335{
 336        struct list_head *node;
 337
 338        spin_lock(&dcache_lock);
 339        node = dentry->d_subdirs.next;
 340        while (node != &dentry->d_subdirs) {
 341                struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
 342                list_del_init(node);
 343                if (d->d_inode) {
 344                        d = dget_locked(d);
 345                        spin_unlock(&dcache_lock);
 346                        d_delete(d);
 347                        simple_unlink(dentry->d_inode, d);
 348                        dput(d);
 349                        spin_lock(&dcache_lock);
 350                }
 351                node = dentry->d_subdirs.next;
 352        }
 353        list_del_init(&dentry->d_u.d_child);
 354        spin_unlock(&dcache_lock);
 355        remove_dir(dentry);
 356}
 357
 358static struct super_operations cpuset_ops = {
 359        .statfs = simple_statfs,
 360        .drop_inode = generic_delete_inode,
 361};
 362
 363static int cpuset_fill_super(struct super_block *sb, void *unused_data,
 364                                                        int unused_silent)
 365{
 366        struct inode *inode;
 367        struct dentry *root;
 368
 369        sb->s_blocksize = PAGE_CACHE_SIZE;
 370        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
 371        sb->s_magic = CPUSET_SUPER_MAGIC;
 372        sb->s_op = &cpuset_ops;
 373        cpuset_sb = sb;
 374
 375        inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
 376        if (inode) {
 377                inode->i_op = &simple_dir_inode_operations;
 378                inode->i_fop = &simple_dir_operations;
 379                /* directories start off with i_nlink == 2 (for "." entry) */
 380                inc_nlink(inode);
 381        } else {
 382                return -ENOMEM;
 383        }
 384
 385        root = d_alloc_root(inode);
 386        if (!root) {
 387                iput(inode);
 388                return -ENOMEM;
 389        }
 390        sb->s_root = root;
 391        return 0;
 392}
 393
 394static int cpuset_get_sb(struct file_system_type *fs_type,
 395                         int flags, const char *unused_dev_name,
 396                         void *data, struct vfsmount *mnt)
 397{
 398        return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt);
 399}
 400
 401static struct file_system_type cpuset_fs_type = {
 402        .name = "cpuset",
 403        .get_sb = cpuset_get_sb,
 404        .kill_sb = kill_litter_super,
 405};
 406
 407/* struct cftype:
 408 *
 409 * The files in the cpuset filesystem mostly have a very simple read/write
  410 * handling; a common function takes care of it.  Nevertheless, some cases
  411 * (e.g. reading the tasks file) are special, so this structure is defined
  412 * for every kind of file.
  413 *
  414 *
  415 * When reading/writing to a file:
  416 *      - the cpuset to use is file->f_dentry->d_parent->d_fsdata
 417 *      - the 'cftype' of the file is file->f_dentry->d_fsdata
 418 */
 419
 420struct cftype {
 421        char *name;
 422        int private;
 423        int (*open) (struct inode *inode, struct file *file);
 424        ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
 425                                                        loff_t *ppos);
 426        int (*write) (struct file *file, const char __user *buf, size_t nbytes,
 427                                                        loff_t *ppos);
 428        int (*release) (struct inode *inode, struct file *file);
 429};
 430
 431static inline struct cpuset *__d_cs(struct dentry *dentry)
 432{
 433        return dentry->d_fsdata;
 434}
 435
 436static inline struct cftype *__d_cft(struct dentry *dentry)
 437{
 438        return dentry->d_fsdata;
 439}
 440
 441/*
 442 * Call with manage_mutex held.  Writes path of cpuset into buf.
 443 * Returns 0 on success, -errno on error.
 444 */
 445
 446static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
 447{
 448        char *start;
 449
 450        start = buf + buflen;
 451
 452        *--start = '\0';
 453        for (;;) {
 454                int len = cs->dentry->d_name.len;
 455                if ((start -= len) < buf)
 456                        return -ENAMETOOLONG;
 457                memcpy(start, cs->dentry->d_name.name, len);
 458                cs = cs->parent;
 459                if (!cs)
 460                        break;
 461                if (!cs->parent)
 462                        continue;
 463                if (--start < buf)
 464                        return -ENAMETOOLONG;
 465                *start = '/';
 466        }
 467        memmove(buf, start, buf + buflen - start);
 468        return 0;
 469}
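/*
 * Example (see check_for_release() below): with a PAGE_SIZE buffer,
 *
 *	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	if (buf && cpuset_path(cs, buf, PAGE_SIZE) == 0)
 *		...
 *
 * buf then holds a path such as "/csA/csB", relative to the root of
 * the cpuset file system.
 */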
 470
 471/*
 472 * Notify userspace when a cpuset is released, by running
 473 * /sbin/cpuset_release_agent with the name of the cpuset (path
 474 * relative to the root of cpuset file system) as the argument.
 475 *
 476 * Most likely, this user command will try to rmdir this cpuset.
 477 *
 478 * This races with the possibility that some other task will be
 479 * attached to this cpuset before it is removed, or that some other
 480 * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
 481 * The presumed 'rmdir' will fail quietly if this cpuset is no longer
 482 * unused, and this cpuset will be reprieved from its death sentence,
 483 * to continue to serve a useful existence.  Next time it's released,
 484 * we will get notified again, if it still has 'notify_on_release' set.
 485 *
 486 * The final arg to call_usermodehelper() is 0, which means don't
 487 * wait.  The separate /sbin/cpuset_release_agent task is forked by
 488 * call_usermodehelper(), then control in this thread returns here,
 489 * without waiting for the release agent task.  We don't bother to
 490 * wait because the caller of this routine has no use for the exit
 491 * status of the /sbin/cpuset_release_agent task, so no sense holding
 492 * our caller up for that.
 493 *
 494 * When we had only one cpuset mutex, we had to call this
 495 * without holding it, to avoid deadlock when call_usermodehelper()
 496 * allocated memory.  With two locks, we could now call this while
 497 * holding manage_mutex, but we still don't, so as to minimize
 498 * the time manage_mutex is held.
 499 */
 500
 501static void cpuset_release_agent(const char *pathbuf)
 502{
 503        char *argv[3], *envp[3];
 504        int i;
 505
 506        if (!pathbuf)
 507                return;
 508
 509        i = 0;
 510        argv[i++] = "/sbin/cpuset_release_agent";
 511        argv[i++] = (char *)pathbuf;
 512        argv[i] = NULL;
 513
 514        i = 0;
 515        /* minimal command environment */
 516        envp[i++] = "HOME=/";
 517        envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
 518        envp[i] = NULL;
 519
 520        call_usermodehelper(argv[0], argv, envp, 0);
 521        kfree(pathbuf);
 522}
 523
 524/*
 525 * Either cs->count of using tasks transitioned to zero, or the
 526 * cs->children list of child cpusets just became empty.  If this
 527 * cs is notify_on_release() and now both the user count is zero and
 528 * the list of children is empty, prepare cpuset path in a kmalloc'd
 529 * buffer, to be returned via ppathbuf, so that the caller can invoke
 530 * cpuset_release_agent() with it later on, once manage_mutex is dropped.
 531 * Call here with manage_mutex held.
 532 *
 533 * This check_for_release() routine is responsible for kmalloc'ing
 534 * pathbuf.  The above cpuset_release_agent() is responsible for
 535 * kfree'ing pathbuf.  The caller of these routines is responsible
 536 * for providing a pathbuf pointer, initialized to NULL, then
 537 * calling check_for_release() with manage_mutex held and the address
 538 * of the pathbuf pointer, then dropping manage_mutex, then calling
 539 * cpuset_release_agent() with pathbuf, as set by check_for_release().
 540 */
 541
 542static void check_for_release(struct cpuset *cs, char **ppathbuf)
 543{
 544        if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
 545            list_empty(&cs->children)) {
 546                char *buf;
 547
 548                buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 549                if (!buf)
 550                        return;
 551                if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
 552                        kfree(buf);
 553                else
 554                        *ppathbuf = buf;
 555        }
 556}
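/*
 * The caller protocol spelled out above, in code form.  This is roughly
 * the pattern followed by attach_task() together with its caller
 * cpuset_common_file_write():
 *
 *	char *pathbuf = NULL;
 *
 *	mutex_lock(&manage_mutex);
 *	...
 *	if (atomic_dec_and_test(&cs->count))
 *		check_for_release(cs, &pathbuf);
 *	mutex_unlock(&manage_mutex);
 *	cpuset_release_agent(pathbuf);
 */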
 557
 558/*
  559 * Return in *pmask the portion of a cpuset's cpus_allowed that
 560 * are online.  If none are online, walk up the cpuset hierarchy
 561 * until we find one that does have some online cpus.  If we get
 562 * all the way to the top and still haven't found any online cpus,
 563 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 564 * task, return cpu_online_map.
 565 *
 566 * One way or another, we guarantee to return some non-empty subset
 567 * of cpu_online_map.
 568 *
 569 * Call with callback_mutex held.
 570 */
 571
 572static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
 573{
 574        while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
 575                cs = cs->parent;
 576        if (cs)
 577                cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
 578        else
 579                *pmask = cpu_online_map;
 580        BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
 581}
 582
 583/*
  584 * Return in *pmask the portion of a cpuset's mems_allowed that
 585 * are online.  If none are online, walk up the cpuset hierarchy
 586 * until we find one that does have some online mems.  If we get
 587 * all the way to the top and still haven't found any online mems,
 588 * return node_online_map.
 589 *
 590 * One way or another, we guarantee to return some non-empty subset
 591 * of node_online_map.
 592 *
 593 * Call with callback_mutex held.
 594 */
 595
 596static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 597{
 598        while (cs && !nodes_intersects(cs->mems_allowed, node_online_map))
 599                cs = cs->parent;
 600        if (cs)
 601                nodes_and(*pmask, cs->mems_allowed, node_online_map);
 602        else
 603                *pmask = node_online_map;
 604        BUG_ON(!nodes_intersects(*pmask, node_online_map));
 605}
 606
 607/**
 608 * cpuset_update_task_memory_state - update task memory placement
 609 *
  610 * If the current task's cpuset's mems_allowed changed behind our
 611 * backs, update current->mems_allowed, mems_generation and task NUMA
 612 * mempolicy to the new value.
 613 *
 614 * Task mempolicy is updated by rebinding it relative to the
 615 * current->cpuset if a task has its memory placement changed.
 616 * Do not call this routine if in_interrupt().
 617 *
 618 * Call without callback_mutex or task_lock() held.  May be
 619 * called with or without manage_mutex held.  Thanks in part to
  620 * 'the_top_cpuset_hack', the task's cpuset pointer will never
 621 * be NULL.  This routine also might acquire callback_mutex and
 622 * current->mm->mmap_sem during call.
 623 *
 624 * Reading current->cpuset->mems_generation doesn't need task_lock
  625 * to guard the current->cpuset dereference, because it is guarded
 626 * from concurrent freeing of current->cpuset by attach_task(),
 627 * using RCU.
 628 *
 629 * The rcu_dereference() is technically probably not needed,
 630 * as I don't actually mind if I see a new cpuset pointer but
 631 * an old value of mems_generation.  However this really only
 632 * matters on alpha systems using cpusets heavily.  If I dropped
 633 * that rcu_dereference(), it would save them a memory barrier.
 634 * For all other arch's, rcu_dereference is a no-op anyway, and for
 635 * alpha systems not using cpusets, another planned optimization,
 636 * avoiding the rcu critical section for tasks in the root cpuset
 637 * which is statically allocated, so can't vanish, will make this
 638 * irrelevant.  Better to use RCU as intended, than to engage in
 639 * some cute trick to save a memory barrier that is impossible to
 640 * test, for alpha systems using cpusets heavily, which might not
 641 * even exist.
 642 *
 643 * This routine is needed to update the per-task mems_allowed data,
  644 * within the task's context, when it is trying to allocate memory
 645 * (in various mm/mempolicy.c routines) and notices that some other
 646 * task has been modifying its cpuset.
 647 */
 648
 649void cpuset_update_task_memory_state(void)
 650{
 651        int my_cpusets_mem_gen;
 652        struct task_struct *tsk = current;
 653        struct cpuset *cs;
 654
 655        if (tsk->cpuset == &top_cpuset) {
 656                /* Don't need rcu for top_cpuset.  It's never freed. */
 657                my_cpusets_mem_gen = top_cpuset.mems_generation;
 658        } else {
 659                rcu_read_lock();
 660                cs = rcu_dereference(tsk->cpuset);
 661                my_cpusets_mem_gen = cs->mems_generation;
 662                rcu_read_unlock();
 663        }
 664
 665        if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
 666                mutex_lock(&callback_mutex);
 667                task_lock(tsk);
 668                cs = tsk->cpuset;       /* Maybe changed when task not locked */
 669                guarantee_online_mems(cs, &tsk->mems_allowed);
 670                tsk->cpuset_mems_generation = cs->mems_generation;
 671                if (is_spread_page(cs))
 672                        tsk->flags |= PF_SPREAD_PAGE;
 673                else
 674                        tsk->flags &= ~PF_SPREAD_PAGE;
 675                if (is_spread_slab(cs))
 676                        tsk->flags |= PF_SPREAD_SLAB;
 677                else
 678                        tsk->flags &= ~PF_SPREAD_SLAB;
 679                task_unlock(tsk);
 680                mutex_unlock(&callback_mutex);
 681                mpol_rebind_task(tsk, &tsk->mems_allowed);
 682        }
 683}
 684
 685/*
 686 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 687 *
 688 * One cpuset is a subset of another if all its allowed CPUs and
 689 * Memory Nodes are a subset of the other, and its exclusive flags
 690 * are only set if the other's are set.  Call holding manage_mutex.
 691 */
 692
 693static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 694{
 695        return  cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
 696                nodes_subset(p->mems_allowed, q->mems_allowed) &&
 697                is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 698                is_mem_exclusive(p) <= is_mem_exclusive(q);
 699}
 700
 701/*
 702 * validate_change() - Used to validate that any proposed cpuset change
 703 *                     follows the structural rules for cpusets.
 704 *
 705 * If we replaced the flag and mask values of the current cpuset
 706 * (cur) with those values in the trial cpuset (trial), would
 707 * our various subset and exclusive rules still be valid?  Presumes
 708 * manage_mutex held.
 709 *
 710 * 'cur' is the address of an actual, in-use cpuset.  Operations
 711 * such as list traversal that depend on the actual address of the
 712 * cpuset in the list must use cur below, not trial.
 713 *
 714 * 'trial' is the address of bulk structure copy of cur, with
 715 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 716 * or flags changed to new, trial values.
 717 *
 718 * Return 0 if valid, -errno if not.
 719 */
 720
 721static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 722{
 723        struct cpuset *c, *par;
 724
 725        /* Each of our child cpusets must be a subset of us */
 726        list_for_each_entry(c, &cur->children, sibling) {
 727                if (!is_cpuset_subset(c, trial))
 728                        return -EBUSY;
 729        }
 730
 731        /* Remaining checks don't apply to root cpuset */
 732        if ((par = cur->parent) == NULL)
 733                return 0;
 734
 735        /* We must be a subset of our parent cpuset */
 736        if (!is_cpuset_subset(trial, par))
 737                return -EACCES;
 738
 739        /* If either I or some sibling (!= me) is exclusive, we can't overlap */
 740        list_for_each_entry(c, &par->children, sibling) {
 741                if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 742                    c != cur &&
 743                    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
 744                        return -EINVAL;
 745                if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 746                    c != cur &&
 747                    nodes_intersects(trial->mems_allowed, c->mems_allowed))
 748                        return -EINVAL;
 749        }
 750
 751        return 0;
 752}
 753
 754/*
 755 * For a given cpuset cur, partition the system as follows
 756 * a. All cpus in the parent cpuset's cpus_allowed that are not part of any
 757 *    exclusive child cpusets
 758 * b. All cpus in the current cpuset's cpus_allowed that are not part of any
 759 *    exclusive child cpusets
 760 * Build these two partitions by calling partition_sched_domains
 761 *
 762 * Call with manage_mutex held.  May nest a call to the
 763 * lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
 764 * Must not be called holding callback_mutex, because we must
 765 * not call lock_cpu_hotplug() while holding callback_mutex.
 766 */
 767
 768static void update_cpu_domains(struct cpuset *cur)
 769{
 770        struct cpuset *c, *par = cur->parent;
 771        cpumask_t pspan, cspan;
 772
 773        if (par == NULL || cpus_empty(cur->cpus_allowed))
 774                return;
 775
 776        /*
 777         * Get all cpus from parent's cpus_allowed not part of exclusive
 778         * children
 779         */
 780        pspan = par->cpus_allowed;
 781        list_for_each_entry(c, &par->children, sibling) {
 782                if (is_cpu_exclusive(c))
 783                        cpus_andnot(pspan, pspan, c->cpus_allowed);
 784        }
 785        if (!is_cpu_exclusive(cur)) {
 786                cpus_or(pspan, pspan, cur->cpus_allowed);
 787                if (cpus_equal(pspan, cur->cpus_allowed))
 788                        return;
 789                cspan = CPU_MASK_NONE;
 790        } else {
 791                if (cpus_empty(pspan))
 792                        return;
 793                cspan = cur->cpus_allowed;
 794                /*
 795                 * Get all cpus from current cpuset's cpus_allowed not part
 796                 * of exclusive children
 797                 */
 798                list_for_each_entry(c, &cur->children, sibling) {
 799                        if (is_cpu_exclusive(c))
 800                                cpus_andnot(cspan, cspan, c->cpus_allowed);
 801                }
 802        }
 803
 804        lock_cpu_hotplug();
 805        partition_sched_domains(&pspan, &cspan);
 806        unlock_cpu_hotplug();
 807}
 808
 809/*
 810 * Call with manage_mutex held.  May take callback_mutex during call.
 811 */
 812
 813static int update_cpumask(struct cpuset *cs, char *buf)
 814{
 815        struct cpuset trialcs;
 816        int retval, cpus_unchanged;
 817
 818        /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
 819        if (cs == &top_cpuset)
 820                return -EACCES;
 821
 822        trialcs = *cs;
 823        retval = cpulist_parse(buf, trialcs.cpus_allowed);
 824        if (retval < 0)
 825                return retval;
 826        cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
 827        if (cpus_empty(trialcs.cpus_allowed))
 828                return -ENOSPC;
 829        retval = validate_change(cs, &trialcs);
 830        if (retval < 0)
 831                return retval;
 832        cpus_unchanged = cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed);
 833        mutex_lock(&callback_mutex);
 834        cs->cpus_allowed = trialcs.cpus_allowed;
 835        mutex_unlock(&callback_mutex);
 836        if (is_cpu_exclusive(cs) && !cpus_unchanged)
 837                update_cpu_domains(cs);
 838        return 0;
 839}
 840
 841/*
 842 * cpuset_migrate_mm
 843 *
 844 *    Migrate memory region from one set of nodes to another.
 845 *
  846 *    Temporarily set the task's mems_allowed to target nodes of migration,
 847 *    so that the migration code can allocate pages on these nodes.
 848 *
 849 *    Call holding manage_mutex, so our current->cpuset won't change
 850 *    during this call, as manage_mutex holds off any attach_task()
 851 *    calls.  Therefore we don't need to take task_lock around the
 852 *    call to guarantee_online_mems(), as we know no one is changing
  853 *    our task's cpuset.
 854 *
  855 *    Hold callback_mutex around the two modifications of our task's
 856 *    mems_allowed to synchronize with cpuset_mems_allowed().
 857 *
 858 *    While the mm_struct we are migrating is typically from some
 859 *    other task, the task_struct mems_allowed that we are hacking
 860 *    is for our current task, which must allocate new pages for that
 861 *    migrating memory region.
 862 *
 863 *    We call cpuset_update_task_memory_state() before hacking
  864 *    our task's mems_allowed, so that we are assured of being in
  865 *    sync with our task's cpuset, and in particular, callbacks to
  866 *    cpuset_update_task_memory_state() from nested page allocations
  867 *    won't see any mismatch of our cpuset and task mems_generation
  868 *    values, so won't overwrite our hacked task's mems_allowed
 869 *    nodemask.
 870 */
 871
 872static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 873                                                        const nodemask_t *to)
 874{
 875        struct task_struct *tsk = current;
 876
 877        cpuset_update_task_memory_state();
 878
 879        mutex_lock(&callback_mutex);
 880        tsk->mems_allowed = *to;
 881        mutex_unlock(&callback_mutex);
 882
 883        do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 884
 885        mutex_lock(&callback_mutex);
 886        guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
 887        mutex_unlock(&callback_mutex);
 888}
 889
 890/*
 891 * Handle user request to change the 'mems' memory placement
 892 * of a cpuset.  Needs to validate the request, update the
  893 * cpuset's mems_allowed and mems_generation, and for each
  894 * task in the cpuset, rebind any vma mempolicies and if
  895 * the cpuset is marked 'memory_migrate', migrate the task's
  896 * pages to the new memory.
  897 *
  898 * Call with manage_mutex held.  May take callback_mutex during call.
  899 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
  900 * lock each such task's mm->mmap_sem, scan its vma's and rebind
  901 * their mempolicies to the cpuset's new mems_allowed.
 902 */
 903
 904static int update_nodemask(struct cpuset *cs, char *buf)
 905{
 906        struct cpuset trialcs;
 907        nodemask_t oldmem;
 908        struct task_struct *g, *p;
 909        struct mm_struct **mmarray;
 910        int i, n, ntasks;
 911        int migrate;
 912        int fudge;
 913        int retval;
 914
 915        /* top_cpuset.mems_allowed tracks node_online_map; it's read-only */
 916        if (cs == &top_cpuset)
 917                return -EACCES;
 918
 919        trialcs = *cs;
 920        retval = nodelist_parse(buf, trialcs.mems_allowed);
 921        if (retval < 0)
 922                goto done;
 923        nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
 924        oldmem = cs->mems_allowed;
 925        if (nodes_equal(oldmem, trialcs.mems_allowed)) {
 926                retval = 0;             /* Too easy - nothing to do */
 927                goto done;
 928        }
 929        if (nodes_empty(trialcs.mems_allowed)) {
 930                retval = -ENOSPC;
 931                goto done;
 932        }
 933        retval = validate_change(cs, &trialcs);
 934        if (retval < 0)
 935                goto done;
 936
 937        mutex_lock(&callback_mutex);
 938        cs->mems_allowed = trialcs.mems_allowed;
 939        cs->mems_generation = cpuset_mems_generation++;
 940        mutex_unlock(&callback_mutex);
 941
 942        set_cpuset_being_rebound(cs);           /* causes mpol_copy() rebind */
 943
 944        fudge = 10;                             /* spare mmarray[] slots */
 945        fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
 946        retval = -ENOMEM;
 947
 948        /*
 949         * Allocate mmarray[] to hold mm reference for each task
 950         * in cpuset cs.  Can't kmalloc GFP_KERNEL while holding
 951         * tasklist_lock.  We could use GFP_ATOMIC, but with a
 952         * few more lines of code, we can retry until we get a big
 953         * enough mmarray[] w/o using GFP_ATOMIC.
 954         */
 955        while (1) {
 956                ntasks = atomic_read(&cs->count);       /* guess */
 957                ntasks += fudge;
 958                mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
 959                if (!mmarray)
 960                        goto done;
 961                write_lock_irq(&tasklist_lock);         /* block fork */
 962                if (atomic_read(&cs->count) <= ntasks)
 963                        break;                          /* got enough */
 964                write_unlock_irq(&tasklist_lock);       /* try again */
 965                kfree(mmarray);
 966        }
 967
 968        n = 0;
 969
 970        /* Load up mmarray[] with mm reference for each task in cpuset. */
 971        do_each_thread(g, p) {
 972                struct mm_struct *mm;
 973
 974                if (n >= ntasks) {
 975                        printk(KERN_WARNING
 976                                "Cpuset mempolicy rebind incomplete.\n");
 977                        continue;
 978                }
 979                if (p->cpuset != cs)
 980                        continue;
 981                mm = get_task_mm(p);
 982                if (!mm)
 983                        continue;
 984                mmarray[n++] = mm;
 985        } while_each_thread(g, p);
 986        write_unlock_irq(&tasklist_lock);
 987
 988        /*
 989         * Now that we've dropped the tasklist spinlock, we can
 990         * rebind the vma mempolicies of each mm in mmarray[] to their
 991         * new cpuset, and release that mm.  The mpol_rebind_mm()
 992         * call takes mmap_sem, which we couldn't take while holding
 993         * tasklist_lock.  Forks can happen again now - the mpol_copy()
 994         * cpuset_being_rebound check will catch such forks, and rebind
 995         * their vma mempolicies too.  Because we still hold the global
 996         * cpuset manage_mutex, we know that no other rebind effort will
 997         * be contending for the global variable cpuset_being_rebound.
 998         * It's ok if we rebind the same mm twice; mpol_rebind_mm()
 999         * is idempotent.  Also migrate pages in each mm to new nodes.
1000         */
1001        migrate = is_memory_migrate(cs);
1002        for (i = 0; i < n; i++) {
1003                struct mm_struct *mm = mmarray[i];
1004
1005                mpol_rebind_mm(mm, &cs->mems_allowed);
1006                if (migrate)
1007                        cpuset_migrate_mm(mm, &oldmem, &cs->mems_allowed);
1008                mmput(mm);
1009        }
1010
 1011        /* We're done rebinding vma's to this cpuset's new mems_allowed. */
1012        kfree(mmarray);
1013        set_cpuset_being_rebound(NULL);
1014        retval = 0;
1015done:
1016        return retval;
1017}
1018
1019/*
1020 * Call with manage_mutex held.
1021 */
1022
1023static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
1024{
1025        if (simple_strtoul(buf, NULL, 10) != 0)
1026                cpuset_memory_pressure_enabled = 1;
1027        else
1028                cpuset_memory_pressure_enabled = 0;
1029        return 0;
1030}
1031
1032/*
1033 * update_flag - read a 0 or a 1 in a file and update associated flag
1034 * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
1035 *                              CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
1036 *                              CS_SPREAD_PAGE, CS_SPREAD_SLAB)
1037 * cs:  the cpuset to update
1038 * buf: the buffer where we read the 0 or 1
1039 *
1040 * Call with manage_mutex held.
1041 */
1042
1043static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
1044{
1045        int turning_on;
1046        struct cpuset trialcs;
1047        int err, cpu_exclusive_changed;
1048
1049        turning_on = (simple_strtoul(buf, NULL, 10) != 0);
1050
1051        trialcs = *cs;
1052        if (turning_on)
1053                set_bit(bit, &trialcs.flags);
1054        else
1055                clear_bit(bit, &trialcs.flags);
1056
1057        err = validate_change(cs, &trialcs);
1058        if (err < 0)
1059                return err;
1060        cpu_exclusive_changed =
1061                (is_cpu_exclusive(cs) != is_cpu_exclusive(&trialcs));
1062        mutex_lock(&callback_mutex);
1063        if (turning_on)
1064                set_bit(bit, &cs->flags);
1065        else
1066                clear_bit(bit, &cs->flags);
1067        mutex_unlock(&callback_mutex);
1068
1069        if (cpu_exclusive_changed)
1070                update_cpu_domains(cs);
1071        return 0;
1072}
1073
1074/*
1075 * Frequency meter - How fast is some event occurring?
1076 *
1077 * These routines manage a digitally filtered, constant time based,
1078 * event frequency meter.  There are four routines:
1079 *   fmeter_init() - initialize a frequency meter.
1080 *   fmeter_markevent() - called each time the event happens.
1081 *   fmeter_getrate() - returns the recent rate of such events.
1082 *   fmeter_update() - internal routine used to update fmeter.
1083 *
1084 * A common data structure is passed to each of these routines,
1085 * which is used to keep track of the state required to manage the
1086 * frequency meter and its digital filter.
1087 *
1088 * The filter works on the number of events marked per unit time.
1089 * The filter is single-pole low-pass recursive (IIR).  The time unit
1090 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1091 * simulate 3 decimal digits of precision (multiplied by 1000).
1092 *
1093 * With an FM_COEF of 933, and a time base of 1 second, the filter
1094 * has a half-life of 10 seconds, meaning that if the events quit
1095 * happening, then the rate returned from the fmeter_getrate()
1096 * will be cut in half each 10 seconds, until it converges to zero.
1097 *
1098 * It is not worth doing a real infinitely recursive filter.  If more
1099 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1100 * just compute FM_MAXTICKS ticks worth, by which point the level
1101 * will be stable.
1102 *
1103 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1104 * arithmetic overflow in the fmeter_update() routine.
1105 *
1106 * Given the simple 32 bit integer arithmetic used, this meter works
1107 * best for reporting rates between one per millisecond (msec) and
1108 * one per 32 (approx) seconds.  At constant rates faster than one
1109 * per msec it maxes out at values just under 1,000,000.  At constant
1110 * rates between one per msec, and one per second it will stabilize
1111 * to a value N*1000, where N is the rate of events per second.
1112 * At constant rates between one per second and one per 32 seconds,
1113 * it will be choppy, moving up on the seconds that have an event,
1114 * and then decaying until the next event.  At rates slower than
1115 * about one in 32 seconds, it decays all the way back to zero between
1116 * each event.
1117 */
1118
1119#define FM_COEF 933             /* coefficient for half-life of 10 secs */
1120#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1121#define FM_MAXCNT 1000000       /* limit cnt to avoid overflow */
1122#define FM_SCALE 1000           /* faux fixed point scale */
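/*
 * Worked example of the half-life claim above: each one-second tick
 * scales the value by FM_COEF/FM_SCALE = 933/1000 = 0.933, so after
 * ten ticks the value is multiplied by 0.933^10, which is about 0.50;
 * i.e. the reading halves roughly every 10 seconds with no new events.
 */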
1123
1124/* Initialize a frequency meter */
1125static void fmeter_init(struct fmeter *fmp)
1126{
1127        fmp->cnt = 0;
1128        fmp->val = 0;
1129        fmp->time = 0;
1130        spin_lock_init(&fmp->lock);
1131}
1132
1133/* Internal meter update - process cnt events and update value */
1134static void fmeter_update(struct fmeter *fmp)
1135{
1136        time_t now = get_seconds();
1137        time_t ticks = now - fmp->time;
1138
1139        if (ticks == 0)
1140                return;
1141
1142        ticks = min(FM_MAXTICKS, ticks);
1143        while (ticks-- > 0)
1144                fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1145        fmp->time = now;
1146
1147        fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1148        fmp->cnt = 0;
1149}
1150
1151/* Process any previous ticks, then bump cnt by one (times scale). */
1152static void fmeter_markevent(struct fmeter *fmp)
1153{
1154        spin_lock(&fmp->lock);
1155        fmeter_update(fmp);
1156        fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1157        spin_unlock(&fmp->lock);
1158}
1159
1160/* Process any previous ticks, then return current value. */
1161static int fmeter_getrate(struct fmeter *fmp)
1162{
1163        int val;
1164
1165        spin_lock(&fmp->lock);
1166        fmeter_update(fmp);
1167        val = fmp->val;
1168        spin_unlock(&fmp->lock);
1169        return val;
1170}
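/*
 * Rough usage sketch: each cpuset embeds one struct fmeter (cs->fmeter,
 * initialized with fmeter_init()); fmeter_markevent() is called on each
 * event being measured, and reading the memory_pressure file returns
 * fmeter_getrate(&cs->fmeter), as in cpuset_common_file_read() below.
 */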
1171
1172/*
 1173 * Attach the task specified by pid in 'pidbuf' to cpuset 'cs', possibly
1174 * writing the path of the old cpuset in 'ppathbuf' if it needs to be
1175 * notified on release.
1176 *
1177 * Call holding manage_mutex.  May take callback_mutex and task_lock of
1178 * the task 'pid' during call.
1179 */
1180
1181static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
1182{
1183        pid_t pid;
1184        struct task_struct *tsk;
1185        struct cpuset *oldcs;
1186        cpumask_t cpus;
1187        nodemask_t from, to;
1188        struct mm_struct *mm;
1189        int retval;
1190
1191        if (sscanf(pidbuf, "%d", &pid) != 1)
1192                return -EIO;
1193        if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1194                return -ENOSPC;
1195
1196        if (pid) {
1197                read_lock(&tasklist_lock);
1198
1199                tsk = find_task_by_pid(pid);
1200                if (!tsk || tsk->flags & PF_EXITING) {
1201                        read_unlock(&tasklist_lock);
1202                        return -ESRCH;
1203                }
1204
1205                get_task_struct(tsk);
1206                read_unlock(&tasklist_lock);
1207
1208                if ((current->euid) && (current->euid != tsk->uid)
1209                    && (current->euid != tsk->suid)) {
1210                        put_task_struct(tsk);
1211                        return -EACCES;
1212                }
1213        } else {
1214                tsk = current;
1215                get_task_struct(tsk);
1216        }
1217
1218        retval = security_task_setscheduler(tsk, 0, NULL);
1219        if (retval) {
1220                put_task_struct(tsk);
1221                return retval;
1222        }
1223
1224        mutex_lock(&callback_mutex);
1225
1226        task_lock(tsk);
1227        oldcs = tsk->cpuset;
1228        /*
1229         * After getting 'oldcs' cpuset ptr, be sure still not exiting.
1230         * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack
1231         * then fail this attach_task(), to avoid breaking top_cpuset.count.
1232         */
1233        if (tsk->flags & PF_EXITING) {
1234                task_unlock(tsk);
1235                mutex_unlock(&callback_mutex);
1236                put_task_struct(tsk);
1237                return -ESRCH;
1238        }
1239        atomic_inc(&cs->count);
1240        rcu_assign_pointer(tsk->cpuset, cs);
1241        task_unlock(tsk);
1242
1243        guarantee_online_cpus(cs, &cpus);
1244        set_cpus_allowed(tsk, cpus);
1245
1246        from = oldcs->mems_allowed;
1247        to = cs->mems_allowed;
1248
1249        mutex_unlock(&callback_mutex);
1250
1251        mm = get_task_mm(tsk);
1252        if (mm) {
1253                mpol_rebind_mm(mm, &to);
1254                if (is_memory_migrate(cs))
1255                        cpuset_migrate_mm(mm, &from, &to);
1256                mmput(mm);
1257        }
1258
1259        put_task_struct(tsk);
1260        synchronize_rcu();
1261        if (atomic_dec_and_test(&oldcs->count))
1262                check_for_release(oldcs, ppathbuf);
1263        return 0;
1264}
1265
1266/* The various types of files and directories in a cpuset file system */
1267
1268typedef enum {
1269        FILE_ROOT,
1270        FILE_DIR,
1271        FILE_MEMORY_MIGRATE,
1272        FILE_CPULIST,
1273        FILE_MEMLIST,
1274        FILE_CPU_EXCLUSIVE,
1275        FILE_MEM_EXCLUSIVE,
1276        FILE_NOTIFY_ON_RELEASE,
1277        FILE_MEMORY_PRESSURE_ENABLED,
1278        FILE_MEMORY_PRESSURE,
1279        FILE_SPREAD_PAGE,
1280        FILE_SPREAD_SLAB,
1281        FILE_TASKLIST,
1282} cpuset_filetype_t;
1283
1284static ssize_t cpuset_common_file_write(struct file *file, const char __user *userbuf,
1285                                        size_t nbytes, loff_t *unused_ppos)
1286{
1287        struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
1288        struct cftype *cft = __d_cft(file->f_dentry);
1289        cpuset_filetype_t type = cft->private;
1290        char *buffer;
1291        char *pathbuf = NULL;
1292        int retval = 0;
1293
1294        /* Crude upper limit on largest legitimate cpulist user might write. */
1295        if (nbytes > 100 + 6 * NR_CPUS)
1296                return -E2BIG;
1297
1298        /* +1 for nul-terminator */
1299        if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
1300                return -ENOMEM;
1301
1302        if (copy_from_user(buffer, userbuf, nbytes)) {
1303                retval = -EFAULT;
1304                goto out1;
1305        }
1306        buffer[nbytes] = 0;     /* nul-terminate */
1307
1308        mutex_lock(&manage_mutex);
1309
1310        if (is_removed(cs)) {
1311                retval = -ENODEV;
1312                goto out2;
1313        }
1314
1315        switch (type) {
1316        case FILE_CPULIST:
1317                retval = update_cpumask(cs, buffer);
1318                break;
1319        case FILE_MEMLIST:
1320                retval = update_nodemask(cs, buffer);
1321                break;
1322        case FILE_CPU_EXCLUSIVE:
1323                retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
1324                break;
1325        case FILE_MEM_EXCLUSIVE:
1326                retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
1327                break;
1328        case FILE_NOTIFY_ON_RELEASE:
1329                retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
1330                break;
1331        case FILE_MEMORY_MIGRATE:
1332                retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
1333                break;
1334        case FILE_MEMORY_PRESSURE_ENABLED:
1335                retval = update_memory_pressure_enabled(cs, buffer);
1336                break;
1337        case FILE_MEMORY_PRESSURE:
1338                retval = -EACCES;
1339                break;
1340        case FILE_SPREAD_PAGE:
1341                retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
1342                cs->mems_generation = cpuset_mems_generation++;
1343                break;
1344        case FILE_SPREAD_SLAB:
1345                retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
1346                cs->mems_generation = cpuset_mems_generation++;
1347                break;
1348        case FILE_TASKLIST:
1349                retval = attach_task(cs, buffer, &pathbuf);
1350                break;
1351        default:
1352                retval = -EINVAL;
1353                goto out2;
1354        }
1355
1356        if (retval == 0)
1357                retval = nbytes;
1358out2:
1359        mutex_unlock(&manage_mutex);
1360        cpuset_release_agent(pathbuf);
1361out1:
1362        kfree(buffer);
1363        return retval;
1364}
1365
1366static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
1367                                                size_t nbytes, loff_t *ppos)
1368{
1369        ssize_t retval = 0;
1370        struct cftype *cft = __d_cft(file->f_dentry);
1371        if (!cft)
1372                return -ENODEV;
1373
1374        /* special function ? */
1375        if (cft->write)
1376                retval = cft->write(file, buf, nbytes, ppos);
1377        else
1378                retval = cpuset_common_file_write(file, buf, nbytes, ppos);
1379
1380        return retval;
1381}
1382
1383/*
1384 * These ascii lists should be read in a single call, by using a user
1385 * buffer large enough to hold the entire map.  If read in smaller
1386 * chunks, there is no guarantee of atomicity.  Since the display format
1387 * used, list of ranges of sequential numbers, is variable length,
1388 * and since these maps can change value dynamically, one could read
1389 * gibberish by doing partial reads while a list was changing.
1390 * A single large read to a buffer that crosses a page boundary is
1391 * ok, because the result being copied to user land is not recomputed
1392 * across a page fault.
1393 */
1394
1395static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1396{
1397        cpumask_t mask;
1398
1399        mutex_lock(&callback_mutex);
1400        mask = cs->cpus_allowed;
1401        mutex_unlock(&callback_mutex);
1402
1403        return cpulist_scnprintf(page, PAGE_SIZE, mask);
1404}
1405
1406static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1407{
1408        nodemask_t mask;
1409
1410        mutex_lock(&callback_mutex);
1411        mask = cs->mems_allowed;
1412        mutex_unlock(&callback_mutex);
1413
1414        return nodelist_scnprintf(page, PAGE_SIZE, mask);
1415}
1416
1417static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
1418                                size_t nbytes, loff_t *ppos)
1419{
1420        struct cftype *cft = __d_cft(file->f_dentry);
1421        struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
1422        cpuset_filetype_t type = cft->private;
1423        char *page;
1424        ssize_t retval = 0;
1425        char *s;
1426
1427        if (!(page = (char *)__get_free_page(GFP_KERNEL)))
1428                return -ENOMEM;
1429
1430        s = page;
1431
1432        switch (type) {
1433        case FILE_CPULIST:
1434                s += cpuset_sprintf_cpulist(s, cs);
1435                break;
1436        case FILE_MEMLIST:
1437                s += cpuset_sprintf_memlist(s, cs);
1438                break;
1439        case FILE_CPU_EXCLUSIVE:
1440                *s++ = is_cpu_exclusive(cs) ? '1' : '0';
1441                break;
1442        case FILE_MEM_EXCLUSIVE:
1443                *s++ = is_mem_exclusive(cs) ? '1' : '0';
1444                break;
1445        case FILE_NOTIFY_ON_RELEASE:
1446                *s++ = notify_on_release(cs) ? '1' : '0';
1447                break;
1448        case FILE_MEMORY_MIGRATE:
1449                *s++ = is_memory_migrate(cs) ? '1' : '0';
1450                break;
1451        case FILE_MEMORY_PRESSURE_ENABLED:
1452                *s++ = cpuset_memory_pressure_enabled ? '1' : '0';
1453                break;
1454        case FILE_MEMORY_PRESSURE:
1455                s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
1456                break;
1457        case FILE_SPREAD_PAGE:
1458                *s++ = is_spread_page(cs) ? '1' : '0';
1459                break;
1460        case FILE_SPREAD_SLAB:
1461                *s++ = is_spread_slab(cs) ? '1' : '0';
1462                break;
1463        default:
1464                retval = -EINVAL;
1465                goto out;
1466        }
1467        *s++ = '\n';
1468
1469        retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1470out:
1471        free_page((unsigned long)page);
1472        return retval;
1473}
1474
1475static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
1476                                                                loff_t *ppos)
1477{
1478        ssize_t retval = 0;
1479        struct cftype *cft = __d_cft(file->f_dentry);
1480        if (!cft)
1481                return -ENODEV;
1482
1483        /* special function ? */
1484        if (cft->read)
1485                retval = cft->read(file, buf, nbytes, ppos);
1486        else
1487                retval = cpuset_common_file_read(file, buf, nbytes, ppos);
1488
1489        return retval;
1490}
1491
1492static int cpuset_file_open(struct inode *inode, struct file *file)
1493{
1494        int err;
1495        struct cftype *cft;
1496
1497        err = generic_file_open(inode, file);
1498        if (err)
1499                return err;
1500
1501        cft = __d_cft(file->f_dentry);
1502        if (!cft)
1503                return -ENODEV;
1504        if (cft->open)
1505                err = cft->open(inode, file);
1506        else
1507                err = 0;
1508
1509        return err;
1510}
1511
1512static int cpuset_file_release(struct inode *inode, struct file *file)
1513{
1514        struct cftype *cft = __d_cft(file->f_dentry);
1515        if (cft->release)
1516                return cft->release(inode, file);
1517        return 0;
1518}
1519
1520/*
1521 * cpuset_rename - Only allow simple rename of directories in place.
1522 */
1523static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
1524                  struct inode *new_dir, struct dentry *new_dentry)
1525{
1526        if (!S_ISDIR(old_dentry->d_inode->i_mode))
1527                return -ENOTDIR;
1528        if (new_dentry->d_inode)
1529                return -EEXIST;
1530        if (old_dir != new_dir)
1531                return -EIO;
1532        return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
1533}
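
/*
 * Illustrative user-space sketch of the rename restrictions above: a
 * cpuset directory may be renamed in place, but not moved to a different
 * parent.  The paths are assumptions for the example only.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// allowed: same parent directory
 *		if (rename("/dev/cpuset/example", "/dev/cpuset/renamed"))
 *			perror("rename in place");
 *		// rejected with EIO: old_dir != new_dir
 *		if (rename("/dev/cpuset/renamed", "/dev/cpuset/other/example"))
 *			perror("rename across directories");
 *		return 0;
 *	}
 */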
1534
1535static struct file_operations cpuset_file_operations = {
1536        .read = cpuset_file_read,
1537        .write = cpuset_file_write,
1538        .llseek = generic_file_llseek,
1539        .open = cpuset_file_open,
1540        .release = cpuset_file_release,
1541};
1542
1543static struct inode_operations cpuset_dir_inode_operations = {
1544        .lookup = simple_lookup,
1545        .mkdir = cpuset_mkdir,
1546        .rmdir = cpuset_rmdir,
1547        .rename = cpuset_rename,
1548};
1549
1550static int cpuset_create_file(struct dentry *dentry, int mode)
1551{
1552        struct inode *inode;
1553
1554        if (!dentry)
1555                return -ENOENT;
1556        if (dentry->d_inode)
1557                return -EEXIST;
1558
1559        inode = cpuset_new_inode(mode);
1560        if (!inode)
1561                return -ENOMEM;
1562
1563        if (S_ISDIR(mode)) {
1564                inode->i_op = &cpuset_dir_inode_operations;
1565                inode->i_fop = &simple_dir_operations;
1566
1567                /* start off with i_nlink == 2 (for "." entry) */
1568                inc_nlink(inode);
1569        } else if (S_ISREG(mode)) {
1570                inode->i_size = 0;
1571                inode->i_fop = &cpuset_file_operations;
1572        }
1573
1574        d_instantiate(dentry, inode);
1575        dget(dentry);   /* Extra count - pin the dentry in core */
1576        return 0;
1577}
1578
1579/*
1580 *      cpuset_create_dir - create a directory for an object.
1581 *      cs:     the cpuset we create the directory for.
1582 *              It must have a valid ->parent field,
1583 *              and we are going to fill its ->dentry field.
1584 *      name:   The name to give to the cpuset directory. Will be copied.
1585 *      mode:   mode to set on new directory.
1586 */
1587
1588static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
1589{
1590        struct dentry *dentry = NULL;
1591        struct dentry *parent;
1592        int error = 0;
1593
1594        parent = cs->parent->dentry;
1595        dentry = cpuset_get_dentry(parent, name);
1596        if (IS_ERR(dentry))
1597                return PTR_ERR(dentry);
1598        error = cpuset_create_file(dentry, S_IFDIR | mode);
1599        if (!error) {
1600                dentry->d_fsdata = cs;
1601                inc_nlink(parent->d_inode);
1602                cs->dentry = dentry;
1603        }
1604        dput(dentry);
1605
1606        return error;
1607}
1608
1609static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
1610{
1611        struct dentry *dentry;
1612        int error;
1613
1614        mutex_lock(&dir->d_inode->i_mutex);
1615        dentry = cpuset_get_dentry(dir, cft->name);
1616        if (!IS_ERR(dentry)) {
1617                error = cpuset_create_file(dentry, 0644 | S_IFREG);
1618                if (!error)
1619                        dentry->d_fsdata = (void *)cft;
1620                dput(dentry);
1621        } else
1622                error = PTR_ERR(dentry);
1623        mutex_unlock(&dir->d_inode->i_mutex);
1624        return error;
1625}
1626
1627/*
1628 * Stuff for reading the 'tasks' file.
1629 *
1630 * Reading this file can return large amounts of data if a cpuset has
1631 * *lots* of attached tasks. So it may need several calls to read(),
1632 * but we cannot guarantee that the information we produce is correct
1633 * unless we produce it entirely atomically.
1634 *
1635 * Upon tasks file open(), a struct ctr_struct is allocated; it holds
1636 * a pointer to a buffer (also allocated here).  The struct
1637 * ctr_struct * is stored in file->private_data.  Its resources will
1638 * be freed by release() when the file is closed.  The buffer is used
1639 * to sprintf the PIDs and then used by read().
1640 */
1641
1642/* cpusets_tasks_read array */
1643
1644struct ctr_struct {
1645        char *buf;
1646        int bufsz;
1647};
1648
1649/*
1650 * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
1651 * Return actual number of pids loaded.  No need to task_lock(p)
1652 * when reading out p->cpuset, as we don't really care if it changes
1653 * on the next cycle, and we are not going to try to dereference it.
1654 */
1655static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
1656{
1657        int n = 0;
1658        struct task_struct *g, *p;
1659
1660        read_lock(&tasklist_lock);
1661
1662        do_each_thread(g, p) {
1663                if (p->cpuset == cs) {
1664                        pidarray[n++] = p->pid;
1665                        if (unlikely(n == npids))
1666                                goto array_full;
1667                }
1668        } while_each_thread(g, p);
1669
1670array_full:
1671        read_unlock(&tasklist_lock);
1672        return n;
1673}
1674
1675static int cmppid(const void *a, const void *b)
1676{
1677        return *(pid_t *)a - *(pid_t *)b;
1678}
1679
1680/*
1681 * Convert array 'a' of 'npids' pid_t's to a string of newline separated
1682 * decimal pids in 'buf'.  Don't write more than 'sz' chars, but return
1683 * count 'cnt' of how many chars would be written if buf were large enough.
1684 */
1685static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
1686{
1687        int cnt = 0;
1688        int i;
1689
1690        for (i = 0; i < npids; i++)
1691                cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
1692        return cnt;
1693}
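
/*
 * Illustrative stand-alone sketch (user space, not compiled here) of the
 * "measure, then allocate, then fill" pattern that pid_array_to_buf()
 * enables and cpuset_tasks_open() below relies on: snprintf() returns
 * the number of characters that would have been written, so a first
 * pass into a one-byte buffer yields the exact allocation size for the
 * second pass.  Function and variable names are for the example only.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	static int pids_to_buf(char *buf, int sz, const int *pid, int npids)
 *	{
 *		int cnt = 0, i;
 *
 *		for (i = 0; i < npids; i++)
 *			cnt += snprintf(buf + cnt, cnt < sz ? sz - cnt : 0,
 *					"%d\n", pid[i]);
 *		return cnt;	// chars needed, even if output was truncated
 *	}
 *
 *	int main(void)
 *	{
 *		int pids[] = { 1, 42, 31337 };
 *		char c, *buf;
 *		int need = pids_to_buf(&c, sizeof(c), pids, 3) + 1;
 *
 *		buf = malloc(need);
 *		if (!buf)
 *			return 1;
 *		pids_to_buf(buf, need, pids, 3);
 *		fputs(buf, stdout);
 *		free(buf);
 *		return 0;
 *	}
 */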
1694
1695/*
1696 * Handle an open on 'tasks' file.  Prepare a buffer listing the
1697 * process id's of tasks currently attached to the cpuset being opened.
1698 *
1699 * Does not require any specific cpuset mutexes, and does not take any.
1700 */
1701static int cpuset_tasks_open(struct inode *unused, struct file *file)
1702{
1703        struct cpuset *cs = __d_cs(file->f_dentry->d_parent);
1704        struct ctr_struct *ctr;
1705        pid_t *pidarray;
1706        int npids;
1707        char c;
1708
1709        if (!(file->f_mode & FMODE_READ))
1710                return 0;
1711
1712        ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
1713        if (!ctr)
1714                goto err0;
1715
1716        /*
1717         * If cpuset gets more users after we read count, we won't have
1718         * enough space - tough.  This race is indistinguishable to the
1719         * caller from the case that the additional cpuset users didn't
1720         * show up until sometime later on.
1721         */
1722        npids = atomic_read(&cs->count);
1723        pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
1724        if (!pidarray)
1725                goto err1;
1726
1727        npids = pid_array_load(pidarray, npids, cs);
1728        sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
1729
1730        /* Call pid_array_to_buf() twice, first just to get bufsz */
1731        ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
1732        ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
1733        if (!ctr->buf)
1734                goto err2;
1735        ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
1736
1737        kfree(pidarray);
1738        file->private_data = ctr;
1739        return 0;
1740
1741err2:
1742        kfree(pidarray);
1743err1:
1744        kfree(ctr);
1745err0:
1746        return -ENOMEM;
1747}
1748
1749static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
1750                                                size_t nbytes, loff_t *ppos)
1751{
1752        struct ctr_struct *ctr = file->private_data;
1753
1754        if (*ppos + nbytes > ctr->bufsz)
1755                nbytes = ctr->bufsz - *ppos;
1756        if (copy_to_user(buf, ctr->buf + *ppos, nbytes))
1757                return -EFAULT;
1758        *ppos += nbytes;
1759        return nbytes;
1760}
1761
1762static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
1763{
1764        struct ctr_struct *ctr;
1765
1766        if (file->f_mode & FMODE_READ) {
1767                ctr = file->private_data;
1768                kfree(ctr->buf);
1769                kfree(ctr);
1770        }
1771        return 0;
1772}
1773
1774/*
1775 * for the common functions, 'private' gives the type of file
1776 */
1777
1778static struct cftype cft_tasks = {
1779        .name = "tasks",
1780        .open = cpuset_tasks_open,
1781        .read = cpuset_tasks_read,
1782        .release = cpuset_tasks_release,
1783        .private = FILE_TASKLIST,
1784};
1785
1786static struct cftype cft_cpus = {
1787        .name = "cpus",
1788        .private = FILE_CPULIST,
1789};
1790
1791static struct cftype cft_mems = {
1792        .name = "mems",
1793        .private = FILE_MEMLIST,
1794};
1795
1796static struct cftype cft_cpu_exclusive = {
1797        .name = "cpu_exclusive",
1798        .private = FILE_CPU_EXCLUSIVE,
1799};
1800
1801static struct cftype cft_mem_exclusive = {
1802        .name = "mem_exclusive",
1803        .private = FILE_MEM_EXCLUSIVE,
1804};
1805
1806static struct cftype cft_notify_on_release = {
1807        .name = "notify_on_release",
1808        .private = FILE_NOTIFY_ON_RELEASE,
1809};
1810
1811static struct cftype cft_memory_migrate = {
1812        .name = "memory_migrate",
1813        .private = FILE_MEMORY_MIGRATE,
1814};
1815
1816static struct cftype cft_memory_pressure_enabled = {
1817        .name = "memory_pressure_enabled",
1818        .private = FILE_MEMORY_PRESSURE_ENABLED,
1819};
1820
1821static struct cftype cft_memory_pressure = {
1822        .name = "memory_pressure",
1823        .private = FILE_MEMORY_PRESSURE,
1824};
1825
1826static struct cftype cft_spread_page = {
1827        .name = "memory_spread_page",
1828        .private = FILE_SPREAD_PAGE,
1829};
1830
1831static struct cftype cft_spread_slab = {
1832        .name = "memory_spread_slab",
1833        .private = FILE_SPREAD_SLAB,
1834};
1835
1836static int cpuset_populate_dir(struct dentry *cs_dentry)
1837{
1838        int err;
1839
1840        if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0)
1841                return err;
1842        if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0)
1843                return err;
1844        if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0)
1845                return err;
1846        if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0)
1847                return err;
1848        if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
1849                return err;
1850        if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0)
1851                return err;
1852        if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0)
1853                return err;
1854        if ((err = cpuset_add_file(cs_dentry, &cft_spread_page)) < 0)
1855                return err;
1856        if ((err = cpuset_add_file(cs_dentry, &cft_spread_slab)) < 0)
1857                return err;
1858        if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
1859                return err;
1860        return 0;
1861}
1862
1863/*
1864 *      cpuset_create - create a cpuset
1865 *      parent: cpuset that will be parent of the new cpuset.
1866 *      name:           name of the new cpuset. Will be strcpy'ed.
1867 *      mode:           mode to set on new inode
1868 *
1869 *      Must be called with the mutex on the parent inode held
1870 */
1871
1872static long cpuset_create(struct cpuset *parent, const char *name, int mode)
1873{
1874        struct cpuset *cs;
1875        int err;
1876
1877        cs = kmalloc(sizeof(*cs), GFP_KERNEL);
1878        if (!cs)
1879                return -ENOMEM;
1880
1881        mutex_lock(&manage_mutex);
1882        cpuset_update_task_memory_state();
1883        cs->flags = 0;
1884        if (notify_on_release(parent))
1885                set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
1886        if (is_spread_page(parent))
1887                set_bit(CS_SPREAD_PAGE, &cs->flags);
1888        if (is_spread_slab(parent))
1889                set_bit(CS_SPREAD_SLAB, &cs->flags);
1890        cs->cpus_allowed = CPU_MASK_NONE;
1891        cs->mems_allowed = NODE_MASK_NONE;
1892        atomic_set(&cs->count, 0);
1893        INIT_LIST_HEAD(&cs->sibling);
1894        INIT_LIST_HEAD(&cs->children);
1895        cs->mems_generation = cpuset_mems_generation++;
1896        fmeter_init(&cs->fmeter);
1897
1898        cs->parent = parent;
1899
1900        mutex_lock(&callback_mutex);
1901        list_add(&cs->sibling, &cs->parent->children);
1902        number_of_cpusets++;
1903        mutex_unlock(&callback_mutex);
1904
1905        err = cpuset_create_dir(cs, name, mode);
1906        if (err < 0)
1907                goto err;
1908
1909        /*
1910         * Release manage_mutex before cpuset_populate_dir() because it
1911         * will lock this new directory's i_mutex and, if we race with
1912         * another mkdir, we might deadlock.
1913         */
1914        mutex_unlock(&manage_mutex);
1915
1916        err = cpuset_populate_dir(cs->dentry);
1917        /* If err < 0, we have a half-filled directory - oh well ;) */
1918        return 0;
1919err:
1920        list_del(&cs->sibling);
1921        mutex_unlock(&manage_mutex);
1922        kfree(cs);
1923        return err;
1924}
1925
1926static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1927{
1928        struct cpuset *c_parent = dentry->d_parent->d_fsdata;
1929
1930        /* the vfs holds inode->i_mutex already */
1931        return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR);
1932}
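
/*
 * Illustrative user-space sketch (not compiled here) of driving the
 * mkdir/write interface implemented above: mount the cpuset filesystem,
 * create a child cpuset, give it CPUs and memory nodes, and attach the
 * calling task.  Needs root; the mount point, CPU/node lists and error
 * handling are assumptions for the example only.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/mount.h>
 *	#include <sys/stat.h>
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *
 *	static int write_str(const char *path, const char *val)
 *	{
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, val, strlen(val)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 *
 *	int main(void)
 *	{
 *		char pid[16];
 *
 *		mkdir("/dev/cpuset", 0755);			// mount point
 *		mount("cpuset", "/dev/cpuset", "cpuset", 0, NULL);
 *		mkdir("/dev/cpuset/example", 0755);		// -> cpuset_mkdir()
 *		write_str("/dev/cpuset/example/cpus", "0-1");	// cpus and mems must be
 *		write_str("/dev/cpuset/example/mems", "0");	// non-empty before attaching
 *		snprintf(pid, sizeof(pid), "%d", getpid());
 *		return write_str("/dev/cpuset/example/tasks", pid) ? 1 : 0;
 *	}
 */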
1933
1934/*
1935 * Locking note on the strange update_flag() call below:
1936 *
1937 * If the cpuset being removed is marked cpu_exclusive, then simulate
1938 * turning cpu_exclusive off, which will call update_cpu_domains().
1939 * The lock_cpu_hotplug() call in update_cpu_domains() must not be
1940 * made while holding callback_mutex.  Elsewhere the kernel nests
1941 * callback_mutex inside lock_cpu_hotplug() calls.  So the reverse
1942 * nesting would risk an ABBA deadlock.
1943 */
1944
1945static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
1946{
1947        struct cpuset *cs = dentry->d_fsdata;
1948        struct dentry *d;
1949        struct cpuset *parent;
1950        char *pathbuf = NULL;
1951
1952        /* the vfs already holds inode->i_mutex on both the parent and the victim */
1953
1954        mutex_lock(&manage_mutex);
1955        cpuset_update_task_memory_state();
1956        if (atomic_read(&cs->count) > 0) {
1957                mutex_unlock(&manage_mutex);
1958                return -EBUSY;
1959        }
1960        if (!list_empty(&cs->children)) {
1961                mutex_unlock(&manage_mutex);
1962                return -EBUSY;
1963        }
1964        if (is_cpu_exclusive(cs)) {
1965                int retval = update_flag(CS_CPU_EXCLUSIVE, cs, "0");
1966                if (retval < 0) {
1967                        mutex_unlock(&manage_mutex);
1968                        return retval;
1969                }
1970        }
1971        parent = cs->parent;
1972        mutex_lock(&callback_mutex);
1973        set_bit(CS_REMOVED, &cs->flags);
1974        list_del(&cs->sibling); /* delete my sibling from parent->children */
1975        spin_lock(&cs->dentry->d_lock);
1976        d = dget(cs->dentry);
1977        cs->dentry = NULL;
1978        spin_unlock(&d->d_lock);
1979        cpuset_d_remove_dir(d);
1980        dput(d);
1981        number_of_cpusets--;
1982        mutex_unlock(&callback_mutex);
1983        if (list_empty(&parent->children))
1984                check_for_release(parent, &pathbuf);
1985        mutex_unlock(&manage_mutex);
1986        cpuset_release_agent(pathbuf);
1987        return 0;
1988}
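
/*
 * Illustrative user-space sketch of removing a cpuset through the rmdir
 * path above: the directory must have no attached tasks and no child
 * cpusets, otherwise rmdir() fails with EBUSY.  One way to empty it is
 * to write each remaining pid into the parent cpuset's 'tasks' file
 * first.  The path is an assumption for the example only.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		if (rmdir("/dev/cpuset/example") == 0)
 *			return 0;
 *		perror("rmdir");	// typically EBUSY while count > 0
 *		return 1;
 *	}
 */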
1989
1990/*
1991 * cpuset_init_early - just enough so that the calls to
1992 * cpuset_update_task_memory_state() in early init code
1993 * are harmless.
1994 */
1995
1996int __init cpuset_init_early(void)
1997{
1998        struct task_struct *tsk = current;
1999
2000        tsk->cpuset = &top_cpuset;
2001        tsk->cpuset->mems_generation = cpuset_mems_generation++;
2002        return 0;
2003}
2004
2005/**
2006 * cpuset_init - initialize cpusets at system boot
2007 *
2008 * Description: Initialize top_cpuset and the cpuset internal file system.
2009 **/
2010
2011int __init cpuset_init(void)
2012{
2013        struct dentry *root;
2014        int err;
2015
2016        top_cpuset.cpus_allowed = CPU_MASK_ALL;
2017        top_cpuset.mems_allowed = NODE_MASK_ALL;
2018
2019        fmeter_init(&top_cpuset.fmeter);
2020        top_cpuset.mems_generation = cpuset_mems_generation++;
2021
2022        init_task.cpuset = &top_cpuset;
2023
2024        err = register_filesystem(&cpuset_fs_type);
2025        if (err < 0)
2026                goto out;
2027        cpuset_mount = kern_mount(&cpuset_fs_type);
2028        if (IS_ERR(cpuset_mount)) {
2029                printk(KERN_ERR "cpuset: could not mount!\n");
2030                err = PTR_ERR(cpuset_mount);
2031                cpuset_mount = NULL;
2032                goto out;
2033        }
2034        root = cpuset_mount->mnt_sb->s_root;
2035        root->d_fsdata = &top_cpuset;
2036        inc_nlink(root->d_inode);
2037        top_cpuset.dentry = root;
2038        root->d_inode->i_op = &cpuset_dir_inode_operations;
2039        number_of_cpusets = 1;
2040        err = cpuset_populate_dir(root);
2041        /* memory_pressure_enabled is in root cpuset only */
2042        if (err == 0)
2043                err = cpuset_add_file(root, &cft_memory_pressure_enabled);
2044out:
2045        return err;
2046}
2047
2048#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_MEMORY_HOTPLUG)
2049/*
2050 * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
2051 * or memory nodes, we need to walk over the cpuset hierarchy,
2052 * removing that CPU or node from all cpusets.  If this removes the
2053 * last CPU or node from a cpuset, then the guarantee_online_cpus()
2054 * or guarantee_online_mems() code will use that emptied cpuset's
2055 * parent's online CPUs or nodes.  Cpusets that were already empty of
2056 * CPUs or nodes are left empty.
2057 *
2058 * This routine is intentionally inefficient in a couple of regards.
2059 * It will check all cpusets in a subtree even if the top cpuset of
2060 * the subtree has no offline CPUs or nodes.  It checks both CPUs and
2061 * nodes, even though the caller could have been coded to know that
2062 * only one of CPUs or nodes needed to be checked on a given call.
2063 * This was done to minimize text size rather than cpu cycles.
2064 *
2065 * Call with both manage_mutex and callback_mutex held.
2066 *
2067 * Recursive, on depth of cpuset subtree.
2068 */
2069
2070static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur)
2071{
2072        struct cpuset *c;
2073
2074        /* Each of our child cpusets' cpus and mems must be online */
2075        list_for_each_entry(c, &cur->children, sibling) {
2076                guarantee_online_cpus_mems_in_subtree(c);
2077                if (!cpus_empty(c->cpus_allowed))
2078                        guarantee_online_cpus(c, &c->cpus_allowed);
2079                if (!nodes_empty(c->mems_allowed))
2080                        guarantee_online_mems(c, &c->mems_allowed);
2081        }
2082}
2083
2084/*
2085 * The cpus_allowed cpumask and mems_allowed nodemask in the top_cpuset
2086 * track cpu_online_map and node_online_map.  Force the top cpuset to
2087 * track what's online after any CPU or memory node hotplug or unplug event.
2088 *
2089 * To ensure that we don't remove a CPU or node from the top cpuset
2090 * that is currently in use by a child cpuset (which would violate
2091 * the rule that cpusets must be subsets of their parent), we first
2092 * call the recursive routine guarantee_online_cpus_mems_in_subtree().
2093 *
2094 * Since there are two callers of this routine, one for CPU hotplug
2095 * events and one for memory node hotplug events, we could have coded
2096 * two separate routines here.  We code it as a single common routine
2097 * in order to minimize text size.
2098 */
2099
2100static void common_cpu_mem_hotplug_unplug(void)
2101{
2102        mutex_lock(&manage_mutex);
2103        mutex_lock(&callback_mutex);
2104
2105        guarantee_online_cpus_mems_in_subtree(&top_cpuset);
2106        top_cpuset.cpus_allowed = cpu_online_map;
2107        top_cpuset.mems_allowed = node_online_map;
2108
2109        mutex_unlock(&callback_mutex);
2110        mutex_unlock(&manage_mutex);
2111}
2112#endif
2113
2114#ifdef CONFIG_HOTPLUG_CPU
2115/*
2116 * The top_cpuset tracks what CPUs and Memory Nodes are online,
2117 * period.  This is necessary in order to make cpusets transparent
2118 * (of no effect) on systems that are actively using CPU hotplug
2119 * but making no active use of cpusets.
2120 *
2121 * This routine ensures that top_cpuset.cpus_allowed tracks
2122 * cpu_online_map on each CPU hotplug (cpuhp) event.
2123 */
2124
2125static int cpuset_handle_cpuhp(struct notifier_block *nb,
2126                                unsigned long phase, void *cpu)
2127{
2128        common_cpu_mem_hotplug_unplug();
2129        return 0;
2130}
2131#endif
2132
2133#ifdef CONFIG_MEMORY_HOTPLUG
2134/*
2135 * Keep top_cpuset.mems_allowed tracking node_online_map.
2136 * Call this routine anytime after you change node_online_map.
2137 * See also the previous routine cpuset_handle_cpuhp().
2138 */
2139
2140void cpuset_track_online_nodes(void)
2141{
2142        common_cpu_mem_hotplug_unplug();
2143}
2144#endif
2145
2146/**
2147 * cpuset_init_smp - initialize cpus_allowed
2148 *
2149 * Description: Finish top cpuset setup after the cpu and node online maps are initialized.
2150 **/
2151
2152void __init cpuset_init_smp(void)
2153{
2154        top_cpuset.cpus_allowed = cpu_online_map;
2155        top_cpuset.mems_allowed = node_online_map;
2156
2157        hotcpu_notifier(cpuset_handle_cpuhp, 0);
2158}
2159
2160/**
2161 * cpuset_fork - attach newly forked task to its parent's cpuset.
2162 * @child: pointer to task_struct of the newly forked child task.
2163 *
2164 * Description: A task inherits its parent's cpuset at fork().
2165 *
2166 * A pointer to the shared cpuset was automatically copied in fork.c
2167 * by dup_task_struct().  However, we ignore that copy, since it was
2168 * not made under the protection of task_lock(), so might no longer be
2169 * a valid cpuset pointer.  attach_task() might have already changed
2170 * current->cpuset, allowing the previously referenced cpuset to
2171 * be removed and freed.  Instead, we task_lock(current) and copy
2172 * its present value of current->cpuset for our freshly forked child.
2173 *
2174 * At the point that cpuset_fork() is called, 'current' is the parent
2175 * task, and the passed argument 'child' points to the child task.
2176 **/
2177
2178void cpuset_fork(struct task_struct *child)
2179{
2180        task_lock(current);
2181        child->cpuset = current->cpuset;
2182        atomic_inc(&child->cpuset->count);
2183        task_unlock(current);
2184}
2185
2186/**
2187 * cpuset_exit - detach cpuset from exiting task
2188 * @tsk: pointer to task_struct of exiting process
2189 *
2190 * Description: Detach cpuset from @tsk and release it.
2191 *
2192 * Note that cpusets marked notify_on_release force every task in
2193 * them to take the global manage_mutex when exiting.
2194 * This could impact scaling on very large systems.  Be reluctant to
2195 * use notify_on_release cpusets where very high task exit scaling
2196 * is required on large systems.
2197 *
2198 * Don't even think about dereferencing 'cs' after the cpuset use count
2199 * goes to zero, except inside a critical section guarded by manage_mutex
2200 * or callback_mutex.   Otherwise a zero cpuset use count is a license for
2201 * any other task to nuke the cpuset immediately, via cpuset_rmdir().
2202 *
2203 * This routine has to take manage_mutex, not callback_mutex, because
2204 * it is holding that mutex while calling check_for_release(),
2205 * which calls kmalloc(), so can't be called while holding callback_mutex.
2206 *
2207 * We don't need to task_lock() this reference to tsk->cpuset,
2208 * because tsk is already marked PF_EXITING, so attach_task() won't
2209 * mess with it, or task is a failed fork, never visible to attach_task.
2210 *
2211 * the_top_cpuset_hack:
2212 *
2213 *    Set the exiting task's cpuset to the root cpuset (top_cpuset).
2214 *
2215 *    Don't leave a task unable to allocate memory, as that is an
2216 *    accident waiting to happen should someone add a callout in
2217 *    do_exit() after the cpuset_exit() call that might allocate.
2218 *    If a task tries to allocate memory with an invalid cpuset,
2219 *    it will oops in cpuset_update_task_memory_state().
2220 *
2221 *    We call cpuset_exit() while the task is still competent to
2222 *    handle notify_on_release(), then leave the task attached to
2223 *    the root cpuset (top_cpuset) for the remainder of its exit.
2224 *
2225 *    To do this properly, we would increment the reference count on
2226 *    top_cpuset, and near the very end of the kernel/exit.c do_exit()
2227 *    code we would add a second cpuset function call, to drop that
2228 *    reference.  This would just create an unnecessary hot spot on
2229 *    the top_cpuset reference count, to no avail.
2230 *
2231 *    Normally, holding a reference to a cpuset without bumping its
2232 *    count is unsafe.   The cpuset could go away, or someone could
2233 *    attach us to a different cpuset, decrementing the count on
2234 *    the first cpuset that we never incremented.  But in this case,
2235 *    top_cpuset isn't going away, and either task has PF_EXITING set,
2236 *    which wards off any attach_task() attempts, or task is a failed
2237 *    fork, never visible to attach_task.
2238 *
2239 *    Another way to do this would be to set the cpuset pointer
2240 *    to NULL here, and check in cpuset_update_task_memory_state()
2241 *    for a NULL pointer.  This hack avoids that NULL check, for no
2242 *    cost (other than this way too long comment ;).
2243 **/
2244
2245void cpuset_exit(struct task_struct *tsk)
2246{
2247        struct cpuset *cs;
2248
2249        cs = tsk->cpuset;
2250        tsk->cpuset = &top_cpuset;      /* the_top_cpuset_hack - see above */
2251
2252        if (notify_on_release(cs)) {
2253                char *pathbuf = NULL;
2254
2255                mutex_lock(&manage_mutex);
2256                if (atomic_dec_and_test(&cs->count))
2257                        check_for_release(cs, &pathbuf);
2258                mutex_unlock(&manage_mutex);
2259                cpuset_release_agent(pathbuf);
2260        } else {
2261                atomic_dec(&cs->count);
2262        }
2263}
2264
2265/**
2266 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2267 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2268 *
2269 * Description: Returns the cpumask_t cpus_allowed of the cpuset
2270 * attached to the specified @tsk.  Guaranteed to return some non-empty
2271 * subset of cpu_online_map, even if this means going outside the
2272 * task's cpuset.
2273 **/
2274
2275cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
2276{
2277        cpumask_t mask;
2278
2279        mutex_lock(&callback_mutex);
2280        task_lock(tsk);
2281        guarantee_online_cpus(tsk->cpuset, &mask);
2282        task_unlock(tsk);
2283        mutex_unlock(&callback_mutex);
2284
2285        return mask;
2286}
2287
2288void cpuset_init_current_mems_allowed(void)
2289{
2290        current->mems_allowed = NODE_MASK_ALL;
2291}
2292
2293/**
2294 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2295 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2296 *
2297 * Description: Returns the nodemask_t mems_allowed of the cpuset
2298 * attached to the specified @tsk.  Guaranteed to return some non-empty
2299 * subset of node_online_map, even if this means going outside the
2300 * task's cpuset.
2301 **/
2302
2303nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2304{
2305        nodemask_t mask;
2306
2307        mutex_lock(&callback_mutex);
2308        task_lock(tsk);
2309        guarantee_online_mems(tsk->cpuset, &mask);
2310        task_unlock(tsk);
2311        mutex_unlock(&callback_mutex);
2312
2313        return mask;
2314}
2315
2316/**
2317 * cpuset_zonelist_valid_mems_allowed - check zonelist vs. current mems_allowed
2318 * @zl: the zonelist to be checked
2319 *
2320 * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
2321 */
2322int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
2323{
2324        int i;
2325
2326        for (i = 0; zl->zones[i]; i++) {
2327                int nid = zone_to_nid(zl->zones[i]);
2328
2329                if (node_isset(nid, current->mems_allowed))
2330                        return 1;
2331        }
2332        return 0;
2333}
2334
2335/*
2336 * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
2337 * ancestor to the specified cpuset.  Call holding callback_mutex.
2338 * If no ancestor is mem_exclusive (an unusual configuration), then
2339 * returns the root cpuset.
2340 */
2341static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
2342{
2343        while (!is_mem_exclusive(cs) && cs->parent)
2344                cs = cs->parent;
2345        return cs;
2346}
2347
2348/**
2349 * cpuset_zone_allowed - Can we allocate memory on zone z's memory node?
2350 * @z: is this zone on an allowed node?
2351 * @gfp_mask: memory allocation flags (we use __GFP_HARDWALL)
2352 *
2353 * If we're in interrupt, yes, we can always allocate.  If zone
2354 * z's node is in our task's mems_allowed, yes.  If it's not a
2355 * __GFP_HARDWALL request and this zone's node is in the nearest
2356 * mem_exclusive cpuset ancestor to this task's cpuset, yes.
2357 * Otherwise, no.
2358 *
2359 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2360 * and do not allow allocations outside the current task's cpuset.
2361 * GFP_KERNEL allocations are not so marked, so can escape to the
2362 * nearest mem_exclusive ancestor cpuset.
2363 *
2364 * Scanning up parent cpusets requires callback_mutex.  The __alloc_pages()
2365 * routine only calls here with __GFP_HARDWALL bit _not_ set if
2366 * it's a GFP_KERNEL allocation, and all nodes in the current task's
2367 * mems_allowed came up empty on the first pass over the zonelist.
2368 * So only GFP_KERNEL allocations, if all nodes in the cpuset are
2369 * short of memory, might require taking callback_mutex.
2370 *
2371 * The first call here from mm/page_alloc:get_page_from_freelist()
2372 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, so
2373 * no allocation on a node outside the cpuset is allowed (unless in
2374 * interrupt, of course).
2375 *
2376 * The second pass through get_page_from_freelist() doesn't even call
2377 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2378 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2379 * in alloc_flags.  That logic and the checks below have the combined
2380 * effect that:
2381 *      in_interrupt - any node ok (current task context irrelevant)
2382 *      GFP_ATOMIC   - any node ok
2383 *      GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
2384 *      GFP_USER     - only nodes in the current task's mems_allowed ok.
2385 *
2386 * Rule:
2387 *    Don't call cpuset_zone_allowed() if you can't sleep, unless you
2388 *    pass in the __GFP_HARDWALL flag set in gfp_mask, which disables
2389 *    the code that might scan up ancestor cpusets and sleep.
2390 **/
2391
2392int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
2393{
2394        int node;                       /* node that zone z is on */
2395        const struct cpuset *cs;        /* current cpuset ancestors */
2396        int allowed;                    /* is allocation in zone z allowed? */
2397
2398        if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2399                return 1;
2400        node = zone_to_nid(z);
2401        might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2402        if (node_isset(node, current->mems_allowed))
2403                return 1;
2404        if (gfp_mask & __GFP_HARDWALL)  /* If hardwall request, stop here */
2405                return 0;
2406
2407        if (current->flags & PF_EXITING) /* Let dying task have memory */
2408                return 1;
2409
2410        /* Not hardwall and node outside mems_allowed: scan up cpusets */
2411        mutex_lock(&callback_mutex);
2412
2413        task_lock(current);
2414        cs = nearest_exclusive_ancestor(current->cpuset);
2415        task_unlock(current);
2416
2417        allowed = node_isset(node, cs->mems_allowed);
2418        mutex_unlock(&callback_mutex);
2419        return allowed;
2420}
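
/*
 * Illustrative sketch of the "Rule" above, from the point of view of an
 * assumed caller (not code in this file): a caller that cannot sleep
 * must pass __GFP_HARDWALL so the ancestor scan, which takes
 * callback_mutex and may sleep, is skipped.  alloc_from_zone() is a
 * hypothetical helper used only for the example.
 *
 *	struct page *page = NULL;
 *
 *	// 'zone' is the zone the allocator is currently considering.
 *
 *	// may sleep: GFP_KERNEL may escape to the mem_exclusive ancestor
 *	if (cpuset_zone_allowed(zone, GFP_KERNEL))
 *		page = alloc_from_zone(zone, GFP_KERNEL);
 *
 *	// atomic context: restrict the check to current->mems_allowed
 *	if (cpuset_zone_allowed(zone, GFP_ATOMIC | __GFP_HARDWALL))
 *		page = alloc_from_zone(zone, GFP_ATOMIC);
 */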
2421
2422/**
2423 * cpuset_lock - lock out any changes to cpuset structures
2424 *
2425 * The out of memory (oom) code needs to keep cpusets from
2426 * being changed while it scans the tasklist looking for a
2427 * task in an overlapping cpuset.  Expose callback_mutex via this
2428 * cpuset_lock() routine, so the oom code can lock it, before
2429 * locking the task list.  The tasklist_lock is a spinlock, so
2430 * must be taken inside callback_mutex.
2431 */
2432
2433void cpuset_lock(void)
2434{
2435        mutex_lock(&callback_mutex);
2436}
2437
2438/**
2439 * cpuset_unlock - release lock on cpuset changes
2440 *
2441 * Undo the lock taken in a previous cpuset_lock() call.
2442 */
2443
2444void cpuset_unlock(void)
2445{
2446        mutex_unlock(&callback_mutex);
2447}
2448
2449/**
2450 * cpuset_mem_spread_node() - On which node to begin search for a page
2451 *
2452 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2453 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2454 * and if the memory allocation used cpuset_mem_spread_node()
2455 * to determine on which node to start looking, as it will for
2456 * certain page cache or slab cache pages such as used for file
2457 * system buffers and inode caches, then instead of starting on the
2458 * local node to look for a free page, the starting node is spread
2459 * round-robin over the task's mems_allowed nodes.
2460 *
2461 * We don't have to worry about the returned node being offline
2462 * because "it can't happen", and even if it did, it would be ok.
2463 *
2464 * The routines calling guarantee_online_mems() are careful to
2465 * only set nodes in task->mems_allowed that are online.  So it
2466 * should not be possible for the following code to return an
2467 * offline node.  But if it did, that would be ok, as this routine
2468 * is not returning the node where the allocation must be, only
2469 * the node where the search should start.  The zonelist passed to
2470 * __alloc_pages() will include all nodes.  If the slab allocator
2471 * is passed an offline node, it will fall back to the local node.
2472 * See kmem_cache_alloc_node().
2473 */
2474
2475int cpuset_mem_spread_node(void)
2476{
2477        int node;
2478
2479        node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
2480        if (node == MAX_NUMNODES)
2481                node = first_node(current->mems_allowed);
2482        current->cpuset_mem_spread_rotor = node;
2483        return node;
2484}
2485EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
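
/*
 * Sketch of an assumed caller (page cache or slab code, not code in
 * this file) consuming the rotor above: check whether the task asked
 * for memory spreading, then start the allocation on the node the
 * rotor returns.  The GFP flags here are assumptions for the example.
 *
 *	struct page *page;
 *	int nid = numa_node_id();		// default: local node
 *
 *	if (cpuset_do_page_mem_spread())	// task has PF_SPREAD_PAGE set
 *		nid = cpuset_mem_spread_node();
 *	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
 */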
2486
2487/**
2488 * cpuset_excl_nodes_overlap - Do we overlap @p's mem_exclusive ancestors?
2489 * @p: pointer to task_struct of some other task.
2490 *
2491 * Description: Return true if the nearest mem_exclusive ancestor
2492 * cpusets of tasks @p and current overlap.  Used by oom killer to
2493 * determine if task @p's memory usage might impact the memory
2494 * available to the current task.
2495 *
2496 * Call while holding callback_mutex.
2497 **/
2498
2499int cpuset_excl_nodes_overlap(const struct task_struct *p)
2500{
2501        const struct cpuset *cs1, *cs2; /* my and p's cpuset ancestors */
2502        int overlap = 1;                /* do cpusets overlap? */
2503
2504        task_lock(current);
2505        if (current->flags & PF_EXITING) {
2506                task_unlock(current);
2507                goto done;
2508        }
2509        cs1 = nearest_exclusive_ancestor(current->cpuset);
2510        task_unlock(current);
2511
2512        task_lock((struct task_struct *)p);
2513        if (p->flags & PF_EXITING) {
2514                task_unlock((struct task_struct *)p);
2515                goto done;
2516        }
2517        cs2 = nearest_exclusive_ancestor(p->cpuset);
2518        task_unlock((struct task_struct *)p);
2519
2520        overlap = nodes_intersects(cs1->mems_allowed, cs2->mems_allowed);
2521done:
2522        return overlap;
2523}
2524
2525/*
2526 * Collection of memory_pressure is suppressed unless
2527 * this flag is enabled by writing "1" to the special
2528 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2529 */
2530
2531int cpuset_memory_pressure_enabled __read_mostly;
2532
2533/**
2534 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2535 *
2536 * Keep a running average of the rate of synchronous (direct)
2537 * page reclaim efforts initiated by tasks in each cpuset.
2538 *
2539 * This represents the rate at which some task in the cpuset
2540 * ran low on memory on all nodes it was allowed to use, and
2541 * had to enter the kernel's page reclaim code in an effort to
2542 * create more free memory by tossing clean pages or swapping
2543 * or writing dirty pages.
2544 *
2545 * Display to user space in the per-cpuset read-only file
2546 * "memory_pressure".  Value displayed is an integer
2547 * representing the recent rate of entry into the synchronous
2548 * (direct) page reclaim by any task attached to the cpuset.
2549 **/
2550
2551void __cpuset_memory_pressure_bump(void)
2552{
2553        struct cpuset *cs;
2554
2555        task_lock(current);
2556        cs = current->cpuset;
2557        fmeter_markevent(&cs->fmeter);
2558        task_unlock(current);
2559}
2560
2561/*
2562 * proc_cpuset_show()
2563 *  - Print the task's cpuset path into seq_file.
2564 *  - Used for /proc/<pid>/cpuset.
2565 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2566 *    doesn't really matter if tsk->cpuset changes after we read it,
2567 *    and we take manage_mutex, keeping attach_task() from changing it
2568 *    anyway.  No need to check that tsk->cpuset != NULL, thanks to
2569 *    the_top_cpuset_hack in cpuset_exit(), which sets an exiting task's
2570 *    cpuset to top_cpuset.
2571 */
2572static int proc_cpuset_show(struct seq_file *m, void *v)
2573{
2574        struct pid *pid;
2575        struct task_struct *tsk;
2576        char *buf;
2577        int retval;
2578
2579        retval = -ENOMEM;
2580        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2581        if (!buf)
2582                goto out;
2583
2584        retval = -ESRCH;
2585        pid = m->private;
2586        tsk = get_pid_task(pid, PIDTYPE_PID);
2587        if (!tsk)
2588                goto out_free;
2589
2590        retval = -EINVAL;
2591        mutex_lock(&manage_mutex);
2592
2593        retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
2594        if (retval < 0)
2595                goto out_unlock;
2596        seq_puts(m, buf);
2597        seq_putc(m, '\n');
2598out_unlock:
2599        mutex_unlock(&manage_mutex);
2600        put_task_struct(tsk);
2601out_free:
2602        kfree(buf);
2603out:
2604        return retval;
2605}
2606
2607static int cpuset_open(struct inode *inode, struct file *file)
2608{
2609        struct pid *pid = PROC_I(inode)->pid;
2610        return single_open(file, proc_cpuset_show, pid);
2611}
2612
2613struct file_operations proc_cpuset_operations = {
2614        .open           = cpuset_open,
2615        .read           = seq_read,
2616        .llseek         = seq_lseek,
2617        .release        = single_release,
2618};
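
/*
 * Illustrative user-space sketch (not part of this file): the proc file
 * served above contains a single line, the task's cpuset path relative
 * to the cpuset filesystem root.  Reading it for the current task:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char path[4096];
 *		FILE *f = fopen("/proc/self/cpuset", "r");
 *
 *		if (f && fgets(path, sizeof(path), f))
 *			printf("cpuset path: %s", path);	// e.g. "/example\n"
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */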
2619
2620/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
2621char *cpuset_task_status_allowed(struct task_struct *task, char *buffer)
2622{
2623        buffer += sprintf(buffer, "Cpus_allowed:\t");
2624        buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed);
2625        buffer += sprintf(buffer, "\n");
2626        buffer += sprintf(buffer, "Mems_allowed:\t");
2627        buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed);
2628        buffer += sprintf(buffer, "\n");
2629        return buffer;
2630}
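
/*
 * For reference, the two lines appended above show up in
 * /proc/<pid>/status as hex masks, for example (values illustrative,
 * field width depends on NR_CPUS and MAX_NUMNODES):
 *
 *	Cpus_allowed:	0000000f
 *	Mems_allowed:	00000003
 */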
2631