linux/kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two API's must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);

        if (WARN_ON(!cpu_hotplug.refcount))
                cpu_hotplug.refcount++; /* try to fix things up */

        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(put_online_cpus);
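
/*
 * Illustrative sketch (editor's example, not part of kernel/cpu.c): a typical
 * reader brackets its walk over the online CPUs with the two helpers above so
 * that no CPU can be unplugged in the middle of the loop.  The function name
 * is hypothetical.
 */
static void example_walk_online_cpus(void)
{
        unsigned int cpu;

        get_online_cpus();              /* blocks writers, i.e. hotplug operations */
        for_each_online_cpu(cpu) {
                /* per-CPU work; @cpu cannot go offline while the ref is held */
        }
        put_online_cpus();
}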

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 1;
        cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        cpu_maps_update_done();
}

#endif  /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

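/*
 * Illustrative sketch (editor's example, not part of kernel/cpu.c): a
 * hypothetical subsystem that wants callbacks as CPUs come and go would hang
 * a notifier_block on the chain above.  All names prefixed "example_" are
 * made up.
 */
static int example_cpu_callback(struct notifier_block *nb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                /* allocate per-CPU state for @cpu; an error here aborts the bring-up */
                break;
        case CPU_ONLINE:
                /* @cpu is running: start scheduling work to it */
                break;
        case CPU_DOWN_PREPARE:
                /* @cpu is about to go away: stop targeting it */
                break;
        case CPU_DEAD:
                /* @cpu is gone: free its per-CPU state */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
        .notifier_call = example_cpu_callback,
};
/* ... registered from the subsystem's init code: register_cpu_notifier(&example_cpu_nb); */
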
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;
        cputime_t utime, stime;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                task_cputime(p, &utime, &stime);
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (utime || stime))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                goto out_release;
        }
        smpboot_park_threads(cpu);

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
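
/*
 * Illustrative sketch (editor's example, not part of kernel/cpu.c): with
 * CONFIG_HOTPLUG_CPU, in-kernel code may offline and re-online a CPU
 * directly; userspace normally does the same through
 * /sys/devices/system/cpu/cpuN/online, which also lands in
 * cpu_down()/cpu_up().  The helper name is hypothetical.
 */
static int example_cycle_cpu(unsigned int cpu)
{
        int err;

        err = cpu_down(cpu);    /* -EBUSY if hotplug is disabled or @cpu is the last one online */
        if (err)
                return err;

        return cpu_up(cpu);
}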

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Wake the per cpu threads */
        smpboot_unpark_threads(cpu);

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef  CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t       *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef  CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                        "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL, NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
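
/*
 * Illustrative sketch (editor's example, not part of kernel/cpu.c): the
 * intended caller of disable_nonboot_cpus()/enable_nonboot_cpus() is the
 * suspend/hibernate core, which does roughly the following (the function
 * name is hypothetical and details vary between kernel versions):
 */
static int example_suspend_with_one_cpu(void)
{
        int err;

        err = disable_nonboot_cpus();   /* offline everything but the boot CPU */
        if (err)
                return err;

        /* ... enter the sleep state with a single CPU online ... */

        enable_nonboot_cpus();          /* bring back the CPUs recorded in frozen_cpus */
        return 0;
}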

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
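
/*
 * Editor's note (illustrative, not part of kernel/cpu.c): pm_notifier() is a
 * convenience macro from <linux/suspend.h>; registering by hand would look
 * roughly like this (the variable name is hypothetical):
 *
 *      static struct notifier_block example_pm_nb = {
 *              .notifier_call  = cpu_hotplug_pm_callback,
 *              .priority       = 0,
 *      };
 *      register_pm_notifier(&example_pm_nb);
 */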

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}
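
/*
 * Illustrative sketch (editor's example, not part of kernel/cpu.c): the
 * expected caller is the architecture's secondary-CPU entry path, which runs
 * on the new CPU with interrupts still disabled, roughly in this order (the
 * function name is hypothetical and real arch code does much more):
 */
static void example_start_secondary(unsigned int cpu)
{
        /* ... low-level init of the freshly booted CPU ... */
        notify_cpu_starting(cpu);       /* CPU_STARTING notifiers, irqs still off */
        set_cpu_online(cpu, true);      /* typically what __cpu_up() on the boot CPU waits for */
        local_irq_enable();
        /* ... fall into the idle loop ... */
}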

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
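
/*
 * Editor's note (illustrative, not part of kernel/cpu.c): cpumask_of(cpu) in
 * <linux/cpumask.h> resolves to get_cpu_mask(cpu), which is roughly
 *
 *      const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *      p -= cpu / BITS_PER_LONG;
 *      return to_cpumask(p);
 *
 * Worked example, assuming BITS_PER_LONG == 64 and NR_CPUS == 256: for
 * cpu == 70 the row is 1 + (70 % 64) == 7, whose first long is 1UL << 6.
 * Backing the pointer up by 70 / 64 == 1 long makes that value word 1 of the
 * returned mask, i.e. bit 70.  The longs stepped over are the all-zero tail
 * of the previous row (or, when cpu is a multiple of BITS_PER_LONG, the
 * all-zero row 0), which is why the "empty" first row exists.
 */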

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}

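/*
 * Illustrative sketch (editor's example, not part of kernel/cpu.c):
 * architecture setup code populates these masks early in boot, typically from
 * firmware-described topology, e.g. (the function name is hypothetical):
 */
static void __init example_register_cpus(unsigned int ncpus)
{
        unsigned int cpu;

        for (cpu = 0; cpu < ncpus && cpu < nr_cpu_ids; cpu++) {
                set_cpu_possible(cpu, true);    /* may ever be brought up */
                set_cpu_present(cpu, true);     /* physically present right now */
        }
}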