linux/kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/*
 * Represents all CPUs present in the system.
 * In systems capable of hotplug, this map could dynamically grow
 * as new CPUs are detected in the system via any platform-specific
 * method, such as ACPI, for example.
 */
cpumask_t cpu_present_map __read_mostly;
EXPORT_SYMBOL(cpu_present_map);

#ifndef CONFIG_SMP

/*
 * Represents all CPUs that are currently online.
 */
cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

#else /* CONFIG_SMP */

/* Serializes the updates to cpu_online_map, cpu_present_map */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug;

void __init cpu_hotplug_init(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_init(&cpu_hotplug.lock);
        cpu_hotplug.refcount = 0;
}

cpumask_t cpu_active_map;

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(put_online_cpus);
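
/*
 * Example (editor's sketch): a read-side user brackets any walk of
 * cpu_online_map with the pair above so that a hotplug writer cannot
 * change the map underneath it.  The function name and the per-cpu
 * work are hypothetical.
 */
static void __maybe_unused example_online_cpu_walk(void)
{
        int cpu;

        get_online_cpus();              /* blocks cpu_hotplug_begin() */
        for_each_online_cpu(cpu) {
                /* per-cpu work that must not race with CPU hotplug */
        }
        put_online_cpus();              /* may wake a waiting writer */
}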

#endif  /* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_map and cpu_present_map.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 *
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}
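
/*
 * Write-side locking order, as used by _cpu_up()/_cpu_down() below
 * (editor's sketch, for orientation):
 *
 *      cpu_maps_update_begin();        // take cpu_add_remove_lock
 *      cpu_hotplug_begin();            // wait until refcount == 0
 *      ... update cpu_online_map / cpu_present_map ...
 *      cpu_hotplug_done();
 *      cpu_maps_update_done();
 */
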
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
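
/*
 * Example (editor's sketch): a minimal client of the notifier chain above.
 * The names example_cpu_callback/example_cpu_notifier are hypothetical; a
 * real subsystem would do its per-cpu setup/teardown in the two cases.
 */
static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                /* cpu is now in cpu_online_map: set up per-cpu state */
                break;
        case CPU_DEAD:
                /* cpu has been taken down: release per-cpu state */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __cpuinitdata = {
        .notifier_call = example_cpu_callback,
};

/* A caller would typically do register_cpu_notifier(&example_cpu_notifier)
 * from an __init/initcall context. */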

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
                                param->hcpu);

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        cpumask_t old_allowed, tmp;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
        if (err == NOTIFY_BAD) {
                nr_calls--;
                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                          hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                err = -EINVAL;
                goto out_release;
        }

        /* Ensure that we are not runnable on dying cpu */
        old_allowed = current->cpus_allowed;
        cpus_setall(tmp);
        cpu_clear(cpu, tmp);
        set_cpus_allowed_ptr(current, &tmp);
        tmp = cpumask_of_cpu(cpu);

        err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();

                goto out_allowed;
        }
        BUG_ON(cpu_online(cpu));

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
                                    hcpu) == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_allowed:
        set_cpus_allowed_ptr(current, &old_allowed);
out_release:
        cpu_hotplug_done();
        if (!err) {
                if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();
        }
        return err;
}
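
/*
 * For orientation, the teardown sequence implemented above (editor's
 * summary of the code, successful path):
 *
 *      CPU_DOWN_PREPARE -> stop_machine(take_cpu_down) [sends CPU_DYING]
 *        -> __cpu_die() -> CPU_DEAD -> CPU_POST_DEAD
 *
 * CPU_DOWN_FAILED is sent instead if CPU_DOWN_PREPARE is vetoed or if
 * take_cpu_down() (i.e. __cpu_disable()) fails.
 */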

int __ref cpu_down(unsigned int cpu)
{
        int err = 0;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        cpu_clear(cpu, cpu_active_map);

        /*
         * Make sure all cpus did the reschedule and are no longer
         * using a stale version of cpu_active_map.
         * This is not strictly necessary because the stop_machine()
         * that we run down the line already provides the required
         * synchronization. But that is really a side effect and we do not
         * want to depend on the innards of stop_machine() here.
         */
        synchronize_sched();

        err = _cpu_down(cpu, 0);

        if (cpu_online(cpu))
                cpu_set(cpu, cpu_active_map);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
                                                        -1, &nr_calls);
        if (ret == NOTIFY_BAD) {
                nr_calls--;
                printk("%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        cpu_set(cpu, cpu_active_map);

        /* Now call notifier in preparation. */
        raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __raw_notifier_call_chain(&cpu_chain,
                                CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        cpu_hotplug_done();

        return ret;
}
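
/*
 * Editor's summary of the bringup sequence above (successful path):
 *
 *      CPU_UP_PREPARE -> __cpu_up() [the new cpu sends CPU_STARTING via
 *        notify_cpu_starting()] -> CPU_ONLINE
 *
 * CPU_UP_CANCELED is sent to the already-notified callbacks if
 * CPU_UP_PREPARE is vetoed or __cpu_up() fails.
 */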

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;
        if (!cpu_isset(cpu, cpu_possible_map)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
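
/*
 * Usage sketch: cpu_up() and cpu_down() are the entry points used by
 * callers outside this file, e.g. the sysfs "online" attribute for
 * /sys/devices/system/cpu/cpuN (drivers/base/cpu.c):
 *
 *      err = cpu_down(cpu);    // echo 0 > .../cpuN/online
 *      err = cpu_up(cpu);      // echo 1 > .../cpuN/online
 *
 * Both return 0 on success or a negative errno (e.g. -EBUSY while
 * hotplug is disabled during suspend).
 */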

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = first_cpu(cpu_online_map);
        /* We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpus_clear(frozen_cpus);
        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error) {
                        cpu_set(cpu, frozen_cpus);
                        printk("CPU%d is down\n", cpu);
                } else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }
        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpus_empty(frozen_cpus))
                goto out;

        printk("Enabling non-boot CPUs ...\n");
        for_each_cpu_mask_nr(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }
        cpus_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
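
/*
 * Usage sketch: the suspend/hibernation core pairs these two around the
 * single-CPU phase, roughly:
 *
 *      error = disable_nonboot_cpus();
 *      if (!error)
 *              ... enter the sleep state on the boot CPU ...
 *      enable_nonboot_cpus();
 *
 * While the system is between the two calls, cpu_hotplug_disabled makes
 * cpu_up()/cpu_down() fail with -EBUSY.
 */
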
#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (cpu_isset(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
}

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for every nr < NR_CPUS, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of_cpu() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
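
/*
 * Worked example (editor's sketch) of how cpumask_of_cpu(), via
 * get_cpu_mask() in <linux/cpumask.h>, indexes this table.  Assume
 * BITS_PER_LONG == 64, NR_CPUS > 64 and cpu == 70; roughly:
 *
 *      p = cpu_bit_bitmap[1 + 70 % 64];   // row 7, whose word 0 is 1UL << 6
 *      p -= 70 / 64;                      // back up by one long
 *
 * The cpumask starting at p then has only bit 70 set (word 1 == 1UL << 6);
 * every other word it covers is one of the all-zero words of the table.
 * The empty row 0 exists so that row 1 (cpu % BITS_PER_LONG == 0) has
 * something safe to back into.
 */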

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
