linux/kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used to serialize updates to
 * cpu_online_mask and cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);

}
EXPORT_SYMBOL_GPL(put_online_cpus);
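
/*
 * Minimal usage sketch (illustrative only, not part of this file): a
 * reader that needs cpu_online_mask to stay stable brackets the region
 * with get_online_cpus()/put_online_cpus().  The helper name below is
 * made up for the example.
 *
 *	static unsigned int count_online_cpus_example(void)
 *	{
 *		unsigned int cpu, n = 0;
 *
 *		get_online_cpus();
 *		for_each_online_cpu(cpu)
 *			n++;
 *		put_online_cpus();
 *		return n;
 *	}
 *
 * Between get_online_cpus() and put_online_cpus() no CPU can go up or
 * down, so the loop sees a consistent snapshot of the online mask.
 */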

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif  /* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
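
/*
 * Usage sketch (illustrative only, not part of this file): a subsystem
 * that wants to react to CPUs coming and going registers a
 * notifier_block on this chain.  The callback and names below are made
 * up for the example.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			pr_info("example: cpu %u is now online\n", cpu);
 *			break;
 *		case CPU_DEAD:
 *			pr_info("example: cpu %u has gone away\n", cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_cpu_notifier = {
 *		.notifier_call = example_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&example_cpu_notifier);
 *
 * Returning notifier_from_errno(-Exxx) from CPU_UP_PREPARE or
 * CPU_DOWN_PREPARE vetoes the transition; see __cpu_notify() above.
 */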

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        struct task_struct *caller;
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        unsigned int cpu = (unsigned long)param->hcpu;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);

        if (task_cpu(param->caller) == cpu)
                move_task_off_dead_cpu(cpu, param->caller);
        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .caller = current,
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                goto out_release;
        }

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk("%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now call the CPU_ONLINE notifiers: the CPU is up and running. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef  CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t       *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef  CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                        "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
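
/*
 * Usage sketch (illustrative only, not part of this file): cpu_up() and
 * cpu_down() are normally driven from the sysfs "online" attribute of a
 * CPU device (drivers/base/cpu.c), e.g.:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/online	# calls cpu_down(2)
 *	echo 1 > /sys/devices/system/cpu/cpu2/online	# calls cpu_up(2)
 *
 * A kernel-side caller (hypothetical) would look like:
 *
 *	unsigned int cpu = 2;
 *	int err = cpu_down(cpu);
 *	if (err)
 *		pr_err("could not offline cpu %u: %d\n", cpu, err);
 *
 * Both paths return -EBUSY while cpu_hotplug_disabled is set, e.g.
 * during suspend/resume (see disable_nonboot_cpus() below).
 */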

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */
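
/*
 * Usage sketch (illustrative only, not part of this file): the
 * suspend/hibernate core in kernel/power/ brackets its "one CPU only"
 * phase with these helpers, roughly:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... enter the low-power state on the boot CPU ...
 *		enable_nonboot_cpus();
 *	}
 *
 * disable_nonboot_cpus() also sets cpu_hotplug_disabled, so concurrent
 * cpu_up()/cpu_down() calls fail with -EBUSY until enable_nonboot_cpus()
 * re-enables hotplug on resume.
 */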

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}
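
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * architecture's secondary-CPU startup path calls this before marking
 * the CPU online and enabling interrupts.  The function name below is
 * hypothetical; the real entry point is arch-specific.
 *
 *	void __cpuinit example_secondary_start_kernel(void)
 *	{
 *		... per-CPU setup: MMU, per-cpu areas, local timers ...
 *		notify_cpu_starting(smp_processor_id());
 *		set_cpu_online(smp_processor_id(), true);
 *		local_irq_enable();
 *		cpu_idle();
 *	}
 */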

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for every bit number nr < NR_CPUS, the bitmap value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address for a CPU
 * mask value that has only a single bit set.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
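
/*
 * How the table is consumed (sketch of get_cpu_mask() from
 * <linux/cpumask.h>, reproduced here only for illustration): row
 * 1 + cpu % BITS_PER_LONG holds a word with bit (cpu % BITS_PER_LONG)
 * set in its first element, and backing the pointer up by
 * cpu / BITS_PER_LONG words moves that word to the position that
 * corresponds to @cpu in a full cpumask.
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 *
 * This is why row 0 is left empty: the pointer may legitimately back
 * into the preceding row(s) for CPUs numbered above BITS_PER_LONG - 1.
 */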

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}
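
/*
 * Usage sketch (illustrative only, not part of this file): architecture
 * boot code populates these masks while enumerating CPUs, roughly:
 *
 *	for (i = 0; i < detected_cpus; i++)	(hypothetical loop/variable)
 *		set_cpu_possible(i, true);
 *	set_cpu_present(boot_cpu_id, true);	(boot_cpu_id is hypothetical)
 *	set_cpu_online(boot_cpu_id, true);
 *
 * The present/online bits for secondary CPUs are set later, during
 * __cpu_up() and the secondary startup path (see notify_cpu_starting()).
 */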

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}