linux/kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
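
/*
 * Illustration (not part of the original file): reader-side usage as a
 * subsystem might write it.  Holding get_online_cpus() keeps
 * cpu_online_mask stable across the loop; the function name is
 * hypothetical.
 */
#if 0
static void example_walk_online_cpus(void)
{
        unsigned int cpu;

        get_online_cpus();              /* block hotplug writers */
        for_each_online_cpu(cpu) {
                /* per-cpu work; the online mask cannot change under us */
        }
        put_online_cpus();              /* let hotplug proceed again */
}
#endif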

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif  /* #else #if CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}

EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
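
/*
 * Illustration (not part of the original file): a minimal hotplug
 * notifier as a subsystem might register it.  All names here are
 * hypothetical; the events and the NOTIFY_OK return follow the
 * conventions used by the callers in this file.
 */
#if 0
static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                /* allocate per-cpu state for @cpu; an error returned
                 * via notifier_from_errno() would abort the bring-up */
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /* free per-cpu state for @cpu */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier __cpuinitdata = {
        .notifier_call = example_cpu_callback,
};

/* registered once, e.g. from an initcall:
 *      register_cpu_notifier(&example_cpu_notifier);
 */
#endif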

static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        struct task_struct *caller;
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        unsigned int cpu = (unsigned long)param->hcpu;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);

        if (task_cpu(param->caller) == cpu)
                move_task_off_dead_cpu(cpu, param->caller);
        /*
         * Force idle task to run as soon as we yield: it should
         * immediately notice cpu is offline and die quickly.
         */
        sched_idle_next();
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .caller = current,
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        set_cpu_active(cpu, false);
        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                set_cpu_active(cpu, true);

                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                goto out_release;
        }

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                set_cpu_active(cpu, true);
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);

                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
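
/*
 * Illustration: cpu_down() is what the sysfs "online" attribute
 * (drivers/base/cpu.c) ends up calling; from userspace the equivalent is
 *
 *      echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * A kernel caller would simply do, e.g.:
 */
#if 0
        err = cpu_down(1);      /* 0 on success, else -EBUSY, -EINVAL, ... */
#endif
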
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk("%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        set_cpu_active(cpu, true);

        /* Now tell everyone that the CPU is fully online. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        cpu_hotplug_done();

        return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef  CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t       *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef  CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                        "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
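
/*
 * Illustration: roughly how smp_init() (init/main.c, not part of this
 * file) uses cpu_up() to bring the secondary CPUs online at boot.
 */
#if 0
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }
#endif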

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with userspace trying to use CPU hotplug at the same time.
         */
        cpumask_clear(frozen_cpus);

        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }

        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
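
/*
 * Illustration: the suspend core pairs the two functions above roughly
 * like this (a simplified sketch, not the exact code in
 * kernel/power/suspend.c).
 */
#if 0
        error = disable_nonboot_cpus();
        if (!error) {
                /* ... enter the platform sleep state on the boot CPU ... */
        }
        enable_nonboot_cpus();  /* also re-enables cpu_up()/cpu_down() */
#endif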

static int alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}
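
/*
 * Illustration: a sketch of where an architecture's secondary-CPU entry
 * path calls notify_cpu_starting(), per the rules in the comment above.
 * The function name is hypothetical and the exact ordering varies by
 * arch; real examples live in each arch's smpboot code.
 */
#if 0
void __cpuinit example_start_secondary(void)
{
        unsigned int cpu = smp_processor_id();

        /* low-level init done; interrupts are still disabled here */
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);      /* lets __cpu_up() complete */
        local_irq_enable();
        cpu_idle();                     /* never returns */
}
#endif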

#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit mask 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
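
/*
 * Illustration: cpumask_of() resolves to a pointer into cpu_bit_bitmap,
 * so a single-CPU mask needs no storage at the call site.  For example:
 */
#if 0
        /* constant mask with only bit 3 set; points into cpu_bit_bitmap */
        const struct cpumask *mask = cpumask_of(3);
        BUG_ON(!cpumask_test_cpu(3, mask));
#endif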

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}