linux/kernel/smpboot.c
<<
>>
Prefs
   1/*
   2 * Common SMP CPU bringup/teardown functions
   3 */
   4#include <linux/cpu.h>
   5#include <linux/err.h>
   6#include <linux/smp.h>
   7#include <linux/init.h>
   8#include <linux/list.h>
   9#include <linux/slab.h>
  10#include <linux/sched.h>
  11#include <linux/export.h>
  12#include <linux/percpu.h>
  13#include <linux/kthread.h>
  14#include <linux/smpboot.h>
  15
  16#include "smpboot.h"
  17
  18#ifdef CONFIG_SMP
  19
  20#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 *
 * Per-cpu cache of forked idle tasks: populated once by
 * idle_threads_init()/idle_thread_set_boot_cpu() and handed out again
 * via idle_thread_get() when a cpu is (re)brought online.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);
  26
  27struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
  28{
  29        struct task_struct *tsk = per_cpu(idle_threads, cpu);
  30
  31        if (!tsk)
  32                return ERR_PTR(-ENOMEM);
  33        init_idle(tsk, cpu);
  34        return tsk;
  35}
  36
  37void __init idle_thread_set_boot_cpu(void)
  38{
  39        per_cpu(idle_threads, smp_processor_id()) = current;
  40}
  41
  42/**
  43 * idle_init - Initialize the idle thread for a cpu
  44 * @cpu:        The cpu for which the idle thread should be initialized
  45 *
  46 * Creates the thread if it does not exist.
  47 */
  48static inline void idle_init(unsigned int cpu)
  49{
  50        struct task_struct *tsk = per_cpu(idle_threads, cpu);
  51
  52        if (!tsk) {
  53                tsk = fork_idle(cpu);
  54                if (IS_ERR(tsk))
  55                        pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
  56                else
  57                        per_cpu(idle_threads, cpu) = tsk;
  58        }
  59}
  60
  61/**
  62 * idle_threads_init - Initialize idle threads for all cpus
  63 */
  64void __init idle_threads_init(void)
  65{
  66        unsigned int cpu, boot_cpu;
  67
  68        boot_cpu = smp_processor_id();
  69
  70        for_each_possible_cpu(cpu) {
  71                if (cpu != boot_cpu)
  72                        idle_init(cpu);
  73        }
  74}
  75#endif
  76
  77#endif /* #ifdef CONFIG_SMP */
  78
/* All registered hotplug thread descriptors, linked via smp_hotplug_thread::list. */
static LIST_HEAD(hotplug_threads);
/* Protects hotplug_threads and per-cpu thread creation/parking/teardown. */
static DEFINE_MUTEX(smpboot_threads_lock);

/*
 * Per thread instance bookkeeping, allocated in __smpboot_create_thread()
 * and freed by the thread itself on stop (see smpboot_thread_fn()).
 */
struct smpboot_thread_data {
	unsigned int			cpu;	/* cpu this thread is bound to */
	unsigned int			status;	/* HP_THREAD_* lifecycle state */
	struct smp_hotplug_thread	*ht;	/* descriptor with the callbacks */
};

/* Lifecycle states tracked in smpboot_thread_data::status. */
enum {
	HP_THREAD_NONE = 0,	/* freshly created, ht->setup() not yet run */
	HP_THREAD_ACTIVE,	/* setup/unpark done, thread operational */
	HP_THREAD_PARKED,	/* ht->park() ran, thread parked */
};
  93
/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 1 when the thread should exit, 0 otherwise.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		/*
		 * Go interruptible BEFORE checking the stop/park/work
		 * conditions: a wakeup arriving after the checks then sets
		 * us back to TASK_RUNNING, so the schedule() below cannot
		 * lose it. preempt_disable() keeps us on td->cpu across
		 * the checks (see the BUG_ON affinity asserts below).
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cpu_online() tells cleanup whether this is hot-unplug */
			if (ht->cleanup)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			/* Thread data was allocated in __smpboot_create_thread() */
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* Only invoke park() once per active period */
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			/* Blocks here until unparked (or woken for stop) */
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			/*
			 * First iteration after creation: run setup() with
			 * preemption enabled, as it may sleep.
			 */
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		case HP_THREAD_PARKED:
			/* First iteration after an unpark: run unpark() */
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			preempt_disable();
			break;
		}

		if (!ht->thread_should_run(td->cpu)) {
			/*
			 * No work: sleep. Still TASK_INTERRUPTIBLE from the
			 * top of the loop unless a wakeup already raced us.
			 */
			preempt_enable();
			schedule();
		} else {
			/* Work pending: stay runnable and do it */
			set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}
 164
 165static int
 166__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 167{
 168        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 169        struct smpboot_thread_data *td;
 170
 171        if (tsk)
 172                return 0;
 173
 174        td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
 175        if (!td)
 176                return -ENOMEM;
 177        td->cpu = cpu;
 178        td->ht = ht;
 179
 180        tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
 181                                    ht->thread_comm);
 182        if (IS_ERR(tsk)) {
 183                kfree(td);
 184                return PTR_ERR(tsk);
 185        }
 186
 187        get_task_struct(tsk);
 188        *per_cpu_ptr(ht->store, cpu) = tsk;
 189        return 0;
 190}
 191
 192int smpboot_create_threads(unsigned int cpu)
 193{
 194        struct smp_hotplug_thread *cur;
 195        int ret = 0;
 196
 197        mutex_lock(&smpboot_threads_lock);
 198        list_for_each_entry(cur, &hotplug_threads, list) {
 199                ret = __smpboot_create_thread(cur, cpu);
 200                if (ret)
 201                        break;
 202        }
 203        mutex_unlock(&smpboot_threads_lock);
 204        return ret;
 205}
 206
 207static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 208{
 209        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 210
 211        kthread_unpark(tsk);
 212}
 213
 214void smpboot_unpark_threads(unsigned int cpu)
 215{
 216        struct smp_hotplug_thread *cur;
 217
 218        mutex_lock(&smpboot_threads_lock);
 219        list_for_each_entry(cur, &hotplug_threads, list)
 220                smpboot_unpark_thread(cur, cpu);
 221        mutex_unlock(&smpboot_threads_lock);
 222}
 223
 224static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 225{
 226        struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 227
 228        if (tsk)
 229                kthread_park(tsk);
 230}
 231
 232void smpboot_park_threads(unsigned int cpu)
 233{
 234        struct smp_hotplug_thread *cur;
 235
 236        mutex_lock(&smpboot_threads_lock);
 237        list_for_each_entry_reverse(cur, &hotplug_threads, list)
 238                smpboot_park_thread(cur, cpu);
 239        mutex_unlock(&smpboot_threads_lock);
 240}
 241
 242static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
 243{
 244        unsigned int cpu;
 245
 246        /* We need to destroy also the parked threads of offline cpus */
 247        for_each_possible_cpu(cpu) {
 248                struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 249
 250                if (tsk) {
 251                        kthread_stop(tsk);
 252                        put_task_struct(tsk);
 253                        *per_cpu_ptr(ht->store, cpu) = NULL;
 254                }
 255        }
 256}
 257
 258/**
 259 * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug
 260 * @plug_thread:        Hotplug thread descriptor
 261 *
 262 * Creates and starts the threads on all online cpus.
 263 */
 264int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
 265{
 266        unsigned int cpu;
 267        int ret = 0;
 268
 269        mutex_lock(&smpboot_threads_lock);
 270        for_each_online_cpu(cpu) {
 271                ret = __smpboot_create_thread(plug_thread, cpu);
 272                if (ret) {
 273                        smpboot_destroy_threads(plug_thread);
 274                        goto out;
 275                }
 276                smpboot_unpark_thread(plug_thread, cpu);
 277        }
 278        list_add(&plug_thread->list, &hotplug_threads);
 279out:
 280        mutex_unlock(&smpboot_threads_lock);
 281        return ret;
 282}
 283EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);
 284
/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	/*
	 * Hold off cpu hotplug while tearing down, so no cpu can come
	 * online and try to create/unpark a thread mid-destroy. The
	 * hotplug lock is taken outside smpboot_threads_lock; keep this
	 * ordering consistent everywhere.
	 */
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	/* Unlink first so cpu up/down paths stop seeing this descriptor */
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
 301
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.