linux/kernel/stop_machine.c
/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

/* This controls the threads on each CPU. */
enum stopmachine_state {
        /* Dummy starting state for thread. */
        STOPMACHINE_NONE,
        /* Awaiting everyone to be scheduled. */
        STOPMACHINE_PREPARE,
        /* Disable interrupts. */
        STOPMACHINE_DISABLE_IRQ,
        /* Run the function. */
        STOPMACHINE_RUN,
        /* Exit. */
        STOPMACHINE_EXIT,
};
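/*
 * These states advance strictly in declaration order: set_state() arms
 * a countdown in thread_ack, each stopped CPU acks the state it has
 * just acted on, and the last CPU to ack moves everyone forward via
 * set_state(state + 1), so all CPUs step through PREPARE ->
 * DISABLE_IRQ -> RUN -> EXIT in lockstep.
 */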
static enum stopmachine_state state;

struct stop_machine_data {
        int (*fn)(void *);
        void *data;
        int fnret;
};

/* Like num_online_cpus(), but CPU hotplug itself uses stop_machine(),
 * so the online count can change underneath us; we snapshot it here. */
static unsigned int num_threads;
static atomic_t thread_ack;
static DEFINE_MUTEX(lock);
/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
static DEFINE_MUTEX(setup_lock);
/* Users of stop_machine. */
static int refcount;
static struct workqueue_struct *stop_machine_wq;
static struct stop_machine_data active, idle;
static const struct cpumask *active_cpus;
static void *stop_machine_work;

static void set_state(enum stopmachine_state newstate)
{
        /* Reset ack counter. */
        atomic_set(&thread_ack, num_threads);
        /* Make the reset visible before anyone can observe newstate:
         * a CPU that sees the new state will decrement thread_ack. */
        smp_wmb();
        state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(void)
{
        if (atomic_dec_and_test(&thread_ack))
                set_state(state + 1);
}
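
/*
 * Illustrative timeline on a two-CPU machine (assumed schedule, for
 * exposition only):
 *
 *   caller: set_state(STOPMACHINE_PREPARE)      thread_ack = 2
 *   CPU0 worker sees PREPARE, ack_state()       thread_ack = 1
 *   CPU1 worker sees PREPARE, ack_state()       -> set_state(DISABLE_IRQ)
 *   both workers disable irqs and ack           -> set_state(RUN)
 *   the active CPU runs fn(), the rest chill()  -> set_state(EXIT)
 *   both workers leave the loop, irqs back on
 */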

/* This is the actual function which stops the CPU. It runs
 * in the context of a dedicated stopmachine workqueue. */
static void stop_cpu(struct work_struct *unused)
{
        enum stopmachine_state curstate = STOPMACHINE_NONE;
        struct stop_machine_data *smdata = &idle;
        int cpu = smp_processor_id();
        int err;

        if (!active_cpus) {
                if (cpu == cpumask_first(cpu_online_mask))
                        smdata = &active;
        } else {
                if (cpumask_test_cpu(cpu, active_cpus))
                        smdata = &active;
        }
        /* Simple state machine */
        do {
                /* Chill out and ensure we re-read stopmachine_state. */
                cpu_relax();
                if (state != curstate) {
                        curstate = state;
                        switch (curstate) {
                        case STOPMACHINE_DISABLE_IRQ:
                                local_irq_disable();
                                hard_irq_disable();
                                break;
                        case STOPMACHINE_RUN:
                                /* With multiple CPUs only a single error
                                 * code is needed to tell that something
                                 * failed; if several fail, one of their
                                 * return values wins the (benign) race. */
                                err = smdata->fn(smdata->data);
                                if (err)
                                        smdata->fnret = err;
                                break;
                        default:
                                break;
                        }
                        ack_state();
                }
        } while (curstate != STOPMACHINE_EXIT);

        local_irq_enable();
}

/* Callback for CPUs which aren't supposed to do anything. */
static int chill(void *unused)
{
        return 0;
}

int stop_machine_create(void)
{
        mutex_lock(&setup_lock);
        if (refcount)
                goto done;
        stop_machine_wq = create_rt_workqueue("kstop");
        if (!stop_machine_wq)
                goto err_out;
        stop_machine_work = alloc_percpu(struct work_struct);
        if (!stop_machine_work)
                goto err_out;
done:
        refcount++;
        mutex_unlock(&setup_lock);
        return 0;

err_out:
        if (stop_machine_wq)
                destroy_workqueue(stop_machine_wq);
        mutex_unlock(&setup_lock);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(stop_machine_create);

void stop_machine_destroy(void)
{
        mutex_lock(&setup_lock);
        refcount--;
        if (refcount)
                goto done;
        destroy_workqueue(stop_machine_wq);
        free_percpu(stop_machine_work);
done:
        mutex_unlock(&setup_lock);
}
EXPORT_SYMBOL_GPL(stop_machine_destroy);
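
/*
 * Minimal usage sketch (illustration only, not compiled; my_init/my_exit
 * are made-up names).  Taking a reference early with stop_machine_create()
 * guarantees that a later stop_machine() call cannot fail with -ENOMEM,
 * since with refcount already non-zero the nested create just bumps the
 * count.
 */
#if 0
static int __init my_init(void)
{
        int err;

        err = stop_machine_create();    /* pre-allocate wq + work items */
        if (err)
                return err;
        /* ... from now on stop_machine() cannot fail with -ENOMEM ... */
        return 0;
}

static void __exit my_exit(void)
{
        stop_machine_destroy();         /* drop our reference */
}
#endif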

/* Callers must already hold a stop_machine reference (stop_machine_create())
 * and must keep CPUs from coming or going, e.g. via get_online_cpus();
 * stop_machine() below takes care of both. */
int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
        struct work_struct *sm_work;
        int i, ret;

        /* Set up initial state. */
        mutex_lock(&lock);
        num_threads = num_online_cpus();
        active_cpus = cpus;
        active.fn = fn;
        active.data = data;
        active.fnret = 0;
        idle.fn = chill;
        idle.data = NULL;

        set_state(STOPMACHINE_PREPARE);

        /* Schedule the stop_cpu work on all cpus: hold this CPU so one
         * doesn't hit this CPU until we're ready. */
        get_cpu();
        for_each_online_cpu(i) {
                sm_work = per_cpu_ptr(stop_machine_work, i);
                INIT_WORK(sm_work, stop_cpu);
                queue_work_on(i, stop_machine_wq, sm_work);
        }
        /* This will release the thread on our CPU. */
        put_cpu();
        flush_workqueue(stop_machine_wq);
        ret = active.fnret;
        mutex_unlock(&lock);
        return ret;
}

int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
{
        int ret;

        ret = stop_machine_create();
        if (ret)
                return ret;
        /* No CPUs can come up or down during this. */
        get_online_cpus();
        ret = __stop_machine(fn, data, cpus);
        put_online_cpus();
        stop_machine_destroy();
        return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
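
/*
 * Minimal caller sketch (illustration only, not compiled; stop_fn and
 * run_example are made-up names).  With a NULL cpumask, fn runs on the
 * first online CPU while every other online CPU spins with interrupts
 * disabled; passing a cpumask instead selects which CPUs run fn.
 */
#if 0
static int stop_fn(void *arg)
{
        /* Runs with irqs off on one CPU; all other CPUs are held. */
        printk(KERN_INFO "machine stopped, arg=%p\n", arg);
        return 0;
}

static int run_example(void)
{
        return stop_machine(stop_fn, NULL, NULL);
}
#endif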