linux/drivers/cpuidle/cpuidle.c
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos_params.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);
static void (*pm_idle_old)(void);

static int enabled_devices;

/*
 * cpuidle_kick_cpus - force all CPUs out of the old idle routine
 *
 * After pm_idle is switched back, a CPU may still be parked inside
 * cpuidle_idle_call(); kick every CPU so it picks up the new handler.
 */
#if defined(CONFIG_ARCH_HAS_CPU_IDLE_WAIT)
static void cpuidle_kick_cpus(void)
{
        cpu_idle_wait();
}
#elif defined(CONFIG_SMP)
# error "Arch needs cpu_idle_wait() equivalent here"
#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT && !CONFIG_SMP */
static void cpuidle_kick_cpus(void) {}
#endif

static int __cpuidle_register_device(struct cpuidle_device *dev);

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
        struct cpuidle_state *target_state;
        int next_state;

        /* check if the device is ready */
        if (!dev || !dev->enabled) {
                if (pm_idle_old)
                        pm_idle_old();
                else
#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
                        default_idle();
#else
                        local_irq_enable();
#endif
                return;
        }

#if 0
        /* shows regressions, re-enable for 2.6.29 */
        /*
         * run any timers that can be run now, at this point
         * before calculating the idle duration etc.
         */
        hrtimer_peek_ahead_timers();
#endif
        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(dev);
        if (need_resched())
                return;
        target_state = &dev->states[next_state];

        /* enter the state and update stats */
        dev->last_state = target_state;
        dev->last_residency = target_state->enter(dev, target_state);
        if (dev->last_state)
                target_state = dev->last_state;

        target_state->time += (unsigned long long)dev->last_residency;
        target_state->usage++;

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev);
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
                /* Make sure all changes finished before we switch to new idle */
                smp_wmb();
                pm_idle = cpuidle_idle_call;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
                pm_idle = pm_idle_old;
                cpuidle_kick_cpus();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
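
/*
 * Example (sketch): an external caller, e.g. a driver rebuilding its
 * C-state table, brackets the enable/disable calls below with the
 * pause/lock helpers above ("dev" is that caller's cpuidle_device):
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reprogram dev->states ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */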

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;

        if (dev->enabled)
                return 0;
        if (!cpuidle_curr_driver || !cpuidle_curr_governor)
                return -EIO;
        if (!dev->state_count)
                return -EINVAL;

        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                if (ret)
                        return ret;
        }

        if ((ret = cpuidle_add_state_sysfs(dev)))
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(dev)))
                goto fail_sysfs;

        for (i = 0; i < dev->state_count; i++) {
                dev->states[i].usage = 0;
                dev->states[i].time = 0;
        }
        dev->last_residency = 0;
        dev->last_state = NULL;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_state_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        if (!dev->enabled)
                return;
        if (!cpuidle_curr_driver || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(dev);

        cpuidle_remove_state_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
{
        ktime_t t1, t2;
        s64 diff;
        int ret;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        ret = (int) diff;
        return ret;
}

static void poll_idle_init(struct cpuidle_device *dev)
{
        struct cpuidle_state *state = &dev->states[0];

        cpuidle_set_statedata(state, NULL);

        snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = CPUIDLE_FLAG_POLL;
        state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
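
/*
 * Note: with CONFIG_ARCH_HAS_CPU_RELAX, states[0] is claimed by the
 * polling loop above, so drivers fill in their real C-states starting
 * at CPUIDLE_DRIVER_STATE_START (1 on such architectures, 0 otherwise).
 */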

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

        if (!sys_dev)
                return -EINVAL;
        if (!try_module_get(cpuidle_curr_driver->owner))
                return -EINVAL;

        init_completion(&dev->kobj_unregister);

        poll_idle_init(dev);

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        if ((ret = cpuidle_add_sysfs(sys_dev))) {
                /* undo the list and per-cpu setup above on failure */
                list_del(&dev->device_list);
                per_cpu(cpuidle_devices, dev->cpu) = NULL;
                module_put(cpuidle_curr_driver->owner);
                return ret;
        }

        dev->registered = 1;
        return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;

        mutex_lock(&cpuidle_lock);

        if ((ret = __cpuidle_register_device(dev))) {
                mutex_unlock(&cpuidle_lock);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);
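
/*
 * Example (sketch, hypothetical driver): a platform driver typically
 * fills in one cpuidle_device per CPU and hands it to
 * cpuidle_register_device(). The names my_enter/my_dev below are
 * illustrative, not part of this file:
 *
 *	static int my_enter(struct cpuidle_device *dev,
 *			    struct cpuidle_state *state)
 *	{
 *		... enter the hardware idle state ...
 *		return residency_us;	(time actually idle, in usecs)
 *	}
 *
 *	my_dev->cpu = cpu;
 *	my_dev->state_count = 2;
 *	my_dev->states[1].enter = my_enter;
 *	my_dev->states[1].exit_latency = 10;
 *	my_dev->states[1].target_residency = 100;
 *	ret = cpuidle_register_device(my_dev);
 */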

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);

        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(sys_dev);
        list_del(&dev->device_list);
        wait_for_completion(&dev->kobj_unregister);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_resume_and_unlock();

        module_put(cpuidle_curr_driver->owner);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
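
/*
 * Example (sketch): the notifier above fires when anyone changes the
 * system-wide CPU latency requirement through the pm_qos_params
 * interface, e.g. a driver that temporarily needs fast wakeups:
 *
 *	pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv", 50);
 *	... latency-sensitive work ...
 *	pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydrv");
 *
 * ("mydrv" and the 50 usec bound are illustrative values only.)
 */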

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        pm_idle_old = pm_idle;

        ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

core_initcall(cpuidle_init);