linux/drivers/cpuidle/cpuidle.c
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}

void disable_cpuidle(void)
{
        off = 1;
}

static int __cpuidle_register_device(struct cpuidle_device *dev);

static inline int cpuidle_enter(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
{
        struct cpuidle_state *target_state = &drv->states[index];
        return target_state->enter(dev, drv, index);
}

static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int index)
{
        return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}

typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int index);

static cpuidle_enter_t cpuidle_enter_ops;
/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns an error if no driver is registered or if no state
 * provides an enter_dead() callback.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int i;

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
                if (drv->states[i].enter_dead)
                        return drv->states[i].enter_dead(dev, i);

        return -ENODEV;
}
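
/*
 * For illustration only: a minimal sketch of how a driver could supply the
 * enter_dead() hook consumed above (the callback signature follows the call
 * site in cpuidle_play_dead()). The my_arch_halt_forever() helper and the
 * state setup are hypothetical, not part of this file.
 */
#if 0
static int my_idle_enter_dead(struct cpuidle_device *dev, int index)
{
        my_arch_halt_forever();        /* hypothetical: parks the CPU for good */
        return 0;
}

static void my_idle_state_init(struct cpuidle_state *state)
{
        state->enter_dead = my_idle_enter_dead;
}
#endif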

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @next_state: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                int next_state)
{
        int entered_state;

        entered_state = cpuidle_enter_ops(dev, drv, next_state);

        if (entered_state >= 0) {
                /*
                 * Update the cpuidle counters. This could be done within each
                 * driver's enter routine instead, but that would result in
                 * multiple copies of the same code.
                 */
                dev->states_usage[entered_state].time += dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here.
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv;
        int next_state, entered_state;

        if (off)
                return -ENODEV;

        if (!initialized)
                return -ENODEV;

        /* check if the device is ready */
        if (!dev || !dev->enabled)
                return -EBUSY;

        drv = cpuidle_get_cpu_driver(dev);

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(drv, dev);
        if (need_resched()) {
                dev->last_residency = 0;
                /* give the governor an opportunity to reflect on the outcome */
                if (cpuidle_curr_governor->reflect)
                        cpuidle_curr_governor->reflect(dev, next_state);
                local_irq_enable();
                return 0;
        }

        trace_cpu_idle_rcuidle(next_state, dev->cpu);

        if (cpuidle_state_is_coupled(dev, drv, next_state))
                entered_state = cpuidle_enter_state_coupled(dev, drv,
                                                            next_state);
        else
                entered_state = cpuidle_enter_state(dev, drv, next_state);

        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev, entered_state);

        return 0;
}
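
/*
 * For illustration only: a sketch of how architecture idle-loop code of this
 * era consumes cpuidle_idle_call(), falling back to a simpler idle routine
 * when it returns non-zero. my_default_idle() is a hypothetical arch
 * fallback; the surrounding loop is the arch's responsibility.
 */
#if 0
static void my_cpu_idle(void)
{
        /* Called with interrupts disabled from the arch idle loop. */
        if (cpuidle_idle_call())
                my_default_idle();        /* hypothetical: halts until an irq */
}
#endif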

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /*
                 * Make sure all changes finished before we switch to the
                 * new idle handler.
                 */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                kick_all_cpus_sync();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}
EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
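
/*
 * For illustration only: the pattern external code is expected to follow
 * when reconfiguring a device, per the cpuidle_enable_device() and
 * cpuidle_disable_device() documentation below. my_adjust_states() is a
 * hypothetical stand-in for the caller's own work.
 */
#if 0
static void my_reconfigure(struct cpuidle_device *dev)
{
        cpuidle_pause_and_lock();
        cpuidle_disable_device(dev);
        my_adjust_states(dev);                /* hypothetical */
        cpuidle_enable_device(dev);
        cpuidle_resume_and_unlock();
}
#endif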

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_wrap_enter - performs timekeeping and interrupt re-enabling
 * around the enter function
 * @dev: pointer to a valid cpuidle_device object
 * @drv: pointer to a valid cpuidle_driver object
 * @index: index of the target cpuidle state.
 */
int cpuidle_wrap_enter(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index,
                                int (*enter)(struct cpuidle_device *dev,
                                        struct cpuidle_driver *drv, int index))
{
        ktime_t time_start, time_end;
        s64 diff;

        time_start = ktime_get();

        index = enter(dev, drv, index);

        time_end = ktime_get();

        local_irq_enable();

        diff = ktime_to_us(ktime_sub(time_end, time_start));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}
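
/*
 * For illustration only: a sketch of a driver that opts in to this wrapper.
 * With en_core_tk_irqen set, the core routes entries through
 * cpuidle_enter_tk() above, so the state's enter routine can skip its own
 * timekeeping and leave interrupts disabled on exit. my_wait_for_interrupt()
 * is a hypothetical low-power wait; the driver object is hypothetical too.
 */
#if 0
static int my_state_enter(struct cpuidle_device *dev,
                          struct cpuidle_driver *drv, int index)
{
        my_wait_for_interrupt();        /* hypothetical */
        return index;
}

static struct cpuidle_driver my_driver = {
        .name             = "my_idle",
        .owner            = THIS_MODULE,
        .en_core_tk_irqen = 1,
};
#endif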

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
{
        ktime_t t1, t2;
        s64 diff;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
        struct cpuidle_state *state = &drv->states[0];

        snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = 0;
        state->enter = poll_idle;
        state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret, i;
        struct cpuidle_driver *drv;

        if (!dev)
                return -EINVAL;

        if (dev->enabled)
                return 0;

        drv = cpuidle_get_cpu_driver(dev);

        if (!drv || !cpuidle_curr_governor)
                return -EIO;

        if (!dev->state_count)
                dev->state_count = drv->state_count;

        if (dev->registered == 0) {
                ret = __cpuidle_register_device(dev);
                if (ret)
                        return ret;
        }

        cpuidle_enter_ops = drv->en_core_tk_irqen ?
                cpuidle_enter_tk : cpuidle_enter;

        poll_idle_init(drv);

        ret = cpuidle_add_device_sysfs(dev);
        if (ret)
                return ret;

        if (cpuidle_curr_governor->enable) {
                ret = cpuidle_curr_governor->enable(drv, dev);
                if (ret)
                        goto fail_sysfs;
        }

        for (i = 0; i < dev->state_count; i++) {
                dev->states_usage[i].usage = 0;
                dev->states_usage[i].time = 0;
        }
        dev->last_residency = 0;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_device_sysfs(dev);

        return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!dev || !dev->enabled)
                return;

        if (!drv || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(drv, dev);

        cpuidle_remove_device_sysfs(dev);
        enabled_devices--;
}
EXPORT_SYMBOL_GPL(cpuidle_disable_device);

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!try_module_get(drv->owner))
                return -EINVAL;

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);
        ret = cpuidle_add_sysfs(dev);
        if (ret)
                goto err_sysfs;

        ret = cpuidle_coupled_register_device(dev);
        if (ret)
                goto err_coupled;

        dev->registered = 1;
        return 0;

err_coupled:
        cpuidle_remove_sysfs(dev);
err_sysfs:
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);
        return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;

        if (!dev)
                return -EINVAL;

        mutex_lock(&cpuidle_lock);

        ret = __cpuidle_register_device(dev);
        if (ret) {
                mutex_unlock(&cpuidle_lock);
                return ret;
        }

        cpuidle_enable_device(dev);
        cpuidle_install_idle_handler();

        mutex_unlock(&cpuidle_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(cpuidle_register_device);
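
/*
 * For illustration only: the typical registration sequence a cpuidle driver
 * of this era performs. The my_driver object and my_devices array are
 * hypothetical; error handling is elided for brevity.
 */
#if 0
static DEFINE_PER_CPU(struct cpuidle_device, my_devices);

static int __init my_idle_probe(void)
{
        int cpu;

        cpuidle_register_driver(&my_driver);

        for_each_possible_cpu(cpu) {
                struct cpuidle_device *dev = &per_cpu(my_devices, cpu);

                dev->cpu = cpu;
                cpuidle_register_device(dev);
        }

        return 0;
}
#endif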

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(dev);
        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;

        cpuidle_coupled_unregister_device(dev);

        cpuidle_resume_and_unlock();

        module_put(drv->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */
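
/*
 * For illustration only: how another part of the kernel triggers the
 * notifier chain above, using the existing PM QoS request API. The request
 * object and the 20 usec bound are hypothetical.
 */
#if 0
static struct pm_qos_request my_qos_req;

static void my_begin_latency_critical(void)
{
        /* Fires the PM_QOS_CPU_DMA_LATENCY notifiers, waking idle CPUs. */
        pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void my_end_latency_critical(void)
{
        pm_qos_remove_request(&my_qos_req);
}
#endif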

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}

module_param(off, int, 0444);
core_initcall(cpuidle_init);
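
/*
 * Note: since "off" is a module parameter of the builtin cpuidle core, it
 * can be set on the kernel command line as "cpuidle.off=1" to disable
 * cpuidle entirely at boot.
 */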