linux/drivers/cpuidle/cpuidle.c
/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
        return off;
}
void disable_cpuidle(void)
{
        off = 1;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns an error code if there is no registered driver or no state
 * provides an enter_dead() callback.
 */
int cpuidle_play_dead(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int i;

        if (!drv)
                return -ENODEV;

        /* Find lowest-power state that supports long-term idle */
        for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--)
                if (drv->states[i].enter_dead)
                        return drv->states[i].enter_dead(dev, i);

        return -ENODEV;
}
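
/*
 * For reference: a driver makes a state eligible for cpuidle_play_dead()
 * by filling in its ->enter_dead callback. A minimal sketch, assuming a
 * hypothetical platform helper my_cpu_power_off() that does not return
 * on success:
 *
 *      static int my_enter_dead(struct cpuidle_device *dev, int index)
 *      {
 *              return my_cpu_power_off();
 *      }
 *
 *      drv->states[drv->state_count - 1].enter_dead = my_enter_dead;
 */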

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into drv->states of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
                        int index)
{
        int entered_state;

        struct cpuidle_state *target_state = &drv->states[index];
        ktime_t time_start, time_end;
        s64 diff;

        time_start = ktime_get();

        entered_state = target_state->enter(dev, drv, index);

        time_end = ktime_get();

        local_irq_enable();

        diff = ktime_to_us(ktime_sub(time_end, time_start));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        if (entered_state >= 0) {
                /*
                 * Update cpuidle counters. This could be done in each
                 * driver's enter routine, but that would result in
                 * multiple copies of the same code.
                 */
                dev->states_usage[entered_state].time += dev->last_residency;
                dev->states_usage[entered_state].usage++;
        } else {
                dev->last_residency = 0;
        }

        return entered_state;
}
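
/*
 * The ->enter callback used above must return the index of the state the
 * hardware actually entered (drivers may demote to a shallower state),
 * or a negative value on failure; the core re-enables interrupts once it
 * returns. A minimal sketch, assuming a hypothetical platform helper
 * my_platform_idle():
 *
 *      static int my_enter(struct cpuidle_device *dev,
 *                          struct cpuidle_driver *drv, int index)
 *      {
 *              my_platform_idle(index);
 *              return index;
 *      }
 */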

/**
 * cpuidle_idle_call - the main idle loop
 *
 * NOTE: no locks or semaphores should be used here
 * Returns non-zero on failure.
 */
int cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv;
        int next_state, entered_state;

        if (off)
                return -ENODEV;

        if (!initialized)
                return -ENODEV;

        /* check if the device is ready */
        if (!dev || !dev->enabled)
                return -EBUSY;

        drv = cpuidle_get_cpu_driver(dev);

        /* ask the governor for the next state */
        next_state = cpuidle_curr_governor->select(drv, dev);
        if (need_resched()) {
                dev->last_residency = 0;
                /* give the governor an opportunity to reflect on the outcome */
                if (cpuidle_curr_governor->reflect)
                        cpuidle_curr_governor->reflect(dev, next_state);
                local_irq_enable();
                return 0;
        }

        trace_cpu_idle_rcuidle(next_state, dev->cpu);

        if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
                                   &dev->cpu);

        if (cpuidle_state_is_coupled(dev, drv, next_state))
                entered_state = cpuidle_enter_state_coupled(dev, drv,
                                                            next_state);
        else
                entered_state = cpuidle_enter_state(dev, drv, next_state);

        if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
                                   &dev->cpu);

        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

        /* give the governor an opportunity to reflect on the outcome */
        if (cpuidle_curr_governor->reflect)
                cpuidle_curr_governor->reflect(dev, entered_state);

        return 0;
}
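
/*
 * Architecture idle loops typically treat a non-zero return from
 * cpuidle_idle_call() as "cpuidle unavailable" and fall back to the
 * default idle routine, along the lines of the sketch below
 * (arch_default_idle() is a placeholder name, not a real API):
 *
 *      local_irq_disable();
 *      if (cpuidle_idle_call())
 *              arch_default_idle();
 */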

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
        if (enabled_devices) {
                /*
                 * Make sure all changes are finished before we switch to
                 * the new idle handler.
                 */
                smp_wmb();
                initialized = 1;
        }
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
        if (enabled_devices) {
                initialized = 0;
                kick_all_cpus_sync();
        }
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_uninstall_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
        mutex_lock(&cpuidle_lock);
        cpuidle_install_idle_handler();
        mutex_unlock(&cpuidle_lock);
}

#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
{
        ktime_t t1, t2;
        s64 diff;

        t1 = ktime_get();
        local_irq_enable();
        while (!need_resched())
                cpu_relax();

        t2 = ktime_get();
        diff = ktime_to_us(ktime_sub(t2, t1));
        if (diff > INT_MAX)
                diff = INT_MAX;

        dev->last_residency = (int) diff;

        return index;
}

static void poll_idle_init(struct cpuidle_driver *drv)
{
        struct cpuidle_state *state = &drv->states[0];

        snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
        snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
        state->exit_latency = 0;
        state->target_residency = 0;
        state->power_usage = -1;
        state->flags = 0;
        state->enter = poll_idle;
        state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv;

        if (!dev)
                return -EINVAL;

        if (dev->enabled)
                return 0;

        drv = cpuidle_get_cpu_driver(dev);

        if (!drv || !cpuidle_curr_governor)
                return -EIO;

        if (!dev->registered)
                return -EINVAL;

        if (!dev->state_count)
                dev->state_count = drv->state_count;

        poll_idle_init(drv);

        ret = cpuidle_add_device_sysfs(dev);
        if (ret)
                return ret;

        if (cpuidle_curr_governor->enable &&
            (ret = cpuidle_curr_governor->enable(drv, dev)))
                goto fail_sysfs;

        smp_wmb();

        dev->enabled = 1;

        enabled_devices++;
        return 0;

fail_sysfs:
        cpuidle_remove_device_sysfs(dev);

        return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!dev || !dev->enabled)
                return;

        if (!drv || !cpuidle_curr_governor)
                return;

        dev->enabled = 0;

        if (cpuidle_curr_governor->disable)
                cpuidle_curr_governor->disable(drv, dev);

        cpuidle_remove_device_sysfs(dev);
        enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);
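
/*
 * As the kernel-doc above states, external callers must bracket these
 * two functions with the pause/resume helpers. An illustrative sketch
 * for re-evaluating the states of one device:
 *
 *      cpuidle_pause_and_lock();
 *      cpuidle_disable_device(dev);
 *      (update the device's state information here)
 *      cpuidle_enable_device(dev);
 *      cpuidle_resume_and_unlock();
 */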

static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        list_del(&dev->device_list);
        per_cpu(cpuidle_devices, dev->cpu) = NULL;
        module_put(drv->owner);
}

static int __cpuidle_device_init(struct cpuidle_device *dev)
{
        memset(dev->states_usage, 0, sizeof(dev->states_usage));
        dev->last_residency = 0;

        return 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret;
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

        if (!try_module_get(drv->owner))
                return -EINVAL;

        per_cpu(cpuidle_devices, dev->cpu) = dev;
        list_add(&dev->device_list, &cpuidle_detected_devices);

        ret = cpuidle_coupled_register_device(dev);
        if (ret) {
                __cpuidle_unregister_device(dev);
                return ret;
        }

        dev->registered = 1;
        return 0;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
        int ret = -EBUSY;

        if (!dev)
                return -EINVAL;

        mutex_lock(&cpuidle_lock);

        if (dev->registered)
                goto out_unlock;

        ret = __cpuidle_device_init(dev);
        if (ret)
                goto out_unlock;

        ret = __cpuidle_register_device(dev);
        if (ret)
                goto out_unlock;

        ret = cpuidle_add_sysfs(dev);
        if (ret)
                goto out_unregister;

        ret = cpuidle_enable_device(dev);
        if (ret)
                goto out_sysfs;

        cpuidle_install_idle_handler();

out_unlock:
        mutex_unlock(&cpuidle_lock);

        return ret;

out_sysfs:
        cpuidle_remove_sysfs(dev);
out_unregister:
        __cpuidle_unregister_device(dev);
        goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
        if (dev->registered == 0)
                return;

        cpuidle_pause_and_lock();

        cpuidle_disable_device(dev);

        cpuidle_remove_sysfs(dev);

        __cpuidle_unregister_device(dev);

        cpuidle_coupled_unregister_device(dev);

        cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and the devices it registered
 *
 * This function can be used only if the driver has been previously
 * registered through the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
        int cpu;
        struct cpuidle_device *device;

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                cpuidle_unregister_device(device);
        }

        cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - register the driver and the cpu devices with the
 * coupled_cpus passed as parameter
 *
 * This function covers the common initialization pattern found in the
 * arch-specific drivers. The devices are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
                     const struct cpumask *const coupled_cpus)
{
        int ret, cpu;
        struct cpuidle_device *device;

        ret = cpuidle_register_driver(drv);
        if (ret) {
                pr_err("failed to register cpuidle driver\n");
                return ret;
        }

        for_each_cpu(cpu, drv->cpumask) {
                device = &per_cpu(cpuidle_dev, cpu);
                device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
                /*
                 * On ARM multiplatform kernels, the coupled idle states
                 * could be enabled in the kernel even if the cpuidle
                 * driver does not use them. Note, coupled_cpus is a
                 * struct copy.
                 */
                if (coupled_cpus)
                        device->coupled_cpus = *coupled_cpus;
#endif
                ret = cpuidle_register_device(device);
                if (!ret)
                        continue;

                pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

                cpuidle_unregister(drv);
                break;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
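
/*
 * A minimal platform driver built on cpuidle_register(). The names and
 * latency values below are hypothetical; real drivers take them from
 * hardware documentation or the device tree:
 *
 *      static struct cpuidle_driver my_idle_driver = {
 *              .name  = "my_idle",
 *              .owner = THIS_MODULE,
 *              .states = {
 *                      [0] = {
 *                              .enter            = my_enter,
 *                              .exit_latency     = 1,
 *                              .target_residency = 1,
 *                              .name             = "WFI",
 *                              .desc             = "wait for interrupt",
 *                      },
 *              },
 *              .state_count = 1,
 *      };
 *
 *      ret = cpuidle_register(&my_idle_driver, NULL);
 */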

#ifdef CONFIG_SMP

static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
                unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 1);
        return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
        .notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
        pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
        int ret;

        if (cpuidle_disabled())
                return -ENODEV;

        ret = cpuidle_add_interface(cpu_subsys.dev_root);
        if (ret)
                return ret;

        latency_notifier_init(&cpuidle_latency_notifier);

        return 0;
}
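
/*
 * cpuidle is core (non-modular) code, so "off" is exposed as the
 * read-only boot parameter "cpuidle.off"; booting with cpuidle.off=1
 * disables cpuidle entirely.
 */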
module_param(off, int, 0444);
core_initcall(cpuidle_init);