/* linux/arch/powerpc/platforms/pseries/processor_idle.c */
   1/*
   2 *  processor_idle - idle state cpuidle driver.
   3 *  Adapted from drivers/idle/intel_idle.c and
   4 *  drivers/acpi/processor_idle.c
   5 *
   6 */
   7
   8#include <linux/kernel.h>
   9#include <linux/module.h>
  10#include <linux/init.h>
  11#include <linux/moduleparam.h>
  12#include <linux/cpuidle.h>
  13#include <linux/cpu.h>
  14
  15#include <asm/paca.h>
  16#include <asm/reg.h>
  17#include <asm/system.h>
  18#include <asm/machdep.h>
  19#include <asm/firmware.h>
  20
  21#include "plpar_wrappers.h"
  22#include "pseries.h"
  23
/*
 * cpuidle driver instance registered with the cpuidle core; its states[]
 * array is populated from the partition-appropriate table at init time.
 * Non-static: referenced from the pseries platform setup code.
 */
struct cpuidle_driver pseries_idle_driver = {
        .name =         "pseries_idle",
        .owner =        THIS_MODULE,
};
  28
/* Number of idle states supported: snooze plus (on dedicated LPARs) cede. */
#define MAX_IDLE_STATE_COUNT    2

/* Deepest state index exposed; 0 disables the driver entirely. */
static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
/* Per-cpu cpuidle devices allocated/registered by pseries_idle_devices_init(). */
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
/* Points at shared_states or dedicated_states, chosen in pseries_idle_probe(). */
static struct cpuidle_state *cpuidle_state_table;
  34
  35void update_smt_snooze_delay(int snooze)
  36{
  37        struct cpuidle_driver *drv = cpuidle_get_driver();
  38        if (drv)
  39                drv->states[0].target_residency = snooze;
  40}
  41
  42static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
  43{
  44
  45        *kt_before = ktime_get_real();
  46        *in_purr = mfspr(SPRN_PURR);
  47        /*
  48         * Indicate to the HV that we are idle. Now would be
  49         * a good time to find other work to dispatch.
  50         */
  51        get_lppaca()->idle = 1;
  52}
  53
  54static inline  s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
  55{
  56        get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
  57        get_lppaca()->idle = 0;
  58
  59        return ktime_to_us(ktime_sub(ktime_get_real(), kt_before));
  60}
  61
  62static int snooze_loop(struct cpuidle_device *dev,
  63                        struct cpuidle_driver *drv,
  64                        int index)
  65{
  66        unsigned long in_purr;
  67        ktime_t kt_before;
  68        unsigned long start_snooze;
  69        long snooze = drv->states[0].target_residency;
  70
  71        idle_loop_prolog(&in_purr, &kt_before);
  72
  73        if (snooze) {
  74                start_snooze = get_tb() + snooze * tb_ticks_per_usec;
  75                local_irq_enable();
  76                set_thread_flag(TIF_POLLING_NRFLAG);
  77
  78                while ((snooze < 0) || (get_tb() < start_snooze)) {
  79                        if (need_resched() || cpu_is_offline(dev->cpu))
  80                                goto out;
  81                        ppc64_runlatch_off();
  82                        HMT_low();
  83                        HMT_very_low();
  84                }
  85
  86                HMT_medium();
  87                clear_thread_flag(TIF_POLLING_NRFLAG);
  88                smp_mb();
  89                local_irq_disable();
  90        }
  91
  92out:
  93        HMT_medium();
  94        dev->last_residency =
  95                (int)idle_loop_epilog(in_purr, kt_before);
  96        return index;
  97}
  98
  99static int dedicated_cede_loop(struct cpuidle_device *dev,
 100                                struct cpuidle_driver *drv,
 101                                int index)
 102{
 103        unsigned long in_purr;
 104        ktime_t kt_before;
 105
 106        idle_loop_prolog(&in_purr, &kt_before);
 107        get_lppaca()->donate_dedicated_cpu = 1;
 108
 109        ppc64_runlatch_off();
 110        HMT_medium();
 111        cede_processor();
 112
 113        get_lppaca()->donate_dedicated_cpu = 0;
 114        dev->last_residency =
 115                (int)idle_loop_epilog(in_purr, kt_before);
 116        return index;
 117}
 118
 119static int shared_cede_loop(struct cpuidle_device *dev,
 120                        struct cpuidle_driver *drv,
 121                        int index)
 122{
 123        unsigned long in_purr;
 124        ktime_t kt_before;
 125
 126        idle_loop_prolog(&in_purr, &kt_before);
 127
 128        /*
 129         * Yield the processor to the hypervisor.  We return if
 130         * an external interrupt occurs (which are driven prior
 131         * to returning here) or if a prod occurs from another
 132         * processor. When returning here, external interrupts
 133         * are enabled.
 134         */
 135        cede_processor();
 136
 137        dev->last_residency =
 138                (int)idle_loop_epilog(in_purr, kt_before);
 139        return index;
 140}
 141
 142/*
 143 * States for dedicated partition case.
 144 */
/*
 * States for dedicated partition case.
 * Note: snooze's target_residency is overwritten with smt_snooze_delay
 * in pseries_cpuidle_driver_init().  Latencies/residencies are in us.
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
        { /* Snooze */
                .name = "snooze",
                .desc = "snooze",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 0,
                .target_residency = 0,
                .enter = &snooze_loop },
        { /* CEDE */
                .name = "CEDE",
                .desc = "CEDE",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 1,
                .target_residency = 10,
                .enter = &dedicated_cede_loop },
};
 161
 162/*
 163 * States for shared partition case.
 164 */
/*
 * States for shared partition case.
 * Only cede is offered: polling would burn cycles the hypervisor
 * could give to other partitions.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
        { /* Shared Cede */
                .name = "Shared Cede",
                .desc = "Shared Cede",
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency = 0,
                .target_residency = 0,
                .enter = &shared_cede_loop },
};
 174
 175int pseries_notify_cpuidle_add_cpu(int cpu)
 176{
 177        struct cpuidle_device *dev =
 178                        per_cpu_ptr(pseries_cpuidle_devices, cpu);
 179        if (dev && cpuidle_get_driver()) {
 180                cpuidle_disable_device(dev);
 181                cpuidle_enable_device(dev);
 182        }
 183        return 0;
 184}
 185
 186/*
 187 * pseries_cpuidle_driver_init()
 188 */
 189static int pseries_cpuidle_driver_init(void)
 190{
 191        int idle_state;
 192        struct cpuidle_driver *drv = &pseries_idle_driver;
 193
 194        drv->state_count = 0;
 195
 196        for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {
 197
 198                if (idle_state > max_idle_state)
 199                        break;
 200
 201                /* is the state not enabled? */
 202                if (cpuidle_state_table[idle_state].enter == NULL)
 203                        continue;
 204
 205                drv->states[drv->state_count] = /* structure copy */
 206                        cpuidle_state_table[idle_state];
 207
 208                if (cpuidle_state_table == dedicated_states)
 209                        drv->states[drv->state_count].target_residency =
 210                                __get_cpu_var(smt_snooze_delay);
 211
 212                drv->state_count += 1;
 213        }
 214
 215        return 0;
 216}
 217
 218/* pseries_idle_devices_uninit(void)
 219 * unregister cpuidle devices and de-allocate memory
 220 */
 221static void pseries_idle_devices_uninit(void)
 222{
 223        int i;
 224        struct cpuidle_device *dev;
 225
 226        for_each_possible_cpu(i) {
 227                dev = per_cpu_ptr(pseries_cpuidle_devices, i);
 228                cpuidle_unregister_device(dev);
 229        }
 230
 231        free_percpu(pseries_cpuidle_devices);
 232        return;
 233}
 234
 235/* pseries_idle_devices_init()
 236 * allocate, initialize and register cpuidle device
 237 */
 238static int pseries_idle_devices_init(void)
 239{
 240        int i;
 241        struct cpuidle_driver *drv = &pseries_idle_driver;
 242        struct cpuidle_device *dev;
 243
 244        pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
 245        if (pseries_cpuidle_devices == NULL)
 246                return -ENOMEM;
 247
 248        for_each_possible_cpu(i) {
 249                dev = per_cpu_ptr(pseries_cpuidle_devices, i);
 250                dev->state_count = drv->state_count;
 251                dev->cpu = i;
 252                if (cpuidle_register_device(dev)) {
 253                        printk(KERN_DEBUG \
 254                                "cpuidle_register_device %d failed!\n", i);
 255                        return -EIO;
 256                }
 257        }
 258
 259        return 0;
 260}
 261
 262/*
 263 * pseries_idle_probe()
 264 * Choose state table for shared versus dedicated partition
 265 */
 266static int pseries_idle_probe(void)
 267{
 268
 269        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
 270                return -ENODEV;
 271
 272        if (cpuidle_disable != IDLE_NO_OVERRIDE)
 273                return -ENODEV;
 274
 275        if (max_idle_state == 0) {
 276                printk(KERN_DEBUG "pseries processor idle disabled.\n");
 277                return -EPERM;
 278        }
 279
 280        if (get_lppaca()->shared_proc)
 281                cpuidle_state_table = shared_states;
 282        else
 283                cpuidle_state_table = dedicated_states;
 284
 285        return 0;
 286}
 287
 288static int __init pseries_processor_idle_init(void)
 289{
 290        int retval;
 291
 292        retval = pseries_idle_probe();
 293        if (retval)
 294                return retval;
 295
 296        pseries_cpuidle_driver_init();
 297        retval = cpuidle_register_driver(&pseries_idle_driver);
 298        if (retval) {
 299                printk(KERN_DEBUG "Registration of pseries driver failed.\n");
 300                return retval;
 301        }
 302
 303        retval = pseries_idle_devices_init();
 304        if (retval) {
 305                pseries_idle_devices_uninit();
 306                cpuidle_unregister_driver(&pseries_idle_driver);
 307                return retval;
 308        }
 309
 310        printk(KERN_DEBUG "pseries_idle_driver registered\n");
 311
 312        return 0;
 313}
 314
 315static void __exit pseries_processor_idle_exit(void)
 316{
 317
 318        pseries_idle_devices_uninit();
 319        cpuidle_unregister_driver(&pseries_idle_driver);
 320
 321        return;
 322}
 323
 324module_init(pseries_processor_idle_init);
 325module_exit(pseries_processor_idle_exit);
 326
 327MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
 328MODULE_DESCRIPTION("Cpuidle driver for POWER");
 329MODULE_LICENSE("GPL");
 330