   1/*
   2 *  linux/drivers/cpufreq/cpufreq.c
   3 *
   4 *  Copyright (C) 2001 Russell King
   5 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
   6 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
   7 *
   8 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
   9 *      Added handling for CPU hotplug
  10 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
  11 *      Fix handling for CPU hotplug -- affected CPUs
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License version 2 as
  15 * published by the Free Software Foundation.
  16 */
  17
  18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  19
  20#include <linux/cpu.h>
  21#include <linux/cpufreq.h>
  22#include <linux/delay.h>
  23#include <linux/device.h>
  24#include <linux/init.h>
  25#include <linux/kernel_stat.h>
  26#include <linux/module.h>
  27#include <linux/mutex.h>
  28#include <linux/slab.h>
  29#include <linux/syscore_ops.h>
  30#include <linux/tick.h>
  31#include <trace/events/power.h>
  32
  33/**
  34 * The "cpufreq driver" - the arch- or hardware-dependent low
   35 * level driver of CPUFreq support, and its rwlock. This lock
  36 * also protects the cpufreq_cpu_data array.
  37 */
  38static struct cpufreq_driver *cpufreq_driver;
  39static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
  40static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
  41static DEFINE_RWLOCK(cpufreq_driver_lock);
  42static DEFINE_MUTEX(cpufreq_governor_lock);
  43static LIST_HEAD(cpufreq_policy_list);
  44
  45#ifdef CONFIG_HOTPLUG_CPU
  46/* This one keeps track of the previously set governor of a removed CPU */
  47static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
  48#endif
  49
  50/*
  51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
  52 * all cpufreq/hotplug/workqueue/etc related lock issues.
  53 *
  54 * The rules for this semaphore:
  55 * - Any routine that wants to read from the policy structure will
  56 *   do a down_read on this semaphore.
  57 * - Any routine that will write to the policy structure and/or may take away
  58 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
  59 *   mode before doing so.
  60 *
  61 * Additional rules:
   62 * - Governor routines that can be called in the cpufreq hotplug path must not
   63 *   take this semaphore, as the top-level hotplug notifier handler takes it.
  64 * - Lock should not be held across
  65 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  66 */
  67static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
  68
  69#define lock_policy_rwsem(mode, cpu)                                    \
  70static int lock_policy_rwsem_##mode(int cpu)                            \
  71{                                                                       \
  72        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
  73        BUG_ON(!policy);                                                \
  74        down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));           \
  75                                                                        \
  76        return 0;                                                       \
  77}
  78
  79lock_policy_rwsem(read, cpu);
  80lock_policy_rwsem(write, cpu);
  81
  82#define unlock_policy_rwsem(mode, cpu)                                  \
  83static void unlock_policy_rwsem_##mode(int cpu)                         \
  84{                                                                       \
  85        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
  86        BUG_ON(!policy);                                                \
  87        up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));             \
  88}
  89
  90unlock_policy_rwsem(read, cpu);
  91unlock_policy_rwsem(write, cpu);
  92
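/*
 * Illustrative usage sketch, not part of the original file: how a reader
 * is expected to use the helpers above around policy accesses. The helper
 * name example_read_cur_freq() is hypothetical.
 */
static unsigned int __maybe_unused example_read_cur_freq(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned int cur;

        if (lock_policy_rwsem_read(cpu) < 0)
                return 0;

        policy = per_cpu(cpufreq_cpu_data, cpu);
        cur = policy->cur;      /* fields are stable while the rwsem is held */

        unlock_policy_rwsem_read(cpu);
        return cur;
}
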
  93/*
   94 * rwsem to guarantee that the cpufreq driver module doesn't unload
   95 * during critical sections
  96 */
  97static DECLARE_RWSEM(cpufreq_rwsem);
  98
  99/* internal prototypes */
 100static int __cpufreq_governor(struct cpufreq_policy *policy,
 101                unsigned int event);
 102static unsigned int __cpufreq_get(unsigned int cpu);
 103static void handle_update(struct work_struct *work);
 104
 105/**
 106 * Two notifier lists: the "policy" list is involved in the
 107 * validation process for a new CPU frequency policy; the
 108 * "transition" list for kernel code that needs to handle
 109 * changes to devices when the CPU clock speed changes.
 110 * The mutex locks both lists.
 111 */
 112static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
 113static struct srcu_notifier_head cpufreq_transition_notifier_list;
 114
 115static bool init_cpufreq_transition_notifier_list_called;
 116static int __init init_cpufreq_transition_notifier_list(void)
 117{
 118        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
 119        init_cpufreq_transition_notifier_list_called = true;
 120        return 0;
 121}
 122pure_initcall(init_cpufreq_transition_notifier_list);
 123
 124static int off __read_mostly;
 125static int cpufreq_disabled(void)
 126{
 127        return off;
 128}
 129void disable_cpufreq(void)
 130{
 131        off = 1;
 132}
 133static LIST_HEAD(cpufreq_governor_list);
 134static DEFINE_MUTEX(cpufreq_governor_mutex);
 135
 136bool have_governor_per_policy(void)
 137{
 138        return cpufreq_driver->have_governor_per_policy;
 139}
 140EXPORT_SYMBOL_GPL(have_governor_per_policy);
 141
 142struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 143{
 144        if (have_governor_per_policy())
 145                return &policy->kobj;
 146        else
 147                return cpufreq_global_kobject;
 148}
 149EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
 150
 151static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 152{
 153        u64 idle_time;
 154        u64 cur_wall_time;
 155        u64 busy_time;
 156
 157        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
 158
 159        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
 160        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
 161        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
 162        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
 163        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
 164        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
 165
 166        idle_time = cur_wall_time - busy_time;
 167        if (wall)
 168                *wall = cputime_to_usecs(cur_wall_time);
 169
 170        return cputime_to_usecs(idle_time);
 171}
 172
 173u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 174{
 175        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
 176
 177        if (idle_time == -1ULL)
 178                return get_cpu_idle_time_jiffy(cpu, wall);
 179        else if (!io_busy)
 180                idle_time += get_cpu_iowait_time_us(cpu, wall);
 181
 182        return idle_time;
 183}
 184EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 185
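/*
 * Illustrative sketch, not part of the original file: how a governor-style
 * caller might turn two get_cpu_idle_time() samples into a busy percentage.
 * All names are hypothetical; deltas are assumed to fit in unsigned int
 * (i.e. sampling intervals of at most a few seconds).
 */
static unsigned int __maybe_unused example_cpu_busy_percent(unsigned int cpu,
                u64 *prev_idle, u64 *prev_wall)
{
        u64 idle, wall;
        unsigned int idle_delta, wall_delta;

        idle = get_cpu_idle_time(cpu, &wall, 0);
        idle_delta = (unsigned int)(idle - *prev_idle);
        wall_delta = (unsigned int)(wall - *prev_wall);
        *prev_idle = idle;
        *prev_wall = wall;

        if (!wall_delta || wall_delta < idle_delta)
                return 0;

        /* busy time as a percentage of elapsed wall time */
        return 100 * (wall_delta - idle_delta) / wall_delta;
}
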
 186struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 187{
 188        struct cpufreq_policy *policy = NULL;
 189        unsigned long flags;
 190
 191        if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
 192                return NULL;
 193
 194        if (!down_read_trylock(&cpufreq_rwsem))
 195                return NULL;
 196
 197        /* get the cpufreq driver */
 198        read_lock_irqsave(&cpufreq_driver_lock, flags);
 199
 200        if (cpufreq_driver) {
 201                /* get the CPU */
 202                policy = per_cpu(cpufreq_cpu_data, cpu);
 203                if (policy)
 204                        kobject_get(&policy->kobj);
 205        }
 206
 207        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 208
 209        if (!policy)
 210                up_read(&cpufreq_rwsem);
 211
 212        return policy;
 213}
 214EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 215
 216void cpufreq_cpu_put(struct cpufreq_policy *policy)
 217{
 218        if (cpufreq_disabled())
 219                return;
 220
 221        kobject_put(&policy->kobj);
 222        up_read(&cpufreq_rwsem);
 223}
 224EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 225
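/*
 * Illustrative sketch, not part of the original file: the
 * cpufreq_cpu_get()/cpufreq_cpu_put() contract. A successful get returns
 * with a kobject reference held and cpufreq_rwsem read-locked; both are
 * dropped by the matching put. example_policy_max() is hypothetical.
 */
static unsigned int __maybe_unused example_policy_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int max = 0;

        if (policy) {
                max = policy->max;
                cpufreq_cpu_put(policy);        /* always balance the get */
        }
        return max;
}
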
 226/*********************************************************************
 227 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 228 *********************************************************************/
 229
 230/**
 231 * adjust_jiffies - adjust the system "loops_per_jiffy"
 232 *
 233 * This function alters the system "loops_per_jiffy" for the clock
 234 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 235 * systems as each CPU might be scaled differently. So, use the arch
 236 * per-CPU loops_per_jiffy value wherever possible.
 237 */
 238#ifndef CONFIG_SMP
 239static unsigned long l_p_j_ref;
 240static unsigned int l_p_j_ref_freq;
 241
 242static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 243{
 244        if (ci->flags & CPUFREQ_CONST_LOOPS)
 245                return;
 246
 247        if (!l_p_j_ref_freq) {
 248                l_p_j_ref = loops_per_jiffy;
 249                l_p_j_ref_freq = ci->old;
 250                pr_debug("saving %lu as reference value for loops_per_jiffy; "
 251                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
 252        }
 253        if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
 254            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
 255                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 256                                                                ci->new);
 257                pr_debug("scaling loops_per_jiffy to %lu "
 258                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
 259        }
 260}
 261#else
 262static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 263{
 264        return;
 265}
 266#endif
 267
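/*
 * Worked example (added for illustration): cpufreq_scale(old, div, mult)
 * rescales a reference value linearly with frequency. With a saved
 * l_p_j_ref of 4000000 recorded at l_p_j_ref_freq = 1000000 kHz, a
 * transition to 500000 kHz gives
 *
 *      loops_per_jiffy = cpufreq_scale(4000000, 1000000, 500000)
 *                      = 4000000 * 500000 / 1000000 = 2000000
 *
 * (the helper widens to 64 bits internally on 32-bit kernels to avoid
 * overflow in the intermediate product).
 */
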
 268static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 269                struct cpufreq_freqs *freqs, unsigned int state)
 270{
 271        BUG_ON(irqs_disabled());
 272
 273        if (cpufreq_disabled())
 274                return;
 275
 276        freqs->flags = cpufreq_driver->flags;
 277        pr_debug("notification %u of frequency transition to %u kHz\n",
 278                state, freqs->new);
 279
 280        switch (state) {
 281
 282        case CPUFREQ_PRECHANGE:
 283                /* detect if the driver reported a value as "old frequency"
 284                 * which is not equal to what the cpufreq core thinks is
 285                 * "old frequency".
 286                 */
 287                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 288                        if ((policy) && (policy->cpu == freqs->cpu) &&
 289                            (policy->cur) && (policy->cur != freqs->old)) {
 290                                pr_debug("Warning: CPU frequency is"
  291                                        " %u kHz, cpufreq assumed %u kHz.\n",
 292                                        freqs->old, policy->cur);
 293                                freqs->old = policy->cur;
 294                        }
 295                }
 296                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 297                                CPUFREQ_PRECHANGE, freqs);
 298                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
 299                break;
 300
 301        case CPUFREQ_POSTCHANGE:
 302                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
  303                pr_debug("FREQ: %lu - CPU: %lu\n", (unsigned long)freqs->new,
 304                        (unsigned long)freqs->cpu);
 305                trace_cpu_frequency(freqs->new, freqs->cpu);
 306                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
 307                                CPUFREQ_POSTCHANGE, freqs);
 308                if (likely(policy) && likely(policy->cpu == freqs->cpu))
 309                        policy->cur = freqs->new;
 310                break;
 311        }
 312}
 313
 314/**
 315 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 316 * on frequency transition.
 317 *
 318 * This function calls the transition notifiers and the "adjust_jiffies"
 319 * function. It is called twice on all CPU frequency changes that have
 320 * external effects.
 321 */
 322void cpufreq_notify_transition(struct cpufreq_policy *policy,
 323                struct cpufreq_freqs *freqs, unsigned int state)
 324{
 325        for_each_cpu(freqs->cpu, policy->cpus)
 326                __cpufreq_notify_transition(policy, freqs, state);
 327}
 328EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 329
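/*
 * Illustrative driver-side sketch, not part of the original file: a
 * ->target() implementation is expected to bracket the actual hardware
 * change with PRECHANGE/POSTCHANGE notifications. Names are hypothetical.
 */
static int __maybe_unused example_driver_target(struct cpufreq_policy *policy,
                unsigned int new_freq)
{
        struct cpufreq_freqs freqs = {
                .old = policy->cur,
                .new = new_freq,
        };

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
        /* ... program the hardware to new_freq here ... */
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

        return 0;
}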
 330
 331/*********************************************************************
 332 *                          SYSFS INTERFACE                          *
 333 *********************************************************************/
 334
 335static struct cpufreq_governor *__find_governor(const char *str_governor)
 336{
 337        struct cpufreq_governor *t;
 338
 339        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
 340                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
 341                        return t;
 342
 343        return NULL;
 344}
 345
 346/**
 347 * cpufreq_parse_governor - parse a governor string
 348 */
 349static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
 350                                struct cpufreq_governor **governor)
 351{
 352        int err = -EINVAL;
 353
 354        if (!cpufreq_driver)
 355                goto out;
 356
 357        if (cpufreq_driver->setpolicy) {
 358                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
 359                        *policy = CPUFREQ_POLICY_PERFORMANCE;
 360                        err = 0;
 361                } else if (!strnicmp(str_governor, "powersave",
 362                                                CPUFREQ_NAME_LEN)) {
 363                        *policy = CPUFREQ_POLICY_POWERSAVE;
 364                        err = 0;
 365                }
 366        } else if (cpufreq_driver->target) {
 367                struct cpufreq_governor *t;
 368
 369                mutex_lock(&cpufreq_governor_mutex);
 370
 371                t = __find_governor(str_governor);
 372
 373                if (t == NULL) {
 374                        int ret;
 375
 376                        mutex_unlock(&cpufreq_governor_mutex);
 377                        ret = request_module("cpufreq_%s", str_governor);
 378                        mutex_lock(&cpufreq_governor_mutex);
 379
 380                        if (ret == 0)
 381                                t = __find_governor(str_governor);
 382                }
 383
 384                if (t != NULL) {
 385                        *governor = t;
 386                        err = 0;
 387                }
 388
 389                mutex_unlock(&cpufreq_governor_mutex);
 390        }
 391out:
 392        return err;
 393}
 394
 395/**
 396 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 397 * print out cpufreq information
 398 *
  399 * Write out information from the policy struct of the given CPU; the
  400 * object must be of type "unsigned int".
 401 */
 402
 403#define show_one(file_name, object)                     \
 404static ssize_t show_##file_name                         \
 405(struct cpufreq_policy *policy, char *buf)              \
 406{                                                       \
 407        return sprintf(buf, "%u\n", policy->object);    \
 408}
 409
 410show_one(cpuinfo_min_freq, cpuinfo.min_freq);
 411show_one(cpuinfo_max_freq, cpuinfo.max_freq);
 412show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 413show_one(scaling_min_freq, min);
 414show_one(scaling_max_freq, max);
 415show_one(scaling_cur_freq, cur);
 416
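/*
 * For reference (expansion sketch, added for illustration):
 * show_one(scaling_min_freq, min) above expands to
 *
 *      static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy,
 *                                           char *buf)
 *      {
 *              return sprintf(buf, "%u\n", policy->min);
 *      }
 */
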
 417static int __cpufreq_set_policy(struct cpufreq_policy *policy,
 418                                struct cpufreq_policy *new_policy);
 419
 420/**
 421 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 422 */
 423#define store_one(file_name, object)                    \
 424static ssize_t store_##file_name                                        \
 425(struct cpufreq_policy *policy, const char *buf, size_t count)          \
 426{                                                                       \
 427        int ret;                                                        \
 428        struct cpufreq_policy new_policy;                               \
 429                                                                        \
 430        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
 431        if (ret)                                                        \
 432                return -EINVAL;                                         \
 433                                                                        \
 434        ret = sscanf(buf, "%u", &new_policy.object);                    \
 435        if (ret != 1)                                                   \
 436                return -EINVAL;                                         \
 437                                                                        \
 438        ret = __cpufreq_set_policy(policy, &new_policy);                \
 439        policy->user_policy.object = policy->object;                    \
 440                                                                        \
 441        return ret ? ret : count;                                       \
 442}
 443
 444store_one(scaling_min_freq, min);
 445store_one(scaling_max_freq, max);
 446
 447/**
 448 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 449 */
 450static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
 451                                        char *buf)
 452{
 453        unsigned int cur_freq = __cpufreq_get(policy->cpu);
 454        if (!cur_freq)
 455                return sprintf(buf, "<unknown>");
 456        return sprintf(buf, "%u\n", cur_freq);
 457}
 458
 459/**
 460 * show_scaling_governor - show the current policy for the specified CPU
 461 */
 462static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 463{
 464        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
 465                return sprintf(buf, "powersave\n");
 466        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
 467                return sprintf(buf, "performance\n");
 468        else if (policy->governor)
 469                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
 470                                policy->governor->name);
 471        return -EINVAL;
 472}
 473
 474/**
 475 * store_scaling_governor - store policy for the specified CPU
 476 */
 477static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
 478                                        const char *buf, size_t count)
 479{
 480        int ret;
 481        char    str_governor[16];
 482        struct cpufreq_policy new_policy;
 483
 484        ret = cpufreq_get_policy(&new_policy, policy->cpu);
 485        if (ret)
 486                return ret;
 487
 488        ret = sscanf(buf, "%15s", str_governor);
 489        if (ret != 1)
 490                return -EINVAL;
 491
 492        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
 493                                                &new_policy.governor))
 494                return -EINVAL;
 495
 496        /*
 497         * Do not use cpufreq_set_policy here or the user_policy.max
 498         * will be wrongly overridden
 499         */
 500        ret = __cpufreq_set_policy(policy, &new_policy);
 501
 502        policy->user_policy.policy = policy->policy;
 503        policy->user_policy.governor = policy->governor;
 504
 505        if (ret)
 506                return ret;
 507        else
 508                return count;
 509}
 510
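/*
 * Userspace view (added for illustration): the show/store pair above backs
 * the per-policy sysfs file
 * /sys/devices/system/cpu/cpuN/cpufreq/scaling_governor, so e.g.
 * "echo powersave > .../scaling_governor" ends up in
 * store_scaling_governor() with buf containing "powersave\n".
 */
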
 511/**
 512 * show_scaling_driver - show the cpufreq driver currently loaded
 513 */
 514static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
 515{
 516        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
 517}
 518
 519/**
 520 * show_scaling_available_governors - show the available CPUfreq governors
 521 */
 522static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
 523                                                char *buf)
 524{
 525        ssize_t i = 0;
 526        struct cpufreq_governor *t;
 527
 528        if (!cpufreq_driver->target) {
 529                i += sprintf(buf, "performance powersave");
 530                goto out;
 531        }
 532
 533        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 534                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
 535                    - (CPUFREQ_NAME_LEN + 2)))
 536                        goto out;
 537                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
 538        }
 539out:
 540        i += sprintf(&buf[i], "\n");
 541        return i;
 542}
 543
 544ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
 545{
 546        ssize_t i = 0;
 547        unsigned int cpu;
 548
 549        for_each_cpu(cpu, mask) {
 550                if (i)
 551                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 552                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
 553                if (i >= (PAGE_SIZE - 5))
 554                        break;
 555        }
 556        i += sprintf(&buf[i], "\n");
 557        return i;
 558}
 559EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
 560
 561/**
 562 * show_related_cpus - show the CPUs affected by each transition even if
 563 * hw coordination is in use
 564 */
 565static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 566{
 567        return cpufreq_show_cpus(policy->related_cpus, buf);
 568}
 569
 570/**
 571 * show_affected_cpus - show the CPUs affected by each transition
 572 */
 573static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
 574{
 575        return cpufreq_show_cpus(policy->cpus, buf);
 576}
 577
 578static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
 579                                        const char *buf, size_t count)
 580{
 581        unsigned int freq = 0;
 582        unsigned int ret;
 583
 584        if (!policy->governor || !policy->governor->store_setspeed)
 585                return -EINVAL;
 586
 587        ret = sscanf(buf, "%u", &freq);
 588        if (ret != 1)
 589                return -EINVAL;
 590
 591        policy->governor->store_setspeed(policy, freq);
 592
 593        return count;
 594}
 595
 596static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
 597{
 598        if (!policy->governor || !policy->governor->show_setspeed)
 599                return sprintf(buf, "<unsupported>\n");
 600
 601        return policy->governor->show_setspeed(policy, buf);
 602}
 603
 604/**
 605 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 606 */
 607static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
 608{
 609        unsigned int limit;
 610        int ret;
 611        if (cpufreq_driver->bios_limit) {
 612                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
 613                if (!ret)
 614                        return sprintf(buf, "%u\n", limit);
 615        }
 616        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
 617}
 618
 619cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
 620cpufreq_freq_attr_ro(cpuinfo_min_freq);
 621cpufreq_freq_attr_ro(cpuinfo_max_freq);
 622cpufreq_freq_attr_ro(cpuinfo_transition_latency);
 623cpufreq_freq_attr_ro(scaling_available_governors);
 624cpufreq_freq_attr_ro(scaling_driver);
 625cpufreq_freq_attr_ro(scaling_cur_freq);
 626cpufreq_freq_attr_ro(bios_limit);
 627cpufreq_freq_attr_ro(related_cpus);
 628cpufreq_freq_attr_ro(affected_cpus);
 629cpufreq_freq_attr_rw(scaling_min_freq);
 630cpufreq_freq_attr_rw(scaling_max_freq);
 631cpufreq_freq_attr_rw(scaling_governor);
 632cpufreq_freq_attr_rw(scaling_setspeed);
 633
 634static struct attribute *default_attrs[] = {
 635        &cpuinfo_min_freq.attr,
 636        &cpuinfo_max_freq.attr,
 637        &cpuinfo_transition_latency.attr,
 638        &scaling_min_freq.attr,
 639        &scaling_max_freq.attr,
 640        &affected_cpus.attr,
 641        &related_cpus.attr,
 642        &scaling_governor.attr,
 643        &scaling_driver.attr,
 644        &scaling_available_governors.attr,
 645        &scaling_setspeed.attr,
 646        NULL
 647};
 648
 649#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 650#define to_attr(a) container_of(a, struct freq_attr, attr)
 651
 652static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 653{
 654        struct cpufreq_policy *policy = to_policy(kobj);
 655        struct freq_attr *fattr = to_attr(attr);
 656        ssize_t ret = -EINVAL;
 657
 658        if (!down_read_trylock(&cpufreq_rwsem))
 659                goto exit;
 660
 661        if (lock_policy_rwsem_read(policy->cpu) < 0)
 662                goto up_read;
 663
 664        if (fattr->show)
 665                ret = fattr->show(policy, buf);
 666        else
 667                ret = -EIO;
 668
 669        unlock_policy_rwsem_read(policy->cpu);
 670
 671up_read:
 672        up_read(&cpufreq_rwsem);
 673exit:
 674        return ret;
 675}
 676
 677static ssize_t store(struct kobject *kobj, struct attribute *attr,
 678                     const char *buf, size_t count)
 679{
 680        struct cpufreq_policy *policy = to_policy(kobj);
 681        struct freq_attr *fattr = to_attr(attr);
 682        ssize_t ret = -EINVAL;
 683
 684        get_online_cpus();
 685
 686        if (!cpu_online(policy->cpu))
 687                goto unlock;
 688
 689        if (!down_read_trylock(&cpufreq_rwsem))
 690                goto unlock;
 691
 692        if (lock_policy_rwsem_write(policy->cpu) < 0)
 693                goto up_read;
 694
 695        if (fattr->store)
 696                ret = fattr->store(policy, buf, count);
 697        else
 698                ret = -EIO;
 699
 700        unlock_policy_rwsem_write(policy->cpu);
 701
 702up_read:
 703        up_read(&cpufreq_rwsem);
 704unlock:
 705        put_online_cpus();
 706
 707        return ret;
 708}
 709
 710static void cpufreq_sysfs_release(struct kobject *kobj)
 711{
 712        struct cpufreq_policy *policy = to_policy(kobj);
 713        pr_debug("last reference is dropped\n");
 714        complete(&policy->kobj_unregister);
 715}
 716
 717static const struct sysfs_ops sysfs_ops = {
 718        .show   = show,
 719        .store  = store,
 720};
 721
 722static struct kobj_type ktype_cpufreq = {
 723        .sysfs_ops      = &sysfs_ops,
 724        .default_attrs  = default_attrs,
 725        .release        = cpufreq_sysfs_release,
 726};
 727
 728struct kobject *cpufreq_global_kobject;
 729EXPORT_SYMBOL(cpufreq_global_kobject);
 730
 731static int cpufreq_global_kobject_usage;
 732
 733int cpufreq_get_global_kobject(void)
 734{
 735        if (!cpufreq_global_kobject_usage++)
 736                return kobject_add(cpufreq_global_kobject,
 737                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
 738
 739        return 0;
 740}
 741EXPORT_SYMBOL(cpufreq_get_global_kobject);
 742
 743void cpufreq_put_global_kobject(void)
 744{
 745        if (!--cpufreq_global_kobject_usage)
 746                kobject_del(cpufreq_global_kobject);
 747}
 748EXPORT_SYMBOL(cpufreq_put_global_kobject);
 749
 750int cpufreq_sysfs_create_file(const struct attribute *attr)
 751{
 752        int ret = cpufreq_get_global_kobject();
 753
 754        if (!ret) {
 755                ret = sysfs_create_file(cpufreq_global_kobject, attr);
 756                if (ret)
 757                        cpufreq_put_global_kobject();
 758        }
 759
 760        return ret;
 761}
 762EXPORT_SYMBOL(cpufreq_sysfs_create_file);
 763
 764void cpufreq_sysfs_remove_file(const struct attribute *attr)
 765{
 766        sysfs_remove_file(cpufreq_global_kobject, attr);
 767        cpufreq_put_global_kobject();
 768}
 769EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
 770
 771/* symlink affected CPUs */
 772static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 773{
 774        unsigned int j;
 775        int ret = 0;
 776
 777        for_each_cpu(j, policy->cpus) {
 778                struct device *cpu_dev;
 779
 780                if (j == policy->cpu)
 781                        continue;
 782
 783                pr_debug("Adding link for CPU: %u\n", j);
 784                cpu_dev = get_cpu_device(j);
 785                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
 786                                        "cpufreq");
 787                if (ret)
 788                        break;
 789        }
 790        return ret;
 791}
 792
 793static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
 794                                     struct device *dev)
 795{
 796        struct freq_attr **drv_attr;
 797        int ret = 0;
 798
 799        /* prepare interface data */
 800        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
 801                                   &dev->kobj, "cpufreq");
 802        if (ret)
 803                return ret;
 804
 805        /* set up files for this cpu device */
 806        drv_attr = cpufreq_driver->attr;
 807        while ((drv_attr) && (*drv_attr)) {
 808                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
 809                if (ret)
 810                        goto err_out_kobj_put;
 811                drv_attr++;
 812        }
 813        if (cpufreq_driver->get) {
 814                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
 815                if (ret)
 816                        goto err_out_kobj_put;
 817        }
 818        if (cpufreq_driver->target) {
 819                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 820                if (ret)
 821                        goto err_out_kobj_put;
 822        }
 823        if (cpufreq_driver->bios_limit) {
 824                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
 825                if (ret)
 826                        goto err_out_kobj_put;
 827        }
 828
 829        ret = cpufreq_add_dev_symlink(policy);
 830        if (ret)
 831                goto err_out_kobj_put;
 832
 833        return ret;
 834
 835err_out_kobj_put:
 836        kobject_put(&policy->kobj);
 837        wait_for_completion(&policy->kobj_unregister);
 838        return ret;
 839}
 840
 841static void cpufreq_init_policy(struct cpufreq_policy *policy)
 842{
 843        struct cpufreq_policy new_policy;
 844        int ret = 0;
 845
 846        memcpy(&new_policy, policy, sizeof(*policy));
  847        /* ensure that the starting sequence is run in __cpufreq_set_policy */
 848        policy->governor = NULL;
 849
 850        /* set default policy */
 851        ret = __cpufreq_set_policy(policy, &new_policy);
 852        policy->user_policy.policy = policy->policy;
 853        policy->user_policy.governor = policy->governor;
 854
 855        if (ret) {
 856                pr_debug("setting policy failed\n");
 857                if (cpufreq_driver->exit)
 858                        cpufreq_driver->exit(policy);
 859        }
 860}
 861
 862#ifdef CONFIG_HOTPLUG_CPU
 863static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
 864                                  unsigned int cpu, struct device *dev,
 865                                  bool frozen)
 866{
 867        int ret = 0, has_target = !!cpufreq_driver->target;
 868        unsigned long flags;
 869
 870        if (has_target) {
 871                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
 872                if (ret) {
 873                        pr_err("%s: Failed to stop governor\n", __func__);
 874                        return ret;
 875                }
 876        }
 877
 878        lock_policy_rwsem_write(policy->cpu);
 879
 880        write_lock_irqsave(&cpufreq_driver_lock, flags);
 881
 882        cpumask_set_cpu(cpu, policy->cpus);
 883        per_cpu(cpufreq_cpu_data, cpu) = policy;
 884        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 885
 886        unlock_policy_rwsem_write(policy->cpu);
 887
 888        if (has_target) {
 889                if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
 890                        (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
 891                        pr_err("%s: Failed to start governor\n", __func__);
 892                        return ret;
 893                }
 894        }
 895
 896        /* Don't touch sysfs links during light-weight init */
 897        if (!frozen)
 898                ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 899
 900        return ret;
 901}
 902#endif
 903
 904static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
 905{
 906        struct cpufreq_policy *policy;
 907        unsigned long flags;
 908
 909        read_lock_irqsave(&cpufreq_driver_lock, flags);
 910
 911        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
 912
 913        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 914
 915        return policy;
 916}
 917
 918static struct cpufreq_policy *cpufreq_policy_alloc(void)
 919{
 920        struct cpufreq_policy *policy;
 921
 922        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
 923        if (!policy)
 924                return NULL;
 925
 926        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
 927                goto err_free_policy;
 928
 929        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
 930                goto err_free_cpumask;
 931
 932        INIT_LIST_HEAD(&policy->policy_list);
 933        return policy;
 934
 935err_free_cpumask:
 936        free_cpumask_var(policy->cpus);
 937err_free_policy:
 938        kfree(policy);
 939
 940        return NULL;
 941}
 942
 943static void cpufreq_policy_free(struct cpufreq_policy *policy)
 944{
 945        free_cpumask_var(policy->related_cpus);
 946        free_cpumask_var(policy->cpus);
 947        kfree(policy);
 948}
 949
 950static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 951{
 952        if (cpu == policy->cpu)
 953                return;
 954
 955        /*
  956         * Take the locks directly, as lock_policy_rwsem_write wouldn't work
  957         * here. Locking only the old cpu is enough: contention can occur
  958         * only after policy->cpu is changed, and once it is changed, other
  959         * threads will acquire the lock for the new cpu, by which time the
  960         * policy is already updated.
 961         */
 962        down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
 963
 964        policy->last_cpu = policy->cpu;
 965        policy->cpu = cpu;
 966
 967        up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
 968
 969#ifdef CONFIG_CPU_FREQ_TABLE
 970        cpufreq_frequency_table_update_policy_cpu(policy);
 971#endif
 972        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 973                        CPUFREQ_UPDATE_POLICY_CPU, policy);
 974}
 975
 976static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
 977                             bool frozen)
 978{
 979        unsigned int j, cpu = dev->id;
 980        int ret = -ENOMEM;
 981        struct cpufreq_policy *policy;
 982        unsigned long flags;
 983#ifdef CONFIG_HOTPLUG_CPU
 984        struct cpufreq_policy *tpolicy;
 985        struct cpufreq_governor *gov;
 986#endif
 987
 988        if (cpu_is_offline(cpu))
 989                return 0;
 990
 991        pr_debug("adding CPU %u\n", cpu);
 992
 993#ifdef CONFIG_SMP
 994        /* check whether a different CPU already registered this
 995         * CPU because it is in the same boat. */
 996        policy = cpufreq_cpu_get(cpu);
 997        if (unlikely(policy)) {
 998                cpufreq_cpu_put(policy);
 999                return 0;
1000        }
1001#endif
1002
1003        if (!down_read_trylock(&cpufreq_rwsem))
1004                return 0;
1005
1006#ifdef CONFIG_HOTPLUG_CPU
1007        /* Check if this cpu was hot-unplugged earlier and has siblings */
1008        read_lock_irqsave(&cpufreq_driver_lock, flags);
1009        list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1010                if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1011                        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1012                        ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
1013                        up_read(&cpufreq_rwsem);
1014                        return ret;
1015                }
1016        }
1017        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1018#endif
1019
1020        if (frozen)
1021                /* Restore the saved policy when doing light-weight init */
1022                policy = cpufreq_policy_restore(cpu);
1023        else
1024                policy = cpufreq_policy_alloc();
1025
1026        if (!policy)
1027                goto nomem_out;
1028
1029
1030        /*
1031         * In the resume path, since we restore a saved policy, the assignment
1032         * to policy->cpu is like an update of the existing policy, rather than
1033         * the creation of a brand new one. So we need to perform this update
1034         * by invoking update_policy_cpu().
1035         */
1036        if (frozen && cpu != policy->cpu)
1037                update_policy_cpu(policy, cpu);
1038        else
1039                policy->cpu = cpu;
1040
1041        policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1042        cpumask_copy(policy->cpus, cpumask_of(cpu));
1043
1044        init_completion(&policy->kobj_unregister);
1045        INIT_WORK(&policy->update, handle_update);
1046
 1047        /* Call the driver. From then on the driver must be able
 1048         * to accept all calls to ->verify and ->setpolicy for this CPU.
1049         */
1050        ret = cpufreq_driver->init(policy);
1051        if (ret) {
1052                pr_debug("initialization failed\n");
1053                goto err_set_policy_cpu;
1054        }
1055
 1056        /* related_cpus must at least contain policy->cpus */
1057        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1058
1059        /*
 1060         * The affected cpus must always be the ones that are online; we
 1061         * aren't managing offline cpus here.
1062         */
1063        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1064
1065        policy->user_policy.min = policy->min;
1066        policy->user_policy.max = policy->max;
1067
1068        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1069                                     CPUFREQ_START, policy);
1070
1071#ifdef CONFIG_HOTPLUG_CPU
1072        gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1073        if (gov) {
1074                policy->governor = gov;
1075                pr_debug("Restoring governor %s for cpu %d\n",
1076                       policy->governor->name, cpu);
1077        }
1078#endif
1079
1080        write_lock_irqsave(&cpufreq_driver_lock, flags);
1081        for_each_cpu(j, policy->cpus)
1082                per_cpu(cpufreq_cpu_data, j) = policy;
1083        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1084
1085        if (!frozen) {
1086                ret = cpufreq_add_dev_interface(policy, dev);
1087                if (ret)
1088                        goto err_out_unregister;
1089        }
1090
1091        write_lock_irqsave(&cpufreq_driver_lock, flags);
1092        list_add(&policy->policy_list, &cpufreq_policy_list);
1093        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1094
1095        cpufreq_init_policy(policy);
1096
1097        kobject_uevent(&policy->kobj, KOBJ_ADD);
1098        up_read(&cpufreq_rwsem);
1099
1100        pr_debug("initialization complete\n");
1101
1102        return 0;
1103
1104err_out_unregister:
1105        write_lock_irqsave(&cpufreq_driver_lock, flags);
1106        for_each_cpu(j, policy->cpus)
1107                per_cpu(cpufreq_cpu_data, j) = NULL;
1108        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1109
1110err_set_policy_cpu:
1111        cpufreq_policy_free(policy);
1112nomem_out:
1113        up_read(&cpufreq_rwsem);
1114
1115        return ret;
1116}
1117
1118/**
1119 * cpufreq_add_dev - add a CPU device
1120 *
1121 * Adds the cpufreq interface for a CPU device.
1122 *
1123 * The Oracle says: try running cpufreq registration/unregistration concurrently
 1124 * with cpu hotplugging and all hell will break loose. Tried to clean this
1125 * mess up, but more thorough testing is needed. - Mathieu
1126 */
1127static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1128{
1129        return __cpufreq_add_dev(dev, sif, false);
1130}
1131
1132static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1133                                           unsigned int old_cpu, bool frozen)
1134{
1135        struct device *cpu_dev;
1136        int ret;
1137
1138        /* first sibling now owns the new sysfs dir */
1139        cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1140
1141        /* Don't touch sysfs files during light-weight tear-down */
1142        if (frozen)
1143                return cpu_dev->id;
1144
1145        sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1146        ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1147        if (ret) {
 1148                pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1149
1150                WARN_ON(lock_policy_rwsem_write(old_cpu));
1151                cpumask_set_cpu(old_cpu, policy->cpus);
1152                unlock_policy_rwsem_write(old_cpu);
1153
1154                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1155                                        "cpufreq");
1156
1157                return -EINVAL;
1158        }
1159
1160        return cpu_dev->id;
1161}
1162
1163static int __cpufreq_remove_dev_prepare(struct device *dev,
1164                                        struct subsys_interface *sif,
1165                                        bool frozen)
1166{
1167        unsigned int cpu = dev->id, cpus;
1168        int new_cpu, ret;
1169        unsigned long flags;
1170        struct cpufreq_policy *policy;
1171
1172        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1173
1174        write_lock_irqsave(&cpufreq_driver_lock, flags);
1175
1176        policy = per_cpu(cpufreq_cpu_data, cpu);
1177
1178        /* Save the policy somewhere when doing a light-weight tear-down */
1179        if (frozen)
1180                per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1181
1182        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1183
1184        if (!policy) {
1185                pr_debug("%s: No cpu_data found\n", __func__);
1186                return -EINVAL;
1187        }
1188
1189        if (cpufreq_driver->target) {
1190                ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1191                if (ret) {
1192                        pr_err("%s: Failed to stop governor\n", __func__);
1193                        return ret;
1194                }
1195        }
1196
1197#ifdef CONFIG_HOTPLUG_CPU
1198        if (!cpufreq_driver->setpolicy)
1199                strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1200                        policy->governor->name, CPUFREQ_NAME_LEN);
1201#endif
1202
1203        lock_policy_rwsem_read(cpu);
1204        cpus = cpumask_weight(policy->cpus);
1205        unlock_policy_rwsem_read(cpu);
1206
1207        if (cpu != policy->cpu) {
1208                if (!frozen)
1209                        sysfs_remove_link(&dev->kobj, "cpufreq");
1210        } else if (cpus > 1) {
1211
1212                new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
1213                if (new_cpu >= 0) {
1214                        update_policy_cpu(policy, new_cpu);
1215
1216                        if (!frozen) {
1217                                pr_debug("%s: policy Kobject moved to cpu: %d "
 1218                                         "from: %d\n", __func__, new_cpu, cpu);
1219                        }
1220                }
1221        }
1222
1223        return 0;
1224}
1225
1226static int __cpufreq_remove_dev_finish(struct device *dev,
1227                                       struct subsys_interface *sif,
1228                                       bool frozen)
1229{
1230        unsigned int cpu = dev->id, cpus;
1231        int ret;
1232        unsigned long flags;
1233        struct cpufreq_policy *policy;
1234        struct kobject *kobj;
1235        struct completion *cmp;
1236
1237        read_lock_irqsave(&cpufreq_driver_lock, flags);
1238        policy = per_cpu(cpufreq_cpu_data, cpu);
1239        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1240
1241        if (!policy) {
1242                pr_debug("%s: No cpu_data found\n", __func__);
1243                return -EINVAL;
1244        }
1245
1246        WARN_ON(lock_policy_rwsem_write(cpu));
1247        cpus = cpumask_weight(policy->cpus);
1248
1249        if (cpus > 1)
1250                cpumask_clear_cpu(cpu, policy->cpus);
1251        unlock_policy_rwsem_write(cpu);
1252
1253        /* If cpu is last user of policy, free policy */
1254        if (cpus == 1) {
1255                if (cpufreq_driver->target) {
1256                        ret = __cpufreq_governor(policy,
1257                                        CPUFREQ_GOV_POLICY_EXIT);
1258                        if (ret) {
1259                                pr_err("%s: Failed to exit governor\n",
1260                                                __func__);
1261                                return ret;
1262                        }
1263                }
1264
1265                if (!frozen) {
1266                        lock_policy_rwsem_read(cpu);
1267                        kobj = &policy->kobj;
1268                        cmp = &policy->kobj_unregister;
1269                        unlock_policy_rwsem_read(cpu);
1270                        kobject_put(kobj);
1271
1272                        /*
1273                         * We need to make sure that the underlying kobj is
1274                         * actually not referenced anymore by anybody before we
1275                         * proceed with unloading.
1276                         */
1277                        pr_debug("waiting for dropping of refcount\n");
1278                        wait_for_completion(cmp);
1279                        pr_debug("wait complete\n");
1280                }
1281
1282                /*
1283                 * Perform the ->exit() even during light-weight tear-down,
1284                 * since this is a core component, and is essential for the
1285                 * subsequent light-weight ->init() to succeed.
1286                 */
1287                if (cpufreq_driver->exit)
1288                        cpufreq_driver->exit(policy);
1289
1290                /* Remove policy from list of active policies */
1291                write_lock_irqsave(&cpufreq_driver_lock, flags);
1292                list_del(&policy->policy_list);
1293                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1294
1295                if (!frozen)
1296                        cpufreq_policy_free(policy);
1297        } else {
1298                if (cpufreq_driver->target) {
1299                        if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1300                                        (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1301                                pr_err("%s: Failed to start governor\n",
1302                                                __func__);
1303                                return ret;
1304                        }
1305                }
1306        }
1307
1308        per_cpu(cpufreq_cpu_data, cpu) = NULL;
1309        return 0;
1310}
1311
1312/**
1313 * __cpufreq_remove_dev - remove a CPU device
1314 *
1315 * Removes the cpufreq interface for a CPU device.
1316 * Caller should already have policy_rwsem in write mode for this CPU.
1317 * This routine frees the rwsem before returning.
1318 */
1319static inline int __cpufreq_remove_dev(struct device *dev,
1320                                       struct subsys_interface *sif,
1321                                       bool frozen)
1322{
1323        int ret;
1324
1325        ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
1326
1327        if (!ret)
1328                ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
1329
1330        return ret;
1331}
1332
1333static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1334{
1335        unsigned int cpu = dev->id;
1336        int retval;
1337
1338        if (cpu_is_offline(cpu))
1339                return 0;
1340
1341        retval = __cpufreq_remove_dev(dev, sif, false);
1342        return retval;
1343}
1344
1345static void handle_update(struct work_struct *work)
1346{
1347        struct cpufreq_policy *policy =
1348                container_of(work, struct cpufreq_policy, update);
1349        unsigned int cpu = policy->cpu;
1350        pr_debug("handle_update for cpu %u called\n", cpu);
1351        cpufreq_update_policy(cpu);
1352}
1353
1354/**
 1355 *      cpufreq_out_of_sync - If the actual and saved CPU frequencies differ,
 1356 *      we're in deep trouble.
1357 *      @cpu: cpu number
1358 *      @old_freq: CPU frequency the kernel thinks the CPU runs at
1359 *      @new_freq: CPU frequency the CPU actually runs at
1360 *
 1361 *      We adjust to the current frequency first, and clean up later by
 1362 *      either calling cpufreq_update_policy() or scheduling handle_update().
1363 */
1364static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1365                                unsigned int new_freq)
1366{
1367        struct cpufreq_policy *policy;
1368        struct cpufreq_freqs freqs;
1369        unsigned long flags;
1370
 1371        pr_debug("Warning: CPU frequency out of sync: cpufreq thinks it is "
 1372               "%u kHz, but it is actually %u kHz\n", old_freq, new_freq);
1373
1374        freqs.old = old_freq;
1375        freqs.new = new_freq;
1376
1377        read_lock_irqsave(&cpufreq_driver_lock, flags);
1378        policy = per_cpu(cpufreq_cpu_data, cpu);
1379        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1380
1381        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1382        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1383}
1384
1385/**
1386 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1387 * @cpu: CPU number
1388 *
1389 * This is the last known freq, without actually getting it from the driver.
1390 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1391 */
1392unsigned int cpufreq_quick_get(unsigned int cpu)
1393{
1394        struct cpufreq_policy *policy;
1395        unsigned int ret_freq = 0;
1396
1397        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1398                return cpufreq_driver->get(cpu);
1399
1400        policy = cpufreq_cpu_get(cpu);
1401        if (policy) {
1402                ret_freq = policy->cur;
1403                cpufreq_cpu_put(policy);
1404        }
1405
1406        return ret_freq;
1407}
1408EXPORT_SYMBOL(cpufreq_quick_get);
1409
1410/**
1411 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1412 * @cpu: CPU number
1413 *
1414 * Just return the max possible frequency for a given CPU.
1415 */
1416unsigned int cpufreq_quick_get_max(unsigned int cpu)
1417{
1418        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1419        unsigned int ret_freq = 0;
1420
1421        if (policy) {
1422                ret_freq = policy->max;
1423                cpufreq_cpu_put(policy);
1424        }
1425
1426        return ret_freq;
1427}
1428EXPORT_SYMBOL(cpufreq_quick_get_max);
1429
1430static unsigned int __cpufreq_get(unsigned int cpu)
1431{
1432        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1433        unsigned int ret_freq = 0;
1434
1435        if (!cpufreq_driver->get)
1436                return ret_freq;
1437
1438        ret_freq = cpufreq_driver->get(cpu);
1439
1440        if (ret_freq && policy->cur &&
1441                !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
 1442                /* verify that no discrepancy exists between the actual
 1443                 * and the saved value */
1444                if (unlikely(ret_freq != policy->cur)) {
1445                        cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1446                        schedule_work(&policy->update);
1447                }
1448        }
1449
1450        return ret_freq;
1451}
1452
1453/**
1454 * cpufreq_get - get the current CPU frequency (in kHz)
1455 * @cpu: CPU number
1456 *
 1457 * Get the current (static) frequency of the CPU.
1458 */
1459unsigned int cpufreq_get(unsigned int cpu)
1460{
1461        unsigned int ret_freq = 0;
1462
1463        if (cpufreq_disabled() || !cpufreq_driver)
1464                return -ENOENT;
1465
1466        if (!down_read_trylock(&cpufreq_rwsem))
1467                return 0;
1468
1469        if (unlikely(lock_policy_rwsem_read(cpu)))
1470                goto out_policy;
1471
1472        ret_freq = __cpufreq_get(cpu);
1473
1474        unlock_policy_rwsem_read(cpu);
1475
1476out_policy:
1477        up_read(&cpufreq_rwsem);
1478
1479        return ret_freq;
1480}
1481EXPORT_SYMBOL(cpufreq_get);
1482
1483static struct subsys_interface cpufreq_interface = {
1484        .name           = "cpufreq",
1485        .subsys         = &cpu_subsys,
1486        .add_dev        = cpufreq_add_dev,
1487        .remove_dev     = cpufreq_remove_dev,
1488};
1489
1490/**
1491 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1492 *
1493 * This function is only executed for the boot processor.  The other CPUs
1494 * have been put offline by means of CPU hotplug.
1495 */
1496static int cpufreq_bp_suspend(void)
1497{
1498        int ret = 0;
1499
1500        int cpu = smp_processor_id();
1501        struct cpufreq_policy *policy;
1502
1503        pr_debug("suspending cpu %u\n", cpu);
1504
1505        /* If there's no policy for the boot CPU, we have nothing to do. */
1506        policy = cpufreq_cpu_get(cpu);
1507        if (!policy)
1508                return 0;
1509
1510        if (cpufreq_driver->suspend) {
1511                ret = cpufreq_driver->suspend(policy);
1512                if (ret)
1513                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1514                                        "step on CPU %u\n", policy->cpu);
1515        }
1516
1517        cpufreq_cpu_put(policy);
1518        return ret;
1519}
1520
1521/**
1522 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1523 *
1524 *      1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 1525 *      2.) schedule a call to cpufreq_update_policy() as soon as
 1526 *          interrupts are restored. It will verify that the current freq
 1527 *          is in sync with what we believe it to be. This is a bit later
 1528 *          than it should be, but nonetheless it's better than calling
1529 *          cpufreq_driver->get() here which might re-enable interrupts...
1530 *
1531 * This function is only executed for the boot CPU.  The other CPUs have not
1532 * been turned on yet.
1533 */
1534static void cpufreq_bp_resume(void)
1535{
1536        int ret = 0;
1537
1538        int cpu = smp_processor_id();
1539        struct cpufreq_policy *policy;
1540
1541        pr_debug("resuming cpu %u\n", cpu);
1542
1543        /* If there's no policy for the boot CPU, we have nothing to do. */
1544        policy = cpufreq_cpu_get(cpu);
1545        if (!policy)
1546                return;
1547
1548        if (cpufreq_driver->resume) {
1549                ret = cpufreq_driver->resume(policy);
1550                if (ret) {
1551                        printk(KERN_ERR "cpufreq: resume failed in ->resume "
1552                                        "step on CPU %u\n", policy->cpu);
1553                        goto fail;
1554                }
1555        }
1556
1557        schedule_work(&policy->update);
1558
1559fail:
1560        cpufreq_cpu_put(policy);
1561}
1562
1563static struct syscore_ops cpufreq_syscore_ops = {
1564        .suspend        = cpufreq_bp_suspend,
1565        .resume         = cpufreq_bp_resume,
1566};
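
/*
 * Driver-side sketch (hypothetical driver "mydrv", not part of this
 * file): cpufreq_bp_suspend()/cpufreq_bp_resume() above end up calling
 * these through cpufreq_driver->suspend/->resume, if the driver sets
 * them in its struct cpufreq_driver.
 */
static int mydrv_suspend(struct cpufreq_policy *policy)
{
        /* Quiesce the scaling hardware behind policy->cpu. */
        return 0;       /* non-zero is reported by cpufreq_bp_suspend() */
}

static int mydrv_resume(struct cpufreq_policy *policy)
{
        /*
         * Re-program the hardware; the core then schedules
         * policy->update to re-validate the current frequency.
         */
        return 0;
}
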
1567
1568/**
1569 *      cpufreq_get_current_driver - return current driver's name
1570 *
1571 *      Return the name string of the currently loaded cpufreq driver
1572 *      or NULL, if none.
1573 */
1574const char *cpufreq_get_current_driver(void)
1575{
1576        if (cpufreq_driver)
1577                return cpufreq_driver->name;
1578
1579        return NULL;
1580}
1581EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
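
/*
 * Trivial usage sketch (hypothetical helper, not part of this file).
 */
static void my_report_driver(void)
{
        const char *name = cpufreq_get_current_driver();

        pr_info("cpufreq driver: %s\n", name ? name : "none");
}
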
1582
1583/*********************************************************************
1584 *                     NOTIFIER LISTS INTERFACE                      *
1585 *********************************************************************/
1586
1587/**
1588 *      cpufreq_register_notifier - register a notifier with cpufreq
1589 *      @nb: notifier block to register
1590 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1591 *
1592 *      Add a notifier to one of two lists: either a list of notifiers
1593 *      that are called on clock rate changes (once before and once after
1594 *      the transition), or a list of notifiers that are called on
1595 *      changes in cpufreq policy.
1596 *
1597 *      This function may sleep, and has the same return conditions as
1598 *      blocking_notifier_chain_register.
1599 */
1600int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1601{
1602        int ret;
1603
1604        if (cpufreq_disabled())
1605                return -EINVAL;
1606
1607        WARN_ON(!init_cpufreq_transition_notifier_list_called);
1608
1609        switch (list) {
1610        case CPUFREQ_TRANSITION_NOTIFIER:
1611                ret = srcu_notifier_chain_register(
1612                                &cpufreq_transition_notifier_list, nb);
1613                break;
1614        case CPUFREQ_POLICY_NOTIFIER:
1615                ret = blocking_notifier_chain_register(
1616                                &cpufreq_policy_notifier_list, nb);
1617                break;
1618        default:
1619                ret = -EINVAL;
1620        }
1621
1622        return ret;
1623}
1624EXPORT_SYMBOL(cpufreq_register_notifier);
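
/*
 * Sketch of a transition notifier (hypothetical module, not part of
 * this file). The callback runs once before (CPUFREQ_PRECHANGE) and
 * once after (CPUFREQ_POSTCHANGE) every frequency change, with a
 * struct cpufreq_freqs * passed as data.
 */
static int my_transition_cb(struct notifier_block *nb,
                            unsigned long event, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (event == CPUFREQ_POSTCHANGE)
                pr_info("cpu%u: %u -> %u kHz\n",
                        freqs->cpu, freqs->old, freqs->new);
        return NOTIFY_OK;
}

static struct notifier_block my_transition_nb = {
        .notifier_call = my_transition_cb,
};

/*
 * Module init would then call:
 *      cpufreq_register_notifier(&my_transition_nb,
 *                                CPUFREQ_TRANSITION_NOTIFIER);
 */
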
1625
1626/**
1627 *      cpufreq_unregister_notifier - unregister a notifier with cpufreq
1628 *      @nb: notifier block to be unregistered
1629 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1630 *
1631 *      Remove a notifier from one of the cpufreq notifier lists.
1632 *
1633 *      This function may sleep, and has the same return conditions as
1634 *      blocking_notifier_chain_unregister.
1635 */
1636int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1637{
1638        int ret;
1639
1640        if (cpufreq_disabled())
1641                return -EINVAL;
1642
1643        switch (list) {
1644        case CPUFREQ_TRANSITION_NOTIFIER:
1645                ret = srcu_notifier_chain_unregister(
1646                                &cpufreq_transition_notifier_list, nb);
1647                break;
1648        case CPUFREQ_POLICY_NOTIFIER:
1649                ret = blocking_notifier_chain_unregister(
1650                                &cpufreq_policy_notifier_list, nb);
1651                break;
1652        default:
1653                ret = -EINVAL;
1654        }
1655
1656        return ret;
1657}
1658EXPORT_SYMBOL(cpufreq_unregister_notifier);
1659
1660
1661/*********************************************************************
1662 *                              GOVERNORS                            *
1663 *********************************************************************/
1664
1665int __cpufreq_driver_target(struct cpufreq_policy *policy,
1666                            unsigned int target_freq,
1667                            unsigned int relation)
1668{
1669        int retval = -EINVAL;
1670        unsigned int old_target_freq = target_freq;
1671
1672        if (cpufreq_disabled())
1673                return -ENODEV;
1674
1675        /* Make sure that target_freq is within supported range */
1676        if (target_freq > policy->max)
1677                target_freq = policy->max;
1678        if (target_freq < policy->min)
1679                target_freq = policy->min;
1680
1681        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1682                        policy->cpu, target_freq, relation, old_target_freq);
1683
1684        if (target_freq == policy->cur)
1685                return 0;
1686
1687        if (cpufreq_driver->target)
1688                retval = cpufreq_driver->target(policy, target_freq, relation);
1689
1690        return retval;
1691}
1692EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1693
1694int cpufreq_driver_target(struct cpufreq_policy *policy,
1695                          unsigned int target_freq,
1696                          unsigned int relation)
1697{
1698        int ret = -EINVAL;
1699
1700        if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1701                goto fail;
1702
1703        ret = __cpufreq_driver_target(policy, target_freq, relation);
1704
1705        unlock_policy_rwsem_write(policy->cpu);
1706
1707fail:
1708        return ret;
1709}
1710EXPORT_SYMBOL_GPL(cpufreq_driver_target);
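
/*
 * Usage sketch (hypothetical, not part of this file): request at least
 * 800 MHz on a policy. CPUFREQ_RELATION_L picks the lowest frequency
 * at or above the target; CPUFREQ_RELATION_H the highest at or below
 * it. Callers that do not already hold the policy rwsem must use the
 * locked wrapper, as here; governors use __cpufreq_driver_target().
 */
static int my_raise_floor(struct cpufreq_policy *policy)
{
        return cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_L);
}
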
1711
1712/*
1713 * Invoke policy->governor->governor() with the given CPUFREQ_GOV_* event.
1714 */
1715
1716static int __cpufreq_governor(struct cpufreq_policy *policy,
1717                                        unsigned int event)
1718{
1719        int ret;
1720
1721        /* A fallback only needs to be defined when the default governor is
1722           known to have latency restrictions (e.g. conservative or ondemand).
1723           Kconfig already ensures that this is the case.
1724        */
1725#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1726        struct cpufreq_governor *gov = &cpufreq_gov_performance;
1727#else
1728        struct cpufreq_governor *gov = NULL;
1729#endif
1730
1731        if (policy->governor->max_transition_latency &&
1732            policy->cpuinfo.transition_latency >
1733            policy->governor->max_transition_latency) {
1734                if (!gov)
1735                        return -EINVAL;
1736                else {
1737                        printk(KERN_WARNING "%s governor failed: HW "
1738                               "transition latency too long, falling "
1739                               "back to %s governor\n",
1740                               policy->governor->name,
1741                               gov->name);
1742                        policy->governor = gov;
1743                }
1744        }
1745
1746        if (event == CPUFREQ_GOV_POLICY_INIT)
1747                if (!try_module_get(policy->governor->owner))
1748                        return -EINVAL;
1749
1750        pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1751                                                policy->cpu, event);
1752
1753        mutex_lock(&cpufreq_governor_lock);
1754        if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1755            || (!policy->governor_enabled
1756            && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1757                mutex_unlock(&cpufreq_governor_lock);
1758                return -EBUSY;
1759        }
1760
1761        if (event == CPUFREQ_GOV_STOP)
1762                policy->governor_enabled = false;
1763        else if (event == CPUFREQ_GOV_START)
1764                policy->governor_enabled = true;
1765
1766        mutex_unlock(&cpufreq_governor_lock);
1767
1768        ret = policy->governor->governor(policy, event);
1769
1770        if (!ret) {
1771                if (event == CPUFREQ_GOV_POLICY_INIT)
1772                        policy->governor->initialized++;
1773                else if (event == CPUFREQ_GOV_POLICY_EXIT)
1774                        policy->governor->initialized--;
1775        } else {
1776                /* Restore original values */
1777                mutex_lock(&cpufreq_governor_lock);
1778                if (event == CPUFREQ_GOV_STOP)
1779                        policy->governor_enabled = true;
1780                else if (event == CPUFREQ_GOV_START)
1781                        policy->governor_enabled = false;
1782                mutex_unlock(&cpufreq_governor_lock);
1783        }
1784
1785        if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1786                        ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1787                module_put(policy->governor->owner);
1788
1789        return ret;
1790}
1791
1792int cpufreq_register_governor(struct cpufreq_governor *governor)
1793{
1794        int err;
1795
1796        if (!governor)
1797                return -EINVAL;
1798
1799        if (cpufreq_disabled())
1800                return -ENODEV;
1801
1802        mutex_lock(&cpufreq_governor_mutex);
1803
1804        governor->initialized = 0;
1805        err = -EBUSY;
1806        if (__find_governor(governor->name) == NULL) {
1807                err = 0;
1808                list_add(&governor->governor_list, &cpufreq_governor_list);
1809        }
1810
1811        mutex_unlock(&cpufreq_governor_mutex);
1812        return err;
1813}
1814EXPORT_SYMBOL_GPL(cpufreq_register_governor);
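
/*
 * Governor skeleton (hypothetical "noop" governor, not part of this
 * file): the single ->governor() callback is multiplexed over the
 * CPUFREQ_GOV_* events dispatched by __cpufreq_governor() above.
 */
static int noop_governor(struct cpufreq_policy *policy, unsigned int event)
{
        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:   /* allocate per-policy state */
        case CPUFREQ_GOV_START:         /* begin managing policy->cpu */
        case CPUFREQ_GOV_LIMITS:        /* policy->min/max changed */
        case CPUFREQ_GOV_STOP:          /* stop changing frequencies */
        case CPUFREQ_GOV_POLICY_EXIT:   /* free per-policy state */
                break;
        }
        return 0;
}

static struct cpufreq_governor cpufreq_gov_noop = {
        .name           = "noop",
        .governor       = noop_governor,
        .owner          = THIS_MODULE,
};

/*
 * Module init/exit would call cpufreq_register_governor() and
 * cpufreq_unregister_governor() on &cpufreq_gov_noop.
 */
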
1815
1816void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1817{
1818#ifdef CONFIG_HOTPLUG_CPU
1819        int cpu;
1820#endif
1821
1822        if (!governor)
1823                return;
1824
1825        if (cpufreq_disabled())
1826                return;
1827
1828#ifdef CONFIG_HOTPLUG_CPU
1829        for_each_present_cpu(cpu) {
1830                if (cpu_online(cpu))
1831                        continue;
1832                if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1833                        strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1834        }
1835#endif
1836
1837        mutex_lock(&cpufreq_governor_mutex);
1838        list_del(&governor->governor_list);
1839        mutex_unlock(&cpufreq_governor_mutex);
1840        return;
1841}
1842EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1843
1844
1845/*********************************************************************
1846 *                          POLICY INTERFACE                         *
1847 *********************************************************************/
1848
1849/**
1850 * cpufreq_get_policy - get the current cpufreq_policy
1851 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1852 *      is written
1853 *
1854 * Reads the current cpufreq policy.
1855 */
1856int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1857{
1858        struct cpufreq_policy *cpu_policy;
1859        if (!policy)
1860                return -EINVAL;
1861
1862        cpu_policy = cpufreq_cpu_get(cpu);
1863        if (!cpu_policy)
1864                return -EINVAL;
1865
1866        memcpy(policy, cpu_policy, sizeof(*policy));
1867
1868        cpufreq_cpu_put(cpu_policy);
1869        return 0;
1870}
1871EXPORT_SYMBOL(cpufreq_get_policy);
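
/*
 * Usage sketch (hypothetical helper, not part of this file): the
 * caller supplies the storage and gets a snapshot; the copy is
 * consistent at the time of the call but not kept up to date.
 */
static void my_dump_limits(unsigned int cpu)
{
        struct cpufreq_policy pol;      /* stack copy, illustration only */

        if (!cpufreq_get_policy(&pol, cpu))
                pr_info("cpu%u policy: %u - %u kHz\n",
                        cpu, pol.min, pol.max);
}
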
1872
1873/*
1874 * policy     : current policy.
1875 * new_policy : policy to be set.
1876 */
1877static int __cpufreq_set_policy(struct cpufreq_policy *policy,
1878                                struct cpufreq_policy *new_policy)
1879{
1880        int ret = 0, failed = 1;
1881
1882        pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1883                new_policy->min, new_policy->max);
1884
1885        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
1886
1887        if (new_policy->min > policy->max || new_policy->max < policy->min) {
1888                ret = -EINVAL;
1889                goto error_out;
1890        }
1891
1892        /* verify the cpu speed can be set within this limit */
1893        ret = cpufreq_driver->verify(new_policy);
1894        if (ret)
1895                goto error_out;
1896
1897        /* adjust if necessary - all reasons */
1898        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1899                        CPUFREQ_ADJUST, new_policy);
1900
1901        /* adjust if necessary - hardware incompatibility */
1902        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1903                        CPUFREQ_INCOMPATIBLE, new_policy);
1904
1905        /*
1906         * verify the cpu speed can be set within this limit, which might be
1907 * different from the first one
1908         */
1909        ret = cpufreq_driver->verify(new_policy);
1910        if (ret)
1911                goto error_out;
1912
1913        /* notification of the new policy */
1914        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1915                        CPUFREQ_NOTIFY, new_policy);
1916
1917        policy->min = new_policy->min;
1918        policy->max = new_policy->max;
1919
1920        pr_debug("new min and max freqs are %u - %u kHz\n",
1921                                        policy->min, policy->max);
1922
1923        if (cpufreq_driver->setpolicy) {
1924                policy->policy = new_policy->policy;
1925                pr_debug("setting range\n");
1926                ret = cpufreq_driver->setpolicy(new_policy);
1927        } else {
1928                if (new_policy->governor != policy->governor) {
1929                        /* save old, working values */
1930                        struct cpufreq_governor *old_gov = policy->governor;
1931
1932                        pr_debug("governor switch\n");
1933
1934                        /* end old governor */
1935                        if (policy->governor) {
1936                                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1937                                unlock_policy_rwsem_write(new_policy->cpu);
1938                                __cpufreq_governor(policy,
1939                                                CPUFREQ_GOV_POLICY_EXIT);
1940                                lock_policy_rwsem_write(new_policy->cpu);
1941                        }
1942
1943                        /* start new governor */
1944                        policy->governor = new_policy->governor;
1945                        if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1946                                if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
1947                                        failed = 0;
1948                                } else {
1949                                        unlock_policy_rwsem_write(new_policy->cpu);
1950                                        __cpufreq_governor(policy,
1951                                                        CPUFREQ_GOV_POLICY_EXIT);
1952                                        lock_policy_rwsem_write(new_policy->cpu);
1953                                }
1954                        }
1955
1956                        if (failed) {
1957                                /* new governor failed, so re-start old one */
1958                                pr_debug("starting governor %s failed\n",
1959                                                        policy->governor->name);
1960                                if (old_gov) {
1961                                        policy->governor = old_gov;
1962                                        __cpufreq_governor(policy,
1963                                                        CPUFREQ_GOV_POLICY_INIT);
1964                                        __cpufreq_governor(policy,
1965                                                           CPUFREQ_GOV_START);
1966                                }
1967                                ret = -EINVAL;
1968                                goto error_out;
1969                        }
1970                        /* might be a policy change, too, so fall through */
1971                }
1972                pr_debug("governor: change or update limits\n");
1973                ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1974        }
1975
1976error_out:
1977        return ret;
1978}
1979
1980/**
1981 *      cpufreq_update_policy - re-evaluate an existing cpufreq policy
1982 *      @cpu: CPU which shall be re-evaluated
1983 *
1984 *      Useful for policy notifiers which have different necessities
1985 *      at different times.
1986 */
1987int cpufreq_update_policy(unsigned int cpu)
1988{
1989        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1990        struct cpufreq_policy new_policy;
1991        int ret;
1992
1993        if (!policy) {
1994                ret = -ENODEV;
1995                goto no_policy;
1996        }
1997
1998        if (unlikely(lock_policy_rwsem_write(cpu))) {
1999                ret = -EINVAL;
2000                goto fail;
2001        }
2002
2003        pr_debug("updating policy for CPU %u\n", cpu);
2004        memcpy(&new_policy, policy, sizeof(*policy));
2005        new_policy.min = policy->user_policy.min;
2006        new_policy.max = policy->user_policy.max;
2007        new_policy.policy = policy->user_policy.policy;
2008        new_policy.governor = policy->user_policy.governor;
2009
2010        /*
2011         * BIOS might change freq behind our back
2012         * -> ask driver for current freq and notify governors about a change
2013         */
2014        if (cpufreq_driver->get) {
2015                new_policy.cur = cpufreq_driver->get(cpu);
2016                if (!policy->cur) {
2017                        pr_debug("Driver did not initialize current freq\n");
2018                        policy->cur = new_policy.cur;
2019                } else {
2020                        if (policy->cur != new_policy.cur && cpufreq_driver->target)
2021                                cpufreq_out_of_sync(cpu, policy->cur,
2022                                                                new_policy.cur);
2023                }
2024        }
2025
2026        ret = __cpufreq_set_policy(policy, &new_policy);
2027
2028        unlock_policy_rwsem_write(cpu);
2029
2030fail:
2031        cpufreq_cpu_put(policy);
2032no_policy:
2033        return ret;
2034}
2035EXPORT_SYMBOL(cpufreq_update_policy);
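
/*
 * Sketch (hypothetical thermal cap, not part of this file):
 * cpufreq_update_policy() is the usual companion of a CPUFREQ_ADJUST
 * policy notifier - the notifier clamps the limits, and this function
 * re-runs the notifier chain so the clamp takes effect.
 */
static unsigned int my_cap_khz = 1200000;       /* assumed example cap */

static int my_policy_cb(struct notifier_block *nb,
                        unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;

        if (event == CPUFREQ_ADJUST)
                cpufreq_verify_within_limits(policy, 0, my_cap_khz);
        return NOTIFY_OK;
}

static struct notifier_block my_policy_nb = {
        .notifier_call = my_policy_cb,
};

/*
 * Register my_policy_nb with CPUFREQ_POLICY_NOTIFIER; after changing
 * my_cap_khz, call cpufreq_update_policy(cpu) for each affected CPU.
 */
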
2036
2037static int cpufreq_cpu_callback(struct notifier_block *nfb,
2038                                        unsigned long action, void *hcpu)
2039{
2040        unsigned int cpu = (unsigned long)hcpu;
2041        struct device *dev;
2042        bool frozen = false;
2043
2044        dev = get_cpu_device(cpu);
2045        if (dev) {
2046
2047                if (action & CPU_TASKS_FROZEN)
2048                        frozen = true;
2049
2050                switch (action & ~CPU_TASKS_FROZEN) {
2051                case CPU_ONLINE:
2052                        __cpufreq_add_dev(dev, NULL, frozen);
2053                        cpufreq_update_policy(cpu);
2054                        break;
2055
2056                case CPU_DOWN_PREPARE:
2057                        __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2058                        break;
2059
2060                case CPU_POST_DEAD:
2061                        __cpufreq_remove_dev_finish(dev, NULL, frozen);
2062                        break;
2063
2064                case CPU_DOWN_FAILED:
2065                        __cpufreq_add_dev(dev, NULL, frozen);
2066                        break;
2067                }
2068        }
2069        return NOTIFY_OK;
2070}
2071
2072static struct notifier_block __refdata cpufreq_cpu_notifier = {
2073        .notifier_call = cpufreq_cpu_callback,
2074};
2075
2076/*********************************************************************
2077 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2078 *********************************************************************/
2079
2080/**
2081 * cpufreq_register_driver - register a CPU Frequency driver
2082 * @driver_data: A struct cpufreq_driver containing the values
2083 * submitted by the CPU Frequency driver.
2084 *
2085 * Registers a CPU Frequency driver to this core code. This code
2086 * returns zero on success, -EEXIST when another driver got here first
2087 * (and isn't unregistered in the meantime).
2088 *
2089 */
2090int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2091{
2092        unsigned long flags;
2093        int ret;
2094
2095        if (cpufreq_disabled())
2096                return -ENODEV;
2097
2098        if (!driver_data || !driver_data->verify || !driver_data->init ||
2099            ((!driver_data->setpolicy) && (!driver_data->target)))
2100                return -EINVAL;
2101
2102        pr_debug("trying to register driver %s\n", driver_data->name);
2103
2104        if (driver_data->setpolicy)
2105                driver_data->flags |= CPUFREQ_CONST_LOOPS;
2106
2107        write_lock_irqsave(&cpufreq_driver_lock, flags);
2108        if (cpufreq_driver) {
2109                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2110                return -EEXIST;
2111        }
2112        cpufreq_driver = driver_data;
2113        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2114
2115        ret = subsys_interface_register(&cpufreq_interface);
2116        if (ret)
2117                goto err_null_driver;
2118
2119        if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2120                int i;
2121                ret = -ENODEV;
2122
2123                /* check for at least one working CPU */
2124                for (i = 0; i < nr_cpu_ids; i++)
2125                        if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2126                                ret = 0;
2127                                break;
2128                        }
2129
2130                /* if all ->init() calls failed, unregister */
2131                if (ret) {
2132                        pr_debug("no CPU initialized for driver %s\n",
2133                                                        driver_data->name);
2134                        goto err_if_unreg;
2135                }
2136        }
2137
2138        register_hotcpu_notifier(&cpufreq_cpu_notifier);
2139        pr_debug("driver %s up and running\n", driver_data->name);
2140
2141        return 0;
2142err_if_unreg:
2143        subsys_interface_unregister(&cpufreq_interface);
2144err_null_driver:
2145        write_lock_irqsave(&cpufreq_driver_lock, flags);
2146        cpufreq_driver = NULL;
2147        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2148        return ret;
2149}
2150EXPORT_SYMBOL_GPL(cpufreq_register_driver);
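
/*
 * Driver skeleton (hypothetical "mydrv", not part of this file; all
 * frequencies are assumed example values): the registration path
 * above insists on ->verify, ->init and one of ->setpolicy/->target.
 * Note that cpuinfo.transition_latency is in nanoseconds.
 */
static int mydrv_init(struct cpufreq_policy *policy)
{
        policy->cpuinfo.min_freq = 200000;              /* 200 MHz */
        policy->cpuinfo.max_freq = 1000000;             /* 1 GHz */
        policy->cpuinfo.transition_latency = 100000;    /* 100 us */
        policy->min = policy->cpuinfo.min_freq;
        policy->max = policy->cpuinfo.max_freq;
        return 0;
}

static int mydrv_verify(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
        return 0;
}

static int mydrv_target(struct cpufreq_policy *policy,
                        unsigned int target_freq, unsigned int relation)
{
        /* Program the hardware; real drivers also post the
         * CPUFREQ_PRECHANGE/POSTCHANGE transition notifications. */
        return 0;
}

static struct cpufreq_driver mydrv_driver = {
        .name   = "mydrv",
        .init   = mydrv_init,
        .verify = mydrv_verify,
        .target = mydrv_target,
};

/*
 * Module init: cpufreq_register_driver(&mydrv_driver);
 * module exit: cpufreq_unregister_driver(&mydrv_driver);
 */
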
2151
2152/**
2153 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2154 *
2155 * Unregister the current CPUFreq driver. Only call this if you have
2156 * the right to do so, i.e. if you have succeeded in initialising before!
2157 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2158 * currently not initialised.
2159 */
2160int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2161{
2162        unsigned long flags;
2163
2164        if (!cpufreq_driver || (driver != cpufreq_driver))
2165                return -EINVAL;
2166
2167        pr_debug("unregistering driver %s\n", driver->name);
2168
2169        subsys_interface_unregister(&cpufreq_interface);
2170        unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2171
2172        down_write(&cpufreq_rwsem);
2173        write_lock_irqsave(&cpufreq_driver_lock, flags);
2174
2175        cpufreq_driver = NULL;
2176
2177        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2178        up_write(&cpufreq_rwsem);
2179
2180        return 0;
2181}
2182EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2183
2184static int __init cpufreq_core_init(void)
2185{
2186        int cpu;
2187
2188        if (cpufreq_disabled())
2189                return -ENODEV;
2190
2191        for_each_possible_cpu(cpu)
2192                init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2193
2194        cpufreq_global_kobject = kobject_create();
2195        BUG_ON(!cpufreq_global_kobject);
2196        register_syscore_ops(&cpufreq_syscore_ops);
2197
2198        return 0;
2199}
2200core_initcall(cpufreq_core_init);
2201