linux/drivers/cpufreq/cpufreq_ondemand.c
/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tick.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include "cpufreq_governor.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL         (10)
#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define DEF_SAMPLING_DOWN_FACTOR                (1)
#define MAX_SAMPLING_DOWN_FACTOR                (100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL       (3)
#define MICRO_FREQUENCY_UP_THRESHOLD            (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE         (10000)
#define MIN_FREQUENCY_UP_THRESHOLD              (11)
#define MAX_FREQUENCY_UP_THRESHOLD              (100)
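
/*
 * Editorial note: the effective "down" trigger is derived from these
 * values as adj_up_threshold = up_threshold - down_differential,
 * e.g. 95 - 3 = 92 when idle micro-accounting is available, or
 * 80 - 10 = 70 otherwise (see od_init() and store_up_threshold() below).
 */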

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

static unsigned int default_powersave_bias;

static void ondemand_powersave_bias_init_cpu(int cpu)
{
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

        dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
        dbs_info->freq_lo = 0;
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on
 * how efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and
 * old Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known CPU series by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
        /*
         * For Intel, Core 2 (model 15) and later have an efficient idle.
         */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                        boot_cpu_data.x86 == 6 &&
                        boot_cpu_data.x86_model >= 15)
                return 1;
#endif
        return 0;
}
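
/*
 * Illustrative example (editorial, not from the original source): a
 * Core 2 Duo reports vendor Intel, family 6, model 15, so io_is_busy
 * defaults to 1; an older Pentium M (family 6, model 13) falls through
 * and defaults to 0. Either default can be overridden via the
 * io_is_busy tunable.
 */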

/*
 * Find the right frequency to set now with powersave_bias on.
 * Returns the freq_hi to be used right now and sets freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the per-CPU area, so that alternating
 * between the two frequencies averages out to the requested one.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
                unsigned int freq_next, unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                                   policy->cpu);
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                        relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
        freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
        jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}
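
/*
 * Worked example (editorial, illustrative numbers): with powersave_bias =
 * 100 (10%), freq_next resolving to freq_req = 2000000 kHz, and a table
 * containing 1000000 and 2000000 kHz: freq_reduc = 200000, freq_avg =
 * 1800000, so freq_lo = 1000000 and freq_hi = 2000000. With
 * jiffies_total = 10, jiffies_hi = (800000 * 10 + 500000) / 1000000 = 8
 * and jiffies_lo = 2, i.e. the CPU spends 8 jiffies at 2 GHz and 2 at
 * 1 GHz, averaging roughly the requested 1.8 GHz.
 */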

static void ondemand_powersave_bias_init(void)
{
        int i;
        for_each_online_cpu(i) {
                ondemand_powersave_bias_init_cpu(i);
        }
}

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
        struct dbs_data *dbs_data = p->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        if (od_tuners->powersave_bias)
                freq = od_ops.powersave_bias_target(p, freq,
                                CPUFREQ_RELATION_H);
        else if (p->cur == p->max)
                return;

        __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
                        CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
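
/*
 * Editorial note: with powersave_bias set, the bias target is computed
 * with CPUFREQ_RELATION_H and then driven with CPUFREQ_RELATION_L, so
 * the driver does not round below the frequency the bias code chose;
 * without bias, the requested frequency is approached from above
 * (CPUFREQ_RELATION_H), and a no-op request at p->max is skipped.
 */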

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default). If it is, we try to increase the frequency. We also look
 * for the lowest frequency which can sustain the load while keeping idle
 * time over 30%. If such a frequency exists, we try to decrease to it.
 *
 * Any frequency increase takes us to the maximum frequency. A frequency
 * decrease goes directly to the lowest frequency that can support the
 * current load, kept 10 points (by default) below the up threshold for
 * safety.
 */
static void od_check_cpu(int cpu, unsigned int load_freq)
{
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        dbs_info->freq_lo = 0;

        /* Check for frequency increase */
        if (load_freq > od_tuners->up_threshold * policy->cur) {
                /* If switching to max speed, apply sampling_down_factor */
                if (policy->cur < policy->max)
                        dbs_info->rate_mult =
                                od_tuners->sampling_down_factor;
                dbs_freq_increase(policy, policy->max);
                return;
        }

        /* Check for frequency decrease */
        /* if we cannot reduce the frequency anymore, break out early */
        if (policy->cur == policy->min)
                return;

        /*
         * The optimal frequency is the lowest frequency that can support
         * the current CPU usage without triggering the up policy. To be
         * safe, we stay 10 points under the threshold.
         */
        if (load_freq < od_tuners->adj_up_threshold
                        * policy->cur) {
                unsigned int freq_next;
                freq_next = load_freq / od_tuners->adj_up_threshold;

                /* No longer fully busy, reset rate_mult */
                dbs_info->rate_mult = 1;

                if (freq_next < policy->min)
                        freq_next = policy->min;

                if (!od_tuners->powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                        return;
                }

                freq_next = od_ops.powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
        }
}
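
/*
 * Worked example (editorial, illustrative numbers): load_freq encodes the
 * load percentage scaled by the running frequency, so with up_threshold =
 * 95, adj_up_threshold = 92, a current frequency of 2000000 kHz and a 30%
 * load, load_freq = 60000000 < 92 * 2000000 and the decrease path picks
 * freq_next = 60000000 / 92 = 652173 kHz (rounded via the frequency
 * table with CPUFREQ_RELATION_L, and clamped to policy->min if lower),
 * just enough to keep the load under the adjusted threshold.
 */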

static void od_dbs_timer(struct work_struct *work)
{
        struct od_cpu_dbs_info_s *dbs_info =
                container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
        unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
        struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
                        cpu);
        struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        int delay = 0, sample_type = core_dbs_info->sample_type;
        bool modify_all = true;

        mutex_lock(&core_dbs_info->cdbs.timer_mutex);
        if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
                modify_all = false;
                goto max_delay;
        }

        /* Common NORMAL_SAMPLE setup */
        core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
        if (sample_type == OD_SUB_SAMPLE) {
                delay = core_dbs_info->freq_lo_jiffies;
                __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
                                core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
        } else {
                dbs_check_cpu(dbs_data, cpu);
                if (core_dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        core_dbs_info->sample_type = OD_SUB_SAMPLE;
                        delay = core_dbs_info->freq_hi_jiffies;
                }
        }

max_delay:
        if (!delay)
                delay = delay_for_sampling_rate(od_tuners->sampling_rate
                                * core_dbs_info->rate_mult);

        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
        mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
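
/*
 * Editorial sketch of the resulting timeline with powersave_bias active
 * (continuing the worked example above): a NORMAL_SAMPLE evaluates load
 * and, since generic_powersave_bias_target() left freq_lo nonzero, the
 * CPU runs at freq_hi for freq_hi_jiffies (8 jiffies); then a SUB_SAMPLE
 * drops it to freq_lo for freq_lo_jiffies (2 jiffies) before the next
 * NORMAL_SAMPLE re-evaluates the load.
 */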

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * od_tuners->sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the user requests 10 ms because
 * they need the governor to react quickly, the already-queued timer could
 * still fire up to 1 second later. Thus, when reducing the sampling rate,
 * we need to make the new value effective immediately by rescheduling any
 * pending work.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
                unsigned int new_rate)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        int cpu;

        od_tuners->sampling_rate = new_rate = max(new_rate,
                        dbs_data->min_sampling_rate);

        for_each_online_cpu(cpu) {
                struct cpufreq_policy *policy;
                struct od_cpu_dbs_info_s *dbs_info;
                unsigned long next_sampling, appointed_at;

                policy = cpufreq_cpu_get(cpu);
                if (!policy)
                        continue;
                if (policy->governor != &cpufreq_gov_ondemand) {
                        cpufreq_cpu_put(policy);
                        continue;
                }
                dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
                cpufreq_cpu_put(policy);

                mutex_lock(&dbs_info->cdbs.timer_mutex);

                if (!delayed_work_pending(&dbs_info->cdbs.work)) {
                        mutex_unlock(&dbs_info->cdbs.timer_mutex);
                        continue;
                }

                next_sampling = jiffies + usecs_to_jiffies(new_rate);
                appointed_at = dbs_info->cdbs.work.timer.expires;

                if (time_before(next_sampling, appointed_at)) {
                        mutex_unlock(&dbs_info->cdbs.timer_mutex);
                        cancel_delayed_work_sync(&dbs_info->cdbs.work);
                        mutex_lock(&dbs_info->cdbs.timer_mutex);

                        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
                                        usecs_to_jiffies(new_rate), true);
                }
                mutex_unlock(&dbs_info->cdbs.timer_mutex);
        }
}
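
/*
 * Usage note (editorial): with the system-wide governor sysfs layout,
 * the sampling rate can be changed at runtime, e.g.:
 *
 *   echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * which funnels into store_sampling_rate() below and, when the rate is
 * being lowered, reschedules any pending per-CPU work immediately.
 */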

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        update_sampling_rate(dbs_data, input);
        return count;
}

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        od_tuners->io_is_busy = !!input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                                                        j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                        &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
        }
        return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                return -EINVAL;
        }
        /* Calculate the new adj_up_threshold */
        od_tuners->adj_up_threshold += input;
        od_tuners->adj_up_threshold -= od_tuners->up_threshold;

        od_tuners->up_threshold = input;
        return count;
}
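
/*
 * Worked example (editorial): the two adjustments above keep the down
 * differential constant. If up_threshold was 80 and adj_up_threshold 70
 * (a differential of 10), writing 95 yields adj_up_threshold =
 * 70 + 95 - 80 = 85, still 10 points below the new up_threshold.
 */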

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input, j;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;
        od_tuners->sampling_down_factor = input;

        /* Reset down sampling multiplier in case it was active */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                j);
                dbs_info->rate_mult = 1;
        }
        return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        if (input == od_tuners->ignore_nice_load) { /* nothing to do */
                return count;
        }
        od_tuners->ignore_nice_load = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(od_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                        &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
                if (od_tuners->ignore_nice_load)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
        return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        od_tuners->powersave_bias = input;
        ondemand_powersave_bias_init();
        return count;
}
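
/*
 * Editorial note: powersave_bias is expressed in units of 0.1%, so the
 * clamp above caps it at 1000 (100%); writing 100 biases target
 * frequencies down by 10%, as in the worked example near
 * generic_powersave_bias_target().
 */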

show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_min_gov_sys.attr,
        &sampling_rate_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
        &ignore_nice_load_gov_sys.attr,
        &powersave_bias_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL
};

static struct attribute_group od_attr_group_gov_sys = {
        .attrs = dbs_attributes_gov_sys,
        .name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_min_gov_pol.attr,
        &sampling_rate_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
        &ignore_nice_load_gov_pol.attr,
        &powersave_bias_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL
};

static struct attribute_group od_attr_group_gov_pol = {
        .attrs = dbs_attributes_gov_pol,
        .name = "ondemand",
};

/************************** sysfs end ************************/

static int od_init(struct dbs_data *dbs_data)
{
        struct od_dbs_tuners *tuners;
        u64 idle_time;
        int cpu;

        tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
        }

        cpu = get_cpu();
        idle_time = get_cpu_idle_time_us(cpu, NULL);
        put_cpu();
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
                /*
                 * In nohz/micro accounting case we set the minimum frequency
                 * not depending on HZ, but fixed (very low). The deferred
                 * timer might skip some samples if idle/sleeping as needed.
                 */
                dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
                tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
                        DEF_FREQUENCY_DOWN_DIFFERENTIAL;

                /* For correct statistics, we need 10 ticks for each measure */
                dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
                        jiffies_to_usecs(10);
        }

        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
        tuners->ignore_nice_load = 0;
        tuners->powersave_bias = default_powersave_bias;
        tuners->io_is_busy = should_io_be_busy();

        dbs_data->tuners = tuners;
        mutex_init(&dbs_data->mutex);
        return 0;
}
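
/*
 * Worked example (editorial, assuming MIN_SAMPLING_RATE_RATIO is 2 as
 * defined in cpufreq_governor.h): with micro accounting the floor is a
 * fixed 10000 us; without it, at HZ=250 one jiffy is 4000 us, so
 * jiffies_to_usecs(10) = 40000 and min_sampling_rate = 80000 us.
 */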

static void od_exit(struct dbs_data *dbs_data)
{
        kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
        .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
        .powersave_bias_target = generic_powersave_bias_target,
        .freq_increase = dbs_freq_increase,
};

static struct common_dbs_data od_dbs_cdata = {
        .governor = GOV_ONDEMAND,
        .attr_group_gov_sys = &od_attr_group_gov_sys,
        .attr_group_gov_pol = &od_attr_group_gov_pol,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = od_dbs_timer,
        .gov_check_cpu = od_check_cpu,
        .gov_ops = &od_ops,
        .init = od_init,
        .exit = od_exit,
};

static void od_set_powersave_bias(unsigned int powersave_bias)
{
        struct cpufreq_policy *policy;
        struct dbs_data *dbs_data;
        struct od_dbs_tuners *od_tuners;
        unsigned int cpu;
        cpumask_t done;

        default_powersave_bias = powersave_bias;
        cpumask_clear(&done);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (cpumask_test_cpu(cpu, &done))
                        continue;

                policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
                if (!policy)
                        continue;

                cpumask_or(&done, &done, policy->cpus);

                if (policy->governor != &cpufreq_gov_ondemand)
                        continue;

                dbs_data = policy->governor_data;
                od_tuners = dbs_data->tuners;
                od_tuners->powersave_bias = default_powersave_bias;
        }
        put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
                (struct cpufreq_policy *, unsigned int, unsigned int),
                unsigned int powersave_bias)
{
        od_ops.powersave_bias_target = f;
        od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
        od_ops.powersave_bias_target = generic_powersave_bias_target;
        od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
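
/*
 * Minimal usage sketch (editorial; my_bias_target and the 400 (40%) bias
 * value are hypothetical, not part of this file): a platform driver can
 * substitute its own bias-target heuristic and restore the generic one
 * on teardown:
 *
 *      static unsigned int my_bias_target(struct cpufreq_policy *policy,
 *                      unsigned int freq_next, unsigned int relation)
 *      {
 *              return freq_next;       // e.g. trust the governor's pick
 *      }
 *
 *      od_register_powersave_bias_handler(my_bias_target, 400);
 *      ...
 *      od_unregister_powersave_bias_handler();
 */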

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                unsigned int event)
{
        return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
        .name                   = "ondemand",
        .governor               = od_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);