/*
 *  drivers/cpufreq/cpufreq_conservative.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *            (C)  2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#include "cpufreq_governor.h"

/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD            (20)
#define DEF_SAMPLING_DOWN_FACTOR                (1)
#define MAX_SAMPLING_DOWN_FACTOR                (10)

static struct dbs_data cs_dbs_data;
static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

static struct cs_dbs_tuners cs_tuners = {
        .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
        .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
        .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
        .ignore_nice = 0,
        .freq_step = 5,
};
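
/*
 * Illustrative usage (the exact sysfs path varies by kernel version; in
 * this global-tunables generation of the governor the attribute group
 * below is typically exposed under the global cpufreq directory), e.g.:
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/conservative/up_threshold
 *   echo 10 > /sys/devices/system/cpu/cpufreq/conservative/freq_step
 */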

/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default); if it is, we try to increase the frequency. Every sampling_rate *
 * sampling_down_factor, we check if the current idle time is more than 80%;
 * if it is, we try to decrease the frequency.
 *
 * Frequency changes, both up and down, happen in minimum steps of 5%
 * (the default freq_step) of the maximum frequency.
 */
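
/*
 * Worked example with illustrative numbers: for a policy with
 * policy->max = 2000000 (2 GHz; cpufreq works in kHz) and the default
 * freq_step of 5, cs_check_cpu() computes
 *
 *   freq_target = (5 * 2000000) / 100 = 100000 kHz (100 MHz)
 *
 * so a sampled load above up_threshold (80) raises requested_freq by
 * 100 MHz, and a load below down_threshold - 10 (i.e. below 10) lowers
 * it by 100 MHz, always clamped to [policy->min, policy->max].
 */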
static void cs_check_cpu(int cpu, unsigned int load)
{
        struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
        unsigned int freq_target;

        /*
         * break out early if we 'cannot' change the speed, as the user
         * might deliberately want freq_step to be zero
         */
        if (cs_tuners.freq_step == 0)
                return;

        /* Check for frequency increase */
        if (load > cs_tuners.up_threshold) {
                dbs_info->down_skip = 0;

                /* if we are already at full speed then break out early */
                if (dbs_info->requested_freq == policy->max)
                        return;

                freq_target = (cs_tuners.freq_step * policy->max) / 100;

                /* max freq cannot be less than 100. But who knows.... */
                if (unlikely(freq_target == 0))
                        freq_target = 5;

                dbs_info->requested_freq += freq_target;
                if (dbs_info->requested_freq > policy->max)
                        dbs_info->requested_freq = policy->max;

                __cpufreq_driver_target(policy, dbs_info->requested_freq,
                        CPUFREQ_RELATION_H);
                return;
        }

        /*
         * The optimal frequency is the lowest frequency that can support the
         * current CPU usage without triggering the up policy. To be safe, we
         * stay 10 points under the down threshold.
         */
        if (load < (cs_tuners.down_threshold - 10)) {
                freq_target = (cs_tuners.freq_step * policy->max) / 100;

                dbs_info->requested_freq -= freq_target;
                if (dbs_info->requested_freq < policy->min)
                        dbs_info->requested_freq = policy->min;

                /*
                 * if we cannot reduce the frequency anymore, break out early
                 */
                if (policy->cur == policy->min)
                        return;

                __cpufreq_driver_target(policy, dbs_info->requested_freq,
                                CPUFREQ_RELATION_H);
                return;
        }
}

static void cs_dbs_timer(struct work_struct *work)
{
        struct delayed_work *dw = to_delayed_work(work);
        struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
                        struct cs_cpu_dbs_info_s, cdbs.work.work);
        unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
        struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
                        cpu);
        int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);

        mutex_lock(&core_dbs_info->cdbs.timer_mutex);
        if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
                dbs_check_cpu(&cs_dbs_data, cpu);

        schedule_delayed_work_on(smp_processor_id(), dw, delay);
        mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
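
/*
 * Illustrative timing, assuming delay_for_sampling_rate() converts
 * microseconds to jiffies as its name suggests: with
 * cs_tuners.sampling_rate = 20000 (usecs), the work above re-queues
 * itself roughly every 20 ms, so the load is re-evaluated about 50
 * times per second (exact rounding depends on CONFIG_HZ).
 */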

static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                void *data)
{
        struct cpufreq_freqs *freq = data;
        struct cs_cpu_dbs_info_s *dbs_info =
                                        &per_cpu(cs_cpu_dbs_info, freq->cpu);
        struct cpufreq_policy *policy;

        if (!dbs_info->enable)
                return 0;

        policy = dbs_info->cdbs.cur_policy;

        /*
         * we only care if our internally tracked freq moves outside the 'valid'
         * range of frequencies available to us; otherwise we do not change it
         */
        if (dbs_info->requested_freq > policy->max
                        || dbs_info->requested_freq < policy->min)
                dbs_info->requested_freq = freq->new;

        return 0;
}
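
/*
 * Illustrative scenario for the notifier above: if userspace lowers
 * scaling_max_freq while requested_freq is still above the new
 * policy->max, the next frequency-transition notification resets
 * requested_freq to the frequency actually set (freq->new), so the
 * governor stops requesting an out-of-range value.
 */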

/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_min(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", cs_dbs_data.min_sampling_rate);
}

static ssize_t store_sampling_down_factor(struct kobject *a,
                                          struct attribute *b,
                                          const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;

        cs_tuners.sampling_down_factor = input;
        return count;
}

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
                                   const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        cs_tuners.sampling_rate = max(input, cs_dbs_data.min_sampling_rate);
        return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
                                  const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > 100 || input <= cs_tuners.down_threshold)
                return -EINVAL;

        cs_tuners.up_threshold = input;
        return count;
}

static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
                                    const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        /*
         * cannot be lower than 11, otherwise the freq will never fall:
         * cs_check_cpu() only reduces when load < down_threshold - 10
         */
        if (ret != 1 || input < 11 || input > 100 ||
                        input >= cs_tuners.up_threshold)
                return -EINVAL;

        cs_tuners.down_threshold = input;
        return count;
}
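
/*
 * Illustrative writes, with the default up_threshold of 80:
 *
 *   echo 30 > down_threshold    succeeds (11 <= 30 < up_threshold)
 *   echo 5  > down_threshold    fails with -EINVAL (below the minimum
 *                               of 11 enforced above)
 *   echo 85 > down_threshold    fails with -EINVAL (not strictly below
 *                               up_threshold)
 */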

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
                                      const char *buf, size_t count)
{
        unsigned int input, j;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        if (input == cs_tuners.ignore_nice) /* nothing to do */
                return count;

        cs_tuners.ignore_nice = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct cs_cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(cs_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                                                &dbs_info->cdbs.prev_cpu_wall);
                if (cs_tuners.ignore_nice)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
        return count;
}

static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
                               const char *buf, size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 100)
                input = 100;

        /*
         * no need to test here if freq_step is zero, as the user might
         * actually want this; they would be crazy though :)
         */
        cs_tuners.freq_step = input;
        return count;
}

show_one(cs, sampling_rate, sampling_rate);
show_one(cs, sampling_down_factor, sampling_down_factor);
show_one(cs, up_threshold, up_threshold);
show_one(cs, down_threshold, down_threshold);
show_one(cs, ignore_nice_load, ignore_nice);
show_one(cs, freq_step, freq_step);

define_one_global_rw(sampling_rate);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(up_threshold);
define_one_global_rw(down_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
define_one_global_ro(sampling_rate_min);

static struct attribute *dbs_attributes[] = {
        &sampling_rate_min.attr,
        &sampling_rate.attr,
        &sampling_down_factor.attr,
        &up_threshold.attr,
        &down_threshold.attr,
        &ignore_nice_load.attr,
        &freq_step.attr,
        NULL
};

static struct attribute_group cs_attr_group = {
        .attrs = dbs_attributes,
        .name = "conservative",
};

/************************** sysfs end ************************/

define_get_cpu_dbs_routines(cs_cpu_dbs_info);

static struct notifier_block cs_cpufreq_notifier_block = {
        .notifier_call = dbs_cpufreq_notifier,
};

static struct cs_ops cs_ops = {
        .notifier_block = &cs_cpufreq_notifier_block,
};

static struct dbs_data cs_dbs_data = {
        .governor = GOV_CONSERVATIVE,
        .attr_group = &cs_attr_group,
        .tuners = &cs_tuners,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = cs_dbs_timer,
        .gov_check_cpu = cs_check_cpu,
        .gov_ops = &cs_ops,
};

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event)
{
        return cpufreq_governor_dbs(&cs_dbs_data, policy, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
        .name                   = "conservative",
        .governor               = cs_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};
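
/*
 * Illustrative use once the governor is registered below: userspace
 * selects it per policy via the standard cpufreq sysfs interface, e.g.
 *
 *   echo conservative > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
 */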

static int __init cpufreq_gov_dbs_init(void)
{
        mutex_init(&cs_dbs_data.mutex);
        return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
                "Low Latency Frequency Transition capable processors "
                "optimised for use in a battery environment");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);