linux/drivers/cpufreq/arm_big_little.c
/*
 * ARM big.LITTLE Platforms CPUFreq support
 *
 * Copyright (C) 2013 ARM Ltd.
 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

#include "arm_big_little.h"

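/*
 * Generic big.LITTLE CPUfreq layer: platform back-ends register a
 * cpufreq_arm_bL_ops with bL_cpufreq_register(), supplying at least a name
 * and an init_opp_table() callback. Frequencies are managed per cluster,
 * through one clock and one OPP-derived frequency table per cluster.
 */
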
/* Currently we support only two clusters */
#define MAX_CLUSTERS    2

static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};

static unsigned int bL_cpufreq_get(unsigned int cpu)
{
        u32 cur_cluster = cpu_to_cluster(cpu);

        return clk_get_rate(clk[cur_cluster]) / 1000;
}

/* Validate policy frequency range */
static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
        u32 cur_cluster = cpu_to_cluster(policy->cpu);

        return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
}

/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
                unsigned int target_freq, unsigned int relation)
{
        struct cpufreq_freqs freqs;
        u32 cpu = policy->cpu, freq_tab_idx, cur_cluster;
        int ret = 0;

        cur_cluster = cpu_to_cluster(policy->cpu);

        freqs.old = bL_cpufreq_get(policy->cpu);

        /* Determine valid target frequency using freq_table */
        cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
                        target_freq, relation, &freq_tab_idx);
        freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;

        pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
                        __func__, cpu, cur_cluster, freqs.old, target_freq,
                        freqs.new);

        if (freqs.old == freqs.new)
                return 0;

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

        ret = clk_set_rate(clk[cur_cluster], freqs.new * 1000);
        if (ret) {
                pr_err("clk_set_rate failed: %d\n", ret);
                return ret;
        }

        policy->cur = freqs.new;

        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

        return ret;
}

/* Drop a reference on the cluster's resources */
static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
        u32 cluster = cpu_to_cluster(cpu_dev->id);

        /* Only the last CPU of the cluster releases the clock and freq table */
        if (!atomic_dec_return(&cluster_usage[cluster])) {
                clk_put(clk[cluster]);
                opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
                dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
        }
}

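/*
 * The first CPU of a cluster sets everything up: the platform ops populate
 * the OPP table, a cpufreq frequency table is built from it, and the
 * per-cluster clock "cpu-cluster.<N>" is looked up. Subsequent CPUs of the
 * same cluster only bump the refcount.
 */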
static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
        u32 cluster = cpu_to_cluster(cpu_dev->id);
        char name[14] = "cpu-cluster.";
        int ret;

        if (atomic_inc_return(&cluster_usage[cluster]) != 1)
                return 0;

        ret = arm_bL_ops->init_opp_table(cpu_dev);
        if (ret) {
                dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
                                __func__, cpu_dev->id, ret);
                goto atomic_dec;
        }

        ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
        if (ret) {
                dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
                                __func__, cpu_dev->id, ret);
                goto atomic_dec;
        }

        /* Patch in the cluster index, giving "cpu-cluster.0"/"cpu-cluster.1" */
        name[12] = cluster + '0';
        clk[cluster] = clk_get_sys(name, NULL);
        if (!IS_ERR(clk[cluster])) {
                dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
                                __func__, clk[cluster], freq_table[cluster],
                                cluster);
                return 0;
        }

        dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
                        __func__, cpu_dev->id, cluster);
        ret = PTR_ERR(clk[cluster]);
        opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

atomic_dec:
        atomic_dec(&cluster_usage[cluster]);
        dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
                        cluster);
        return ret;
}

/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
        u32 cur_cluster = cpu_to_cluster(policy->cpu);
        struct device *cpu_dev;
        int ret;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("%s: failed to get cpu%d device\n", __func__,
                                policy->cpu);
                return -ENODEV;
        }

        ret = get_cluster_clk_and_freq_table(cpu_dev);
        if (ret)
                return ret;

        ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
        if (ret) {
                dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
                                policy->cpu, cur_cluster);
                put_cluster_clk_and_freq_table(cpu_dev);
                return ret;
        }

        cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);

        if (arm_bL_ops->get_transition_latency)
                policy->cpuinfo.transition_latency =
                        arm_bL_ops->get_transition_latency(cpu_dev);
        else
                policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

        policy->cur = bL_cpufreq_get(policy->cpu);

        cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

        dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
        return 0;
}

static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
        struct device *cpu_dev;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("%s: failed to get cpu%d device\n", __func__,
                                policy->cpu);
                return -ENODEV;
        }

        put_cluster_clk_and_freq_table(cpu_dev);
        dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

        return 0;
}

/* Export freq_table to sysfs */
static struct freq_attr *bL_cpufreq_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

static struct cpufreq_driver bL_cpufreq_driver = {
        .name                   = "arm-big-little",
        .flags                  = CPUFREQ_STICKY,
        .verify                 = bL_cpufreq_verify_policy,
        .target                 = bL_cpufreq_set_target,
        .get                    = bL_cpufreq_get,
        .init                   = bL_cpufreq_init,
        .exit                   = bL_cpufreq_exit,
        .have_governor_per_policy = true,
        .attr                   = bL_cpufreq_attr,
};

int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
        int ret;

        if (arm_bL_ops) {
                pr_debug("%s: Already registered: %s, exiting\n", __func__,
                                arm_bL_ops->name);
                return -EBUSY;
        }

        if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
                pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
                return -ENODEV;
        }

        arm_bL_ops = ops;

        ret = cpufreq_register_driver(&bL_cpufreq_driver);
        if (ret) {
                pr_info("%s: Failed registering platform driver: %s, err: %d\n",
                                __func__, ops->name, ret);
                arm_bL_ops = NULL;
        } else {
                pr_info("%s: Registered platform driver: %s\n", __func__,
                                ops->name);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);

void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
{
        if (arm_bL_ops != ops) {
                pr_err("%s: Registered with: %s, can't unregister, exiting\n",
                                __func__, arm_bL_ops->name);
                return;
        }

        cpufreq_unregister_driver(&bL_cpufreq_driver);
        pr_info("%s: Un-registered platform driver: %s\n", __func__,
                        arm_bL_ops->name);
        arm_bL_ops = NULL;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);

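For reference, a minimal sketch of how a platform back-end might hook into this driver: it fills in a struct cpufreq_arm_bL_ops (the layout assumed here is the one this file uses: a name, a mandatory init_opp_table() callback and an optional get_transition_latency() callback) and hands it to bL_cpufreq_register(). The sketch assumes OPPs come from the device tree via of_init_opp_table(); a real back-end may obtain them differently, and all "example_" names below are placeholders, not an in-tree driver.

#include <linux/module.h>
#include <linux/opp.h>

#include "arm_big_little.h"

/* Populate the OPP table for this CPU, here from DT "operating-points" */
static int example_init_opp_table(struct device *cpu_dev)
{
        return of_init_opp_table(cpu_dev);
}

static int example_get_transition_latency(struct device *cpu_dev)
{
        return 1000000;         /* 1 ms, placeholder value */
}

static struct cpufreq_arm_bL_ops example_bL_ops = {
        .name                   = "example-bL",
        .init_opp_table         = example_init_opp_table,
        .get_transition_latency = example_get_transition_latency,
};

static int __init example_bL_init(void)
{
        return bL_cpufreq_register(&example_bL_ops);
}
module_init(example_bL_init);

static void __exit example_bL_exit(void)
{
        bL_cpufreq_unregister(&example_bL_ops);
}
module_exit(example_bL_exit);

MODULE_LICENSE("GPL v2");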