/* drivers/cpufreq/cpufreq_stats.c */
   1/*
   2 *  drivers/cpufreq/cpufreq_stats.c
   3 *
   4 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
   5 *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/slab.h>
  14#include <linux/cpu.h>
  15#include <linux/sysfs.h>
  16#include <linux/cpufreq.h>
  17#include <linux/module.h>
  18#include <linux/jiffies.h>
  19#include <linux/percpu.h>
  20#include <linux/kobject.h>
  21#include <linux/spinlock.h>
  22#include <linux/notifier.h>
  23#include <asm/cputime.h>
  24
  25static spinlock_t cpufreq_stats_lock;
  26
/*
 * Per-CPU statistics bookkeeping.
 *
 * time_in_state, freq_table and (when CONFIG_CPU_FREQ_STAT_DETAILS is
 * set) trans_table are all carved out of a single kzalloc'd buffer in
 * cpufreq_stats_create_table(), so only time_in_state is ever passed
 * to kfree() (see cpufreq_stats_free_table()).
 */
struct cpufreq_stats {
	unsigned int cpu;		/* CPU this table currently belongs to */
	unsigned int total_trans;	/* total frequency transitions recorded */
	unsigned long long last_time;	/* jiffies64 stamp of the last update */
	unsigned int max_state;		/* slots allocated per table */
	unsigned int state_num;		/* distinct valid frequencies in freq_table */
	unsigned int last_index;	/* freq_table index of the current frequency */
	u64 *time_in_state;		/* cumulative jiffies64 spent at each frequency */
	unsigned int *freq_table;	/* frequency value for each index */
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;	/* max_state x max_state transition count matrix */
#endif
};
  40
  41static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
  42
/*
 * Sysfs show-callback wrapper for stats attributes.
 * NOTE(review): appears unused in this file — the attributes below are
 * declared with cpufreq_freq_attr_ro() instead; confirm no other user
 * before removing.
 */
struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t(*show) (struct cpufreq_stats *, char *);
};
  47
  48static int cpufreq_stats_update(unsigned int cpu)
  49{
  50        struct cpufreq_stats *stat;
  51        unsigned long long cur_time;
  52
  53        cur_time = get_jiffies_64();
  54        spin_lock(&cpufreq_stats_lock);
  55        stat = per_cpu(cpufreq_stats_table, cpu);
  56        if (stat->time_in_state)
  57                stat->time_in_state[stat->last_index] +=
  58                        cur_time - stat->last_time;
  59        stat->last_time = cur_time;
  60        spin_unlock(&cpufreq_stats_lock);
  61        return 0;
  62}
  63
  64static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
  65{
  66        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
  67        if (!stat)
  68                return 0;
  69        return sprintf(buf, "%d\n",
  70                        per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
  71}
  72
  73static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
  74{
  75        ssize_t len = 0;
  76        int i;
  77        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
  78        if (!stat)
  79                return 0;
  80        cpufreq_stats_update(stat->cpu);
  81        for (i = 0; i < stat->state_num; i++) {
  82                len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
  83                        (unsigned long long)
  84                        cputime64_to_clock_t(stat->time_in_state[i]));
  85        }
  86        return len;
  87}
  88
  89#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/*
 * show_trans_table - sysfs handler for "stats/trans_table".
 *
 * Prints a matrix of transition counts: rows are the "from" frequency,
 * columns the "to" frequency.  Output is clamped to a single page; when
 * the table does not fit, exactly PAGE_SIZE is returned (truncated view).
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	/* Bring time_in_state up to date (shared update path). */
	cpufreq_stats_update(stat->cpu);
	/* Header: column legend listing every known frequency. */
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	/* One row per "from" frequency. */
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			/* Matrix is stored row-major with max_state stride. */
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
 133cpufreq_freq_attr_ro(trans_table);
 134#endif
 135
 136cpufreq_freq_attr_ro(total_trans);
 137cpufreq_freq_attr_ro(time_in_state);
 138
/* Attributes exposed under each policy's "stats" sysfs directory. */
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,	/* only with detailed stats enabled */
#endif
	NULL
};
/* Group created in cpufreq_stats_create_table(), removed in
 * cpufreq_stats_free_sysfs(). */
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
 151
 152static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 153{
 154        int index;
 155        for (index = 0; index < stat->max_state; index++)
 156                if (stat->freq_table[index] == freq)
 157                        return index;
 158        return -1;
 159}
 160
 161/* should be called late in the CPU removal sequence so that the stats
 162 * memory is still available in case someone tries to use it.
 163 */
 164static void cpufreq_stats_free_table(unsigned int cpu)
 165{
 166        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
 167
 168        if (stat) {
 169                pr_debug("%s: Free stat table\n", __func__);
 170                kfree(stat->time_in_state);
 171                kfree(stat);
 172                per_cpu(cpufreq_stats_table, cpu) = NULL;
 173        }
 174}
 175
 176/* must be called early in the CPU removal sequence (before
 177 * cpufreq_remove_dev) so that policy is still valid.
 178 */
 179static void cpufreq_stats_free_sysfs(unsigned int cpu)
 180{
 181        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 182
 183        if (!policy)
 184                return;
 185
 186        if (!cpufreq_frequency_get_table(cpu))
 187                goto put_ref;
 188
 189        if (!policy_is_shared(policy)) {
 190                pr_debug("%s: Free sysfs stat\n", __func__);
 191                sysfs_remove_group(&policy->kobj, &stats_attr_group);
 192        }
 193
 194put_ref:
 195        cpufreq_cpu_put(policy);
 196}
 197
/*
 * cpufreq_stats_create_table - allocate and populate stats for a policy.
 * @policy: the policy being initialized.
 * @table:  the driver's frequency table for this policy.
 *
 * Creates the per-CPU stats structure, the "stats" sysfs group, and the
 * single backing buffer that holds time_in_state, freq_table and
 * (optionally) trans_table.  Returns 0 on success, -EBUSY if a table
 * already exists, -ENOMEM/-EINVAL or a sysfs error code on failure.
 */
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;
	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if ((stat) == NULL)
		return -ENOMEM;

	/* Take a policy reference for the duration of the setup. */
	data = cpufreq_cpu_get(cpu);
	if (data == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	/* Count valid entries; invalid ones are skipped entirely. */
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	/* One shared buffer: count u64 times + count int frequencies
	 * (+ a count x count int transition matrix when enabled). */
	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	/* Carve the sub-tables out of the single allocation. */
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	/* Fill freq_table, de-duplicating repeated frequencies. */
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}
 270
 271static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
 272{
 273        struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
 274                        policy->last_cpu);
 275
 276        pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
 277                        policy->cpu, policy->last_cpu);
 278        per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
 279                        policy->last_cpu);
 280        per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
 281        stat->cpu = policy->cpu;
 282}
 283
 284static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 285                unsigned long val, void *data)
 286{
 287        int ret;
 288        struct cpufreq_policy *policy = data;
 289        struct cpufreq_frequency_table *table;
 290        unsigned int cpu = policy->cpu;
 291
 292        if (val == CPUFREQ_UPDATE_POLICY_CPU) {
 293                cpufreq_stats_update_policy_cpu(policy);
 294                return 0;
 295        }
 296
 297        if (val != CPUFREQ_NOTIFY)
 298                return 0;
 299        table = cpufreq_frequency_get_table(cpu);
 300        if (!table)
 301                return 0;
 302        ret = cpufreq_stats_create_table(policy, table);
 303        if (ret)
 304                return ret;
 305        return 0;
 306}
 307
/*
 * cpufreq_stat_notifier_trans - transition-notifier callback.
 *
 * On CPUFREQ_POSTCHANGE, credits the elapsed time to the old frequency
 * (via cpufreq_stats_update(), which must run BEFORE last_index is
 * switched) and then records the transition.
 */
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1]= .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	/* Accumulate time at the OLD frequency before switching index. */
	cpufreq_stats_update(freq->cpu);

	/* Same frequency: time was credited, but it is not a transition. */
	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
 343
 344static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 345                                               unsigned long action,
 346                                               void *hcpu)
 347{
 348        unsigned int cpu = (unsigned long)hcpu;
 349
 350        switch (action) {
 351        case CPU_ONLINE:
 352        case CPU_ONLINE_FROZEN:
 353                cpufreq_update_policy(cpu);
 354                break;
 355        case CPU_DOWN_PREPARE:
 356        case CPU_DOWN_PREPARE_FROZEN:
 357                cpufreq_stats_free_sysfs(cpu);
 358                break;
 359        case CPU_DEAD:
 360        case CPU_DEAD_FROZEN:
 361                cpufreq_stats_free_table(cpu);
 362                break;
 363        }
 364        return NOTIFY_OK;
 365}
 366
/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};

/* Receives CPUFREQ_NOTIFY / CPUFREQ_UPDATE_POLICY_CPU events. */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

/* Receives CPUFREQ_POSTCHANGE transition events. */
static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
 380
/*
 * cpufreq_stats_init - module init.
 *
 * Registers the policy notifier first, then walks the online CPUs so
 * existing policies get their stats tables, and finally registers the
 * transition notifier.  On failure of the latter, every registration
 * and any already-created tables are rolled back.
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	/* Must be initialized before any notifier can fire. */
	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	/* Kick a policy update per CPU so tables exist for current policies. */
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* Roll back everything done above. */
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	return 0;
}
/*
 * cpufreq_stats_exit - module teardown.
 *
 * Unregisters all notifiers BEFORE freeing tables so no callback can
 * touch freed memory, then releases per-CPU tables and sysfs groups.
 */
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
}
 423
 424MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
 425MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
 426                                "through sysfs filesystem");
 427MODULE_LICENSE("GPL");
 428
 429module_init(cpufreq_stats_init);
 430module_exit(cpufreq_stats_exit);
 431
/* lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995. */