linux/drivers/devfreq/devfreq.c
   1/*
   2 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
   3 *          for Non-CPU Devices.
   4 *
   5 * Copyright (C) 2011 Samsung Electronics
   6 *      MyungJoo Ham <myungjoo.ham@samsung.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 */
  12
  13#include <linux/kernel.h>
  14#include <linux/sched.h>
  15#include <linux/errno.h>
  16#include <linux/err.h>
  17#include <linux/init.h>
  18#include <linux/module.h>
  19#include <linux/slab.h>
  20#include <linux/stat.h>
  21#include <linux/opp.h>
  22#include <linux/devfreq.h>
  23#include <linux/workqueue.h>
  24#include <linux/platform_device.h>
  25#include <linux/list.h>
  26#include <linux/printk.h>
  27#include <linux/hrtimer.h>
  28#include "governor.h"
  29
  30static struct class *devfreq_class;
  31
  32/*
  33 * devfreq core provides delayed work based load monitoring helper
  34 * functions. Governors can use these or can implement their own
  35 * monitoring mechanism.
  36 */
  37static struct workqueue_struct *devfreq_wq;
  38
  39/* The list of all device-devfreq governors */
  40static LIST_HEAD(devfreq_governor_list);
  41/* The list of all device-devfreq */
  42static LIST_HEAD(devfreq_list);
  43static DEFINE_MUTEX(devfreq_list_lock);
  44
  45/**
  46 * find_device_devfreq() - find devfreq struct using device pointer
  47 * @dev:        device pointer used to lookup device devfreq.
  48 *
  49 * Search the list of device devfreqs and return the matched device's
  50 * devfreq info. devfreq_list_lock should be held by the caller.
  51 */
  52static struct devfreq *find_device_devfreq(struct device *dev)
  53{
  54        struct devfreq *tmp_devfreq;
  55
  56        if (unlikely(IS_ERR_OR_NULL(dev))) {
  57                pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
  58                return ERR_PTR(-EINVAL);
  59        }
  60        WARN(!mutex_is_locked(&devfreq_list_lock),
  61             "devfreq_list_lock must be locked.");
  62
  63        list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
  64                if (tmp_devfreq->dev.parent == dev)
  65                        return tmp_devfreq;
  66        }
  67
  68        return ERR_PTR(-ENODEV);
  69}
  70
  71/**
  72 * devfreq_get_freq_level() - Lookup freq_table for the frequency
  73 * @devfreq:    the devfreq instance
  74 * @freq:       the target frequency
  75 */
  76static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
  77{
  78        int lev;
  79
  80        for (lev = 0; lev < devfreq->profile->max_state; lev++)
  81                if (freq == devfreq->profile->freq_table[lev])
  82                        return lev;
  83
  84        return -EINVAL;
  85}
  86
  87/**
  88 * devfreq_update_status() - Update statistics of devfreq behavior
  89 * @devfreq:    the devfreq instance
  90 * @freq:       the update target frequency
  91 */
  92static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
  93{
  94        int lev, prev_lev;
  95        unsigned long cur_time;
  96
  97        lev = devfreq_get_freq_level(devfreq, freq);
  98        if (lev < 0)
  99                return lev;
 100
 101        cur_time = jiffies;
 102        devfreq->time_in_state[lev] +=
 103                         cur_time - devfreq->last_stat_updated;
 104        if (freq != devfreq->previous_freq) {
 105                prev_lev = devfreq_get_freq_level(devfreq,
 106                                                devfreq->previous_freq);
 107                devfreq->trans_table[(prev_lev *
 108                                devfreq->profile->max_state) + lev]++;
 109                devfreq->total_trans++;
 110        }
 111        devfreq->last_stat_updated = cur_time;
 112
 113        return 0;
 114}
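
/*
 * Illustrative note (not from the original source): trans_table is a
 * max_state x max_state matrix flattened into a single array and indexed
 * as [previous_level * max_state + new_level].  For example, with
 * max_state = 3, a switch from frequency level 1 to level 2 increments
 * trans_table[1 * 3 + 2]; show_trans_table() below prints this matrix
 * row by row together with time_in_state.
 */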
 115
 116/**
 117 * find_devfreq_governor() - find devfreq governor from name
 118 * @name:       name of the governor
 119 *
 120 * Search the list of devfreq governors and return the matched
 121 * governor's pointer. devfreq_list_lock should be held by the caller.
 122 */
 123static struct devfreq_governor *find_devfreq_governor(const char *name)
 124{
 125        struct devfreq_governor *tmp_governor;
 126
 127        if (unlikely(IS_ERR_OR_NULL(name))) {
 128                pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 129                return ERR_PTR(-EINVAL);
 130        }
 131        WARN(!mutex_is_locked(&devfreq_list_lock),
 132             "devfreq_list_lock must be locked.");
 133
 134        list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
 135                if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
 136                        return tmp_governor;
 137        }
 138
 139        return ERR_PTR(-ENODEV);
 140}
 141
 142/* Load monitoring helper functions for governors use */
 143
 144/**
 145 * update_devfreq() - Reevaluate the device and configure frequency.
 146 * @devfreq:    the devfreq instance.
 147 *
 148 * Note: Lock devfreq->lock before calling update_devfreq
 149 *       This function is exported for governors.
 150 */
 151int update_devfreq(struct devfreq *devfreq)
 152{
 153        unsigned long freq;
 154        int err = 0;
 155        u32 flags = 0;
 156
 157        if (!mutex_is_locked(&devfreq->lock)) {
 158                WARN(true, "devfreq->lock must be locked by the caller.\n");
 159                return -EINVAL;
 160        }
 161
 162        if (!devfreq->governor)
 163                return -EINVAL;
 164
 165        /* Reevaluate the proper frequency */
 166        err = devfreq->governor->get_target_freq(devfreq, &freq);
 167        if (err)
 168                return err;
 169
 170        /*
 171         * Adjust the frequency with user freq and QoS.
 172         *
 173         * List from the highest priority
 174         * max_freq (probably called by thermal when it's too hot)
 175         * min_freq
 176         */
 177
 178        if (devfreq->min_freq && freq < devfreq->min_freq) {
 179                freq = devfreq->min_freq;
 180                flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
 181        }
 182        if (devfreq->max_freq && freq > devfreq->max_freq) {
 183                freq = devfreq->max_freq;
 184                flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
 185        }
 186
 187        err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
 188        if (err)
 189                return err;
 190
 191        if (devfreq->profile->freq_table)
 192                if (devfreq_update_status(devfreq, freq))
 193                        dev_err(&devfreq->dev,
 194                                "Couldn't update frequency transition information.\n");
 195
 196        devfreq->previous_freq = freq;
 197        return err;
 198}
 199EXPORT_SYMBOL(update_devfreq);
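
/*
 * Illustrative note (not from the original source): how the min/max clamping
 * in update_devfreq() combines with DEVFREQ_FLAG_LEAST_UPPER_BOUND, assuming
 * a hypothetical OPP table of { 100 MHz, 200 MHz, 300 MHz }:
 *
 *  - The governor asks for 250 MHz while max_freq is 200 MHz: freq is lowered
 *    to 200 MHz and LEAST_UPPER_BOUND is set, so the target() callback should
 *    pick the highest OPP at or below 200 MHz (200 MHz).
 *  - The governor asks for 150 MHz while min_freq is 180 MHz: freq is raised
 *    to 180 MHz and LEAST_UPPER_BOUND is cleared, so the target() callback
 *    should pick the lowest OPP at or above 180 MHz (200 MHz).
 *
 * Drivers typically implement this selection with devfreq_recommended_opp(),
 * defined later in this file.
 */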
 200
 201/**
 202 * devfreq_monitor() - Periodically poll devfreq objects.
 203 * @work:       the work struct used to run devfreq_monitor periodically.
 204 *
 205 */
 206static void devfreq_monitor(struct work_struct *work)
 207{
 208        int err;
 209        struct devfreq *devfreq = container_of(work,
 210                                        struct devfreq, work.work);
 211
 212        mutex_lock(&devfreq->lock);
 213        err = update_devfreq(devfreq);
 214        if (err)
 215                dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
 216
 217        queue_delayed_work(devfreq_wq, &devfreq->work,
 218                                msecs_to_jiffies(devfreq->profile->polling_ms));
 219        mutex_unlock(&devfreq->lock);
 220}
 221
 222/**
 223 * devfreq_monitor_start() - Start load monitoring of a devfreq instance
 224 * @devfreq:    the devfreq instance.
 225 *
 226 * Helper function for starting devfreq device load monitoring. By
 227 * default delayed work based monitoring is supported. Function
 228 * to be called from governor in response to DEVFREQ_GOV_START
 229 * event when device is added to devfreq framework.
 230 */
 231void devfreq_monitor_start(struct devfreq *devfreq)
 232{
 233        INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
 234        if (devfreq->profile->polling_ms)
 235                queue_delayed_work(devfreq_wq, &devfreq->work,
 236                        msecs_to_jiffies(devfreq->profile->polling_ms));
 237}
 238EXPORT_SYMBOL(devfreq_monitor_start);
 239
 240/**
 241 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 242 * @devfreq:    the devfreq instance.
 243 *
 244 * Helper function to stop devfreq device load monitoring. Function
 245 * to be called from governor in response to DEVFREQ_GOV_STOP
 246 * event when device is removed from devfreq framework.
 247 */
 248void devfreq_monitor_stop(struct devfreq *devfreq)
 249{
 250        cancel_delayed_work_sync(&devfreq->work);
 251}
 252EXPORT_SYMBOL(devfreq_monitor_stop);
 253
 254/**
 255 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 256 * @devfreq:    the devfreq instance.
 257 *
 258 * Helper function to suspend devfreq device load monitoring. Function
 259 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
 260 * event or when polling interval is set to zero.
 261 *
 262 * Note: Though this function is the same as devfreq_monitor_stop(),
 263 * intentionally kept separate to provide hooks for collecting
 264 * transition statistics.
 265 */
 266void devfreq_monitor_suspend(struct devfreq *devfreq)
 267{
 268        mutex_lock(&devfreq->lock);
 269        if (devfreq->stop_polling) {
 270                mutex_unlock(&devfreq->lock);
 271                return;
 272        }
 273
 274        devfreq->stop_polling = true;
 275        mutex_unlock(&devfreq->lock);
 276        cancel_delayed_work_sync(&devfreq->work);
 277}
 278EXPORT_SYMBOL(devfreq_monitor_suspend);
 279
 280/**
 281 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 282 * @devfreq:    the devfreq instance.
 283 *
 284 * Helper function to resume devfreq device load monitoring. Function
 285 * to be called from governor in response to DEVFREQ_GOV_RESUME
 286 * event or when polling interval is set to non-zero.
 287 */
 288void devfreq_monitor_resume(struct devfreq *devfreq)
 289{
 290        mutex_lock(&devfreq->lock);
 291        if (!devfreq->stop_polling)
 292                goto out;
 293
 294        if (!delayed_work_pending(&devfreq->work) &&
 295                        devfreq->profile->polling_ms)
 296                queue_delayed_work(devfreq_wq, &devfreq->work,
 297                        msecs_to_jiffies(devfreq->profile->polling_ms));
 298        devfreq->stop_polling = false;
 299
 300out:
 301        mutex_unlock(&devfreq->lock);
 302}
 303EXPORT_SYMBOL(devfreq_monitor_resume);
 304
 305/**
 306 * devfreq_interval_update() - Update device devfreq monitoring interval
 307 * @devfreq:    the devfreq instance.
 308 * @delay:      new polling interval to be set.
 309 *
 310 * Helper function to set new load monitoring polling interval. Function
 311 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 312 */
 313void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
 314{
 315        unsigned int cur_delay = devfreq->profile->polling_ms;
 316        unsigned int new_delay = *delay;
 317
 318        mutex_lock(&devfreq->lock);
 319        devfreq->profile->polling_ms = new_delay;
 320
 321        if (devfreq->stop_polling)
 322                goto out;
 323
 324        /* if new delay is zero, stop polling */
 325        if (!new_delay) {
 326                mutex_unlock(&devfreq->lock);
 327                cancel_delayed_work_sync(&devfreq->work);
 328                return;
 329        }
 330
 331        /* if current delay is zero, start polling with new delay */
 332        if (!cur_delay) {
 333                queue_delayed_work(devfreq_wq, &devfreq->work,
 334                        msecs_to_jiffies(devfreq->profile->polling_ms));
 335                goto out;
 336        }
 337
 338        /* if current delay is greater than new delay, restart polling */
 339        if (cur_delay > new_delay) {
 340                mutex_unlock(&devfreq->lock);
 341                cancel_delayed_work_sync(&devfreq->work);
 342                mutex_lock(&devfreq->lock);
 343                if (!devfreq->stop_polling)
 344                        queue_delayed_work(devfreq_wq, &devfreq->work,
 345                              msecs_to_jiffies(devfreq->profile->polling_ms));
 346        }
 347out:
 348        mutex_unlock(&devfreq->lock);
 349}
 350EXPORT_SYMBOL(devfreq_interval_update);
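
/*
 * Illustrative sketch (not part of the original file): a minimal governor
 * built on the monitoring helpers above.  The "sample_*" names are
 * hypothetical; in-tree governors live in drivers/devfreq/governor_*.c.
 *
 *	static int sample_get_target_freq(struct devfreq *df,
 *					  unsigned long *freq)
 *	{
 *		*freq = UINT_MAX;	// always request the highest frequency
 *		return 0;
 *	}
 *
 *	static int sample_event_handler(struct devfreq *devfreq,
 *					unsigned int event, void *data)
 *	{
 *		switch (event) {
 *		case DEVFREQ_GOV_START:
 *			devfreq_monitor_start(devfreq);
 *			break;
 *		case DEVFREQ_GOV_STOP:
 *			devfreq_monitor_stop(devfreq);
 *			break;
 *		case DEVFREQ_GOV_INTERVAL:
 *			devfreq_interval_update(devfreq, (unsigned int *)data);
 *			break;
 *		case DEVFREQ_GOV_SUSPEND:
 *			devfreq_monitor_suspend(devfreq);
 *			break;
 *		case DEVFREQ_GOV_RESUME:
 *			devfreq_monitor_resume(devfreq);
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct devfreq_governor sample_governor = {
 *		.name		 = "sample",
 *		.get_target_freq = sample_get_target_freq,
 *		.event_handler	 = sample_event_handler,
 *	};
 */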
 351
 352/**
 353 * devfreq_notifier_call() - Notify that the device frequency requirements
 354 *                         have been changed outside of the devfreq framework.
 355 * @nb:         the notifier_block (supposed to be devfreq->nb)
 356 * @type:       not used
 357 * @devp:       not used
 358 *
 359 * Called by a notifier that uses devfreq->nb.
 360 */
 361static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
 362                                 void *devp)
 363{
 364        struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
 365        int ret;
 366
 367        mutex_lock(&devfreq->lock);
 368        ret = update_devfreq(devfreq);
 369        mutex_unlock(&devfreq->lock);
 370
 371        return ret;
 372}
 373
 374/**
 375 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 376 * @devfreq:    the devfreq struct
 377 * @skip:       skip calling device_unregister().
 378 */
 379static void _remove_devfreq(struct devfreq *devfreq, bool skip)
 380{
 381        mutex_lock(&devfreq_list_lock);
 382        if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
 383                mutex_unlock(&devfreq_list_lock);
 384                dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
 385                return;
 386        }
 387        list_del(&devfreq->node);
 388        mutex_unlock(&devfreq_list_lock);
 389
 390        if (devfreq->governor)
 391                devfreq->governor->event_handler(devfreq,
 392                                                 DEVFREQ_GOV_STOP, NULL);
 393
 394        if (devfreq->profile->exit)
 395                devfreq->profile->exit(devfreq->dev.parent);
 396
 397        if (!skip && get_device(&devfreq->dev)) {
 398                device_unregister(&devfreq->dev);
 399                put_device(&devfreq->dev);
 400        }
 401
 402        mutex_destroy(&devfreq->lock);
 403        kfree(devfreq);
 404}
 405
 406/**
 407 * devfreq_dev_release() - Callback for struct device to release the device.
 408 * @dev:        the devfreq device
 409 *
 410 * This calls _remove_devfreq() if it has not been called already.
 411 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 412 * well as by others unregistering the device.
 413 */
 414static void devfreq_dev_release(struct device *dev)
 415{
 416        struct devfreq *devfreq = to_devfreq(dev);
 417
 418        _remove_devfreq(devfreq, true);
 419}
 420
 421/**
 422 * devfreq_add_device() - Add devfreq feature to the device
 423 * @dev:        the device to add devfreq feature.
 424 * @profile:    device-specific profile to run devfreq.
 425 * @governor_name:      name of the policy to choose frequency.
 426 * @data:       private data for the governor. The devfreq framework does not
 427 *              touch this value.
 428 */
 429struct devfreq *devfreq_add_device(struct device *dev,
 430                                   struct devfreq_dev_profile *profile,
 431                                   const char *governor_name,
 432                                   void *data)
 433{
 434        struct devfreq *devfreq;
 435        struct devfreq_governor *governor;
 436        int err = 0;
 437
 438        if (!dev || !profile || !governor_name) {
 439                dev_err(dev, "%s: Invalid parameters.\n", __func__);
 440                return ERR_PTR(-EINVAL);
 441        }
 442
 443        mutex_lock(&devfreq_list_lock);
 444        devfreq = find_device_devfreq(dev);
 445        mutex_unlock(&devfreq_list_lock);
 446        if (!IS_ERR(devfreq)) {
 447                dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
 448                err = -EINVAL;
 449                goto err_out;
 450        }
 451
 452        devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
 453        if (!devfreq) {
 454                dev_err(dev, "%s: Unable to create devfreq for the device\n",
 455                        __func__);
 456                err = -ENOMEM;
 457                goto err_out;
 458        }
 459
 460        mutex_init(&devfreq->lock);
 461        mutex_lock(&devfreq->lock);
 462        devfreq->dev.parent = dev;
 463        devfreq->dev.class = devfreq_class;
 464        devfreq->dev.release = devfreq_dev_release;
 465        devfreq->profile = profile;
 466        strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
 467        devfreq->previous_freq = profile->initial_freq;
 468        devfreq->data = data;
 469        devfreq->nb.notifier_call = devfreq_notifier_call;
 470
 471        devfreq->trans_table =  devm_kzalloc(dev, sizeof(unsigned int) *
 472                                                devfreq->profile->max_state *
 473                                                devfreq->profile->max_state,
 474                                                GFP_KERNEL);
 475        devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
 476                                                devfreq->profile->max_state,
 477                                                GFP_KERNEL);
 478        devfreq->last_stat_updated = jiffies;
 479
 480        dev_set_name(&devfreq->dev, "%s", dev_name(dev));
 481        err = device_register(&devfreq->dev);
 482        if (err) {
 483                put_device(&devfreq->dev);
 484                mutex_unlock(&devfreq->lock);
 485                goto err_dev;
 486        }
 487
 488        mutex_unlock(&devfreq->lock);
 489
 490        mutex_lock(&devfreq_list_lock);
 491        list_add(&devfreq->node, &devfreq_list);
 492
 493        governor = find_devfreq_governor(devfreq->governor_name);
 494        if (!IS_ERR(governor))
 495                devfreq->governor = governor;
 496        if (devfreq->governor)
 497                err = devfreq->governor->event_handler(devfreq,
 498                                        DEVFREQ_GOV_START, NULL);
 499        mutex_unlock(&devfreq_list_lock);
 500        if (err) {
 501                dev_err(dev, "%s: Unable to start governor for the device\n",
 502                        __func__);
 503                goto err_init;
 504        }
 505
 506        return devfreq;
 507
 508err_init:
 509        list_del(&devfreq->node);
 510        device_unregister(&devfreq->dev);
 511err_dev:
 512        kfree(devfreq);
 513err_out:
 514        return ERR_PTR(err);
 515}
 516EXPORT_SYMBOL(devfreq_add_device);
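
/*
 * Illustrative sketch (not part of the original file): how a device driver
 * might register with devfreq.  The "foo_*" names are hypothetical, and the
 * in-tree "simple_ondemand" governor is assumed to be available.
 *
 *	static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		// pick a supported OPP and reprogram the device's clock/voltage
 *		return 0;
 *	}
 *
 *	static int foo_get_dev_status(struct device *dev,
 *				      struct devfreq_dev_status *stat)
 *	{
 *		// fill stat->busy_time, stat->total_time and
 *		// stat->current_frequency from hardware counters
 *		return 0;
 *	}
 *
 *	static struct devfreq_dev_profile foo_profile = {
 *		.initial_freq	= 200000000,
 *		.polling_ms	= 100,
 *		.target		= foo_target,
 *		.get_dev_status	= foo_get_dev_status,
 *	};
 *
 *	// In the driver's probe():
 *	df = devfreq_add_device(&pdev->dev, &foo_profile, "simple_ondemand",
 *				NULL);
 *	if (IS_ERR(df))
 *		return PTR_ERR(df);
 */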
 517
 518/**
 519 * devfreq_remove_device() - Remove devfreq feature from a device.
 520 * @devfreq:    the devfreq instance to be removed
 521 */
 522int devfreq_remove_device(struct devfreq *devfreq)
 523{
 524        if (!devfreq)
 525                return -EINVAL;
 526
 527        _remove_devfreq(devfreq, false);
 528
 529        return 0;
 530}
 531EXPORT_SYMBOL(devfreq_remove_device);
 532
 533/**
 534 * devfreq_suspend_device() - Suspend devfreq of a device.
 535 * @devfreq: the devfreq instance to be suspended
 536 */
 537int devfreq_suspend_device(struct devfreq *devfreq)
 538{
 539        if (!devfreq)
 540                return -EINVAL;
 541
 542        if (!devfreq->governor)
 543                return 0;
 544
 545        return devfreq->governor->event_handler(devfreq,
 546                                DEVFREQ_GOV_SUSPEND, NULL);
 547}
 548EXPORT_SYMBOL(devfreq_suspend_device);
 549
 550/**
 551 * devfreq_resume_device() - Resume devfreq of a device.
 552 * @devfreq: the devfreq instance to be resumed
 553 */
 554int devfreq_resume_device(struct devfreq *devfreq)
 555{
 556        if (!devfreq)
 557                return -EINVAL;
 558
 559        if (!devfreq->governor)
 560                return 0;
 561
 562        return devfreq->governor->event_handler(devfreq,
 563                                DEVFREQ_GOV_RESUME, NULL);
 564}
 565EXPORT_SYMBOL(devfreq_resume_device);
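
/*
 * Illustrative sketch (not part of the original file): a driver would
 * normally call the two helpers above from its own power management
 * callbacks.  "struct foo" and its devfreq member are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_suspend_device(foo->devfreq);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		return devfreq_resume_device(foo->devfreq);
 *	}
 */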
 566
 567/**
 568 * devfreq_add_governor() - Add devfreq governor
 569 * @governor:   the devfreq governor to be added
 570 */
 571int devfreq_add_governor(struct devfreq_governor *governor)
 572{
 573        struct devfreq_governor *g;
 574        struct devfreq *devfreq;
 575        int err = 0;
 576
 577        if (!governor) {
 578                pr_err("%s: Invalid parameters.\n", __func__);
 579                return -EINVAL;
 580        }
 581
 582        mutex_lock(&devfreq_list_lock);
 583        g = find_devfreq_governor(governor->name);
 584        if (!IS_ERR(g)) {
 585                pr_err("%s: governor %s already registered\n", __func__,
 586                       g->name);
 587                err = -EINVAL;
 588                goto err_out;
 589        }
 590
 591        list_add(&governor->node, &devfreq_governor_list);
 592
 593        list_for_each_entry(devfreq, &devfreq_list, node) {
 594                int ret = 0;
 595                struct device *dev = devfreq->dev.parent;
 596
 597                if (!strncmp(devfreq->governor_name, governor->name,
 598                             DEVFREQ_NAME_LEN)) {
 599                        /* The following should never occur */
 600                        if (devfreq->governor) {
 601                                dev_warn(dev,
 602                                         "%s: Governor %s already present\n",
 603                                         __func__, devfreq->governor->name);
 604                                ret = devfreq->governor->event_handler(devfreq,
 605                                                        DEVFREQ_GOV_STOP, NULL);
 606                                if (ret) {
 607                                        dev_warn(dev,
 608                                                 "%s: Governor %s stop = %d\n",
 609                                                 __func__,
 610                                                 devfreq->governor->name, ret);
 611                                }
 612                                /* Fall through */
 613                        }
 614                        devfreq->governor = governor;
 615                        ret = devfreq->governor->event_handler(devfreq,
 616                                                DEVFREQ_GOV_START, NULL);
 617                        if (ret) {
 618                                dev_warn(dev, "%s: Governor %s start=%d\n",
 619                                         __func__, devfreq->governor->name,
 620                                         ret);
 621                        }
 622                }
 623        }
 624
 625err_out:
 626        mutex_unlock(&devfreq_list_lock);
 627
 628        return err;
 629}
 630EXPORT_SYMBOL(devfreq_add_governor);
 631
 632/**
 633 * devfreq_remove_governor() - Remove devfreq governor.
 634 * @governor:   the devfreq governor to be removed
 635 */
 636int devfreq_remove_governor(struct devfreq_governor *governor)
 637{
 638        struct devfreq_governor *g;
 639        struct devfreq *devfreq;
 640        int err = 0;
 641
 642        if (!governor) {
 643                pr_err("%s: Invalid parameters.\n", __func__);
 644                return -EINVAL;
 645        }
 646
 647        mutex_lock(&devfreq_list_lock);
 648        g = find_devfreq_governor(governor->name);
 649        if (IS_ERR(g)) {
 650                pr_err("%s: governor %s not registered\n", __func__,
 651                       governor->name);
 652                err = PTR_ERR(g);
 653                goto err_out;
 654        }
 655        list_for_each_entry(devfreq, &devfreq_list, node) {
 656                int ret;
 657                struct device *dev = devfreq->dev.parent;
 658
 659                if (!strncmp(devfreq->governor_name, governor->name,
 660                             DEVFREQ_NAME_LEN)) {
 661                        /* we should have a devfreq governor! */
 662                        if (!devfreq->governor) {
 663                                dev_warn(dev, "%s: Governor %s NOT present\n",
 664                                         __func__, governor->name);
 665                                continue;
 667                        }
 668                        ret = devfreq->governor->event_handler(devfreq,
 669                                                DEVFREQ_GOV_STOP, NULL);
 670                        if (ret) {
 671                                dev_warn(dev, "%s: Governor %s stop=%d\n",
 672                                         __func__, devfreq->governor->name,
 673                                         ret);
 674                        }
 675                        devfreq->governor = NULL;
 676                }
 677        }
 678
 679        list_del(&governor->node);
 680err_out:
 681        mutex_unlock(&devfreq_list_lock);
 682
 683        return err;
 684}
 685EXPORT_SYMBOL(devfreq_remove_governor);
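
/*
 * Illustrative sketch (not part of the original file): a governor module
 * registers and unregisters itself with the two calls above
 * ("sample_governor" is the hypothetical governor from the earlier sketch):
 *
 *	static int __init sample_governor_init(void)
 *	{
 *		return devfreq_add_governor(&sample_governor);
 *	}
 *	subsys_initcall(sample_governor_init);
 *
 *	static void __exit sample_governor_exit(void)
 *	{
 *		int ret = devfreq_remove_governor(&sample_governor);
 *
 *		if (ret)
 *			pr_err("%s: failed to remove governor: %d\n",
 *			       __func__, ret);
 *	}
 *	module_exit(sample_governor_exit);
 */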
 686
 687static ssize_t show_governor(struct device *dev,
 688                             struct device_attribute *attr, char *buf)
 689{
 690        if (!to_devfreq(dev)->governor)
 691                return -EINVAL;
 692
 693        return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
 694}
 695
 696static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
 697                              const char *buf, size_t count)
 698{
 699        struct devfreq *df = to_devfreq(dev);
 700        int ret;
 701        char str_governor[DEVFREQ_NAME_LEN + 1];
 702        struct devfreq_governor *governor;
 703
 704        ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
 705        if (ret != 1)
 706                return -EINVAL;
 707
 708        mutex_lock(&devfreq_list_lock);
 709        governor = find_devfreq_governor(str_governor);
 710        if (IS_ERR(governor)) {
 711                ret = PTR_ERR(governor);
 712                goto out;
 713        }
 714        if (df->governor == governor)
 715                goto out;
 716
 717        if (df->governor) {
 718                ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
 719                if (ret) {
 720                        dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
 721                                 __func__, df->governor->name, ret);
 722                        goto out;
 723                }
 724        }
 725        df->governor = governor;
 726        strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
 727        ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
 728        if (ret)
 729                dev_warn(dev, "%s: Governor %s not started(%d)\n",
 730                         __func__, df->governor->name, ret);
 731out:
 732        mutex_unlock(&devfreq_list_lock);
 733
 734        if (!ret)
 735                ret = count;
 736        return ret;
 737}
 738static ssize_t show_available_governors(struct device *d,
 739                                    struct device_attribute *attr,
 740                                    char *buf)
 741{
 742        struct devfreq_governor *tmp_governor;
 743        ssize_t count = 0;
 744
 745        mutex_lock(&devfreq_list_lock);
 746        list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
 747                count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
 748                                   "%s ", tmp_governor->name);
 749        mutex_unlock(&devfreq_list_lock);
 750
 751        /* Truncate the trailing space */
 752        if (count)
 753                count--;
 754
 755        count += sprintf(&buf[count], "\n");
 756
 757        return count;
 758}
 759
 760static ssize_t show_freq(struct device *dev,
 761                         struct device_attribute *attr, char *buf)
 762{
 763        unsigned long freq;
 764        struct devfreq *devfreq = to_devfreq(dev);
 765
 766        if (devfreq->profile->get_cur_freq &&
 767                !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
 768                        return sprintf(buf, "%lu\n", freq);
 769
 770        return sprintf(buf, "%lu\n", devfreq->previous_freq);
 771}
 772
 773static ssize_t show_target_freq(struct device *dev,
 774                        struct device_attribute *attr, char *buf)
 775{
 776        return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
 777}
 778
 779static ssize_t show_polling_interval(struct device *dev,
 780                                     struct device_attribute *attr, char *buf)
 781{
 782        return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
 783}
 784
 785static ssize_t store_polling_interval(struct device *dev,
 786                                      struct device_attribute *attr,
 787                                      const char *buf, size_t count)
 788{
 789        struct devfreq *df = to_devfreq(dev);
 790        unsigned int value;
 791        int ret;
 792
 793        if (!df->governor)
 794                return -EINVAL;
 795
 796        ret = sscanf(buf, "%u", &value);
 797        if (ret != 1)
 798                return -EINVAL;
 799
 800        df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
 801        ret = count;
 802
 803        return ret;
 804}
 805
 806static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
 807                              const char *buf, size_t count)
 808{
 809        struct devfreq *df = to_devfreq(dev);
 810        unsigned long value;
 811        int ret;
 812        unsigned long max;
 813
 814        ret = sscanf(buf, "%lu", &value);
 815        if (ret != 1)
 816                return -EINVAL;
 817
 818        mutex_lock(&df->lock);
 819        max = df->max_freq;
 820        if (value && max && value > max) {
 821                ret = -EINVAL;
 822                goto unlock;
 823        }
 824
 825        df->min_freq = value;
 826        update_devfreq(df);
 827        ret = count;
 828unlock:
 829        mutex_unlock(&df->lock);
 830        return ret;
 831}
 832
 833static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
 834                             char *buf)
 835{
 836        return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
 837}
 838
 839static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
 840                              const char *buf, size_t count)
 841{
 842        struct devfreq *df = to_devfreq(dev);
 843        unsigned long value;
 844        int ret;
 845        unsigned long min;
 846
 847        ret = sscanf(buf, "%lu", &value);
 848        if (ret != 1)
 849                return -EINVAL;
 850
 851        mutex_lock(&df->lock);
 852        min = df->min_freq;
 853        if (value && min && value < min) {
 854                ret = -EINVAL;
 855                goto unlock;
 856        }
 857
 858        df->max_freq = value;
 859        update_devfreq(df);
 860        ret = count;
 861unlock:
 862        mutex_unlock(&df->lock);
 863        return ret;
 864}
 865
 866static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
 867                             char *buf)
 868{
 869        return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
 870}
 871
 872static ssize_t show_available_freqs(struct device *d,
 873                                    struct device_attribute *attr,
 874                                    char *buf)
 875{
 876        struct devfreq *df = to_devfreq(d);
 877        struct device *dev = df->dev.parent;
 878        struct opp *opp;
 879        ssize_t count = 0;
 880        unsigned long freq = 0;
 881
 882        rcu_read_lock();
 883        do {
 884                opp = opp_find_freq_ceil(dev, &freq);
 885                if (IS_ERR(opp))
 886                        break;
 887
 888                count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
 889                                   "%lu ", freq);
 890                freq++;
 891        } while (1);
 892        rcu_read_unlock();
 893
 894        /* Truncate the trailing space */
 895        if (count)
 896                count--;
 897
 898        count += sprintf(&buf[count], "\n");
 899
 900        return count;
 901}
 902
 903static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
 904                                char *buf)
 905{
 906        struct devfreq *devfreq = to_devfreq(dev);
 907        ssize_t len;
 908        int i, j, err;
 909        unsigned int max_state = devfreq->profile->max_state;
 910
 911        err = devfreq_update_status(devfreq, devfreq->previous_freq);
 912        if (err)
 913                return 0;
 914
 915        len = sprintf(buf, "   From  :   To\n");
 916        len += sprintf(buf + len, "         :");
 917        for (i = 0; i < max_state; i++)
 918                len += sprintf(buf + len, "%8u",
 919                                devfreq->profile->freq_table[i]);
 920
 921        len += sprintf(buf + len, "   time(ms)\n");
 922
 923        for (i = 0; i < max_state; i++) {
 924                if (devfreq->profile->freq_table[i]
 925                                        == devfreq->previous_freq) {
 926                        len += sprintf(buf + len, "*");
 927                } else {
 928                        len += sprintf(buf + len, " ");
 929                }
 930                len += sprintf(buf + len, "%8u:",
 931                                devfreq->profile->freq_table[i]);
 932                for (j = 0; j < max_state; j++)
 933                        len += sprintf(buf + len, "%8u",
 934                                devfreq->trans_table[(i * max_state) + j]);
 935                len += sprintf(buf + len, "%10u\n",
 936                        jiffies_to_msecs(devfreq->time_in_state[i]));
 937        }
 938
 939        len += sprintf(buf + len, "Total transition : %u\n",
 940                                        devfreq->total_trans);
 941        return len;
 942}
 943
 944static struct device_attribute devfreq_attrs[] = {
 945        __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
 946        __ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
 947        __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
 948        __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
 949        __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
 950        __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
 951               store_polling_interval),
 952        __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
 953        __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
 954        __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
 955        { },
 956};
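
/*
 * Note (illustrative, not from the original source): the attributes above are
 * exposed under /sys/class/devfreq/<device name>/, where <device name> is the
 * parent device's name as set by dev_set_name() in devfreq_add_device().
 * With a hypothetical device "foo" the interface can be exercised as:
 *
 *	# cat /sys/class/devfreq/foo/available_governors
 *	# echo simple_ondemand > /sys/class/devfreq/foo/governor
 *	# echo 50 > /sys/class/devfreq/foo/polling_interval
 *	# cat /sys/class/devfreq/foo/trans_stat
 */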
 957
 958static int __init devfreq_init(void)
 959{
 960        devfreq_class = class_create(THIS_MODULE, "devfreq");
 961        if (IS_ERR(devfreq_class)) {
 962                pr_err("%s: couldn't create class\n", __FILE__);
 963                return PTR_ERR(devfreq_class);
 964        }
 965
 966        devfreq_wq = create_freezable_workqueue("devfreq_wq");
 967        if (!devfreq_wq) {
 968                class_destroy(devfreq_class);
 969                pr_err("%s: couldn't create workqueue\n", __FILE__);
 970                return -ENOMEM;
 971        }
 972        devfreq_class->dev_attrs = devfreq_attrs;
 973
 974        return 0;
 975}
 976subsys_initcall(devfreq_init);
 977
 978static void __exit devfreq_exit(void)
 979{
 980        class_destroy(devfreq_class);
 981        destroy_workqueue(devfreq_wq);
 982}
 983module_exit(devfreq_exit);
 984
 985/*
 986 * The following are helper functions for devfreq user device drivers using
 987 * the OPP framework.
 988 */
 989
 990/**
 991 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 992 *                           freq value given to target callback.
 993 * @dev:        The devfreq user device. (parent of devfreq)
 994 * @freq:       The frequency given to target function
 995 * @flags:      Flags handed from devfreq framework.
 996 *
 997 * Locking: This function must be called under rcu_read_lock(). opp is an
 998 * RCU-protected pointer: the opp returned remains valid for use with
 999 * opp_get_{voltage, freq} only while the RCU read-side lock is held, so the
1000 * returned pointer must be used before unlocking with rcu_read_unlock() to
1001 * maintain its integrity.
1002 */
1003struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
1004                                    u32 flags)
1005{
1006        struct opp *opp;
1007
1008        if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1009                /* The freq is an upper bound. opp should be lower */
1010                opp = opp_find_freq_floor(dev, freq);
1011
1012                /* If not available, use the closest opp */
1013                if (opp == ERR_PTR(-ERANGE))
1014                        opp = opp_find_freq_ceil(dev, freq);
1015        } else {
1016                /* The freq is a lower bound. opp should be higher */
1017                opp = opp_find_freq_ceil(dev, freq);
1018
1019                /* If not available, use the closest opp */
1020                if (opp == ERR_PTR(-ERANGE))
1021                        opp = opp_find_freq_floor(dev, freq);
1022        }
1023
1024        return opp;
1025}
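
/*
 * Illustrative sketch (not part of the original file): typical use of
 * devfreq_recommended_opp() inside a driver's target() callback, following
 * the RCU locking rule documented above.  "foo_target" is the hypothetical
 * callback from the earlier devfreq_add_device() sketch.
 *
 *	static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
 *	{
 *		struct opp *opp;
 *		unsigned long volt;
 *
 *		rcu_read_lock();
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp)) {
 *			rcu_read_unlock();
 *			return PTR_ERR(opp);
 *		}
 *		volt = opp_get_voltage(opp);
 *		*freq = opp_get_freq(opp);
 *		rcu_read_unlock();
 *
 *		// program the clock to *freq and the regulator to volt here
 *		return 0;
 *	}
 */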
1026
1027/**
1028 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1029 *                                 for any changes in the OPP
1030 *                                 availability
1031 * @dev:        The devfreq user device. (parent of devfreq)
1032 * @devfreq:    The devfreq object.
1033 */
1034int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1035{
1036        struct srcu_notifier_head *nh;
1037        int ret = 0;
1038
1039        rcu_read_lock();
1040        nh = opp_get_notifier(dev);
1041        if (IS_ERR(nh))
1042                ret = PTR_ERR(nh);
1043        rcu_read_unlock();
1044        if (!ret)
1045                ret = srcu_notifier_chain_register(nh, &devfreq->nb);
1046
1047        return ret;
1048}
1049
1050/**
1051 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
1052 *                                   notified for any changes in the OPP
1053 *                                   availability.
1054 * @dev:        The devfreq user device. (parent of devfreq)
1055 * @devfreq:    The devfreq object.
1056 *
1057 * This must be called from the exit() callback of devfreq_dev_profile if
1058 * devfreq_recommended_opp is used.
1059 */
1060int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1061{
1062        struct srcu_notifier_head *nh;
1063        int ret = 0;
1064
1065        rcu_read_lock();
1066        nh = opp_get_notifier(dev);
1067        if (IS_ERR(nh))
1068                ret = PTR_ERR(nh);
1069        rcu_read_unlock();
1070        if (!ret)
1071                ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);
1072
1073        return ret;
1074}
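
/*
 * Illustrative sketch (not part of the original file): the notifier helpers
 * above are usually wired into the driver's probe path and into the
 * profile's exit() callback.  The "foo" names are hypothetical.
 *
 *	// After devfreq_add_device() has succeeded:
 *	devfreq_register_opp_notifier(dev, foo->devfreq);
 *
 *	static void foo_exit(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		devfreq_unregister_opp_notifier(dev, foo->devfreq);
 *	}
 */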
1075
1076MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1077MODULE_DESCRIPTION("devfreq class support");
1078MODULE_LICENSE("GPL");
1079