linux/drivers/acpi/processor_thermal.c
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysdev.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

/* --------------------------------------------------------------------------
                                 Limit Interface
   -------------------------------------------------------------------------- */
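/*
 * Apply the current limit to the processor: the stricter (higher) of the
 * user and thermal throttling limits is programmed through
 * acpi_processor_set_throttling().  P-state limiting is left to cpufreq,
 * so only the T-state (tx) part is acted upon here.
 */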
static int acpi_processor_apply_limit(struct acpi_processor *pr)
{
        int result = 0;
        u16 px = 0;
        u16 tx = 0;

        if (!pr)
                return -EINVAL;

        if (!pr->flags.limit)
                return -ENODEV;

        if (pr->flags.throttling) {
                if (pr->limit.user.tx > tx)
                        tx = pr->limit.user.tx;
                if (pr->limit.thermal.tx > tx)
                        tx = pr->limit.thermal.tx;

                result = acpi_processor_set_throttling(pr, tx);
                if (result)
                        goto end;
        }

        pr->limit.state.px = px;
        pr->limit.state.tx = tx;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
                          pr->limit.state.px, pr->limit.state.tx));

      end:
        if (result)
                printk(KERN_ERR PREFIX "Unable to set limit\n");

        return result;
}

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, cpufreq is used as the primary
 * cooling mechanism, as it offers (in most cases) voltage scaling in addition
 * to frequency scaling, and thus a cubic (instead of linear) reduction of
 * power consumption. Also, we allow _any_ cpufreq driver, not only the
 * acpi-cpufreq driver.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init;

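/*
 * A CPU can only be limited through cpufreq when our policy notifier has
 * been registered and a cpufreq policy actually exists for that CPU.
 */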
static int cpu_has_cpufreq(unsigned int cpu)
{
        struct cpufreq_policy policy;

        if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
                return 0;
        return 1;
}

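/*
 * acpi_thermal_cpufreq_increase() steps the per-CPU frequency reduction one
 * notch deeper (up to CPUFREQ_THERMAL_MAX_STEP); acpi_thermal_cpufreq_decrease()
 * releases one notch.  Both trigger a policy re-evaluation so that the new
 * frequency cap takes effect immediately.
 */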
static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;

        if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
                CPUFREQ_THERMAL_MAX_STEP) {
                per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
                cpufreq_update_policy(cpu);
                return 0;
        }

        return -ERANGE;
}

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;

        if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
                (CPUFREQ_THERMAL_MIN_STEP + 1))
                per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
        else
                per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
        cpufreq_update_policy(cpu);
        /* We reached max freq again and can leave passive mode */
        return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}

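/*
 * cpufreq policy notifier: on every CPUFREQ_ADJUST event the maximum
 * frequency is clamped to (100 - 20 * reduction_step)% of cpuinfo.max_freq,
 * i.e. 100%, 80%, 60% or 40% for steps 0..3.
 */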
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
                                         unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;

        if (event != CPUFREQ_ADJUST)
                goto out;

        max_freq = (
            policy->cpuinfo.max_freq *
            (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
        ) / 100;

        cpufreq_verify_within_limits(policy, 0, max_freq);

      out:
        return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
        .notifier_call = acpi_thermal_cpufreq_notifier,
};

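/*
 * Helpers that expose the cpufreq reduction steps as generic cooling-device
 * states.  They all degrade gracefully to "no states" when no cpufreq driver
 * is active on the CPU.
 */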
static int cpufreq_get_max_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
        cpufreq_update_policy(cpu);
        return 0;
}

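/*
 * Register/unregister the cpufreq policy notifier.  acpi_thermal_cpufreq_init()
 * also clears the per-CPU reduction step for every present CPU; the
 * cpufreq-based passive cooling path is only marked usable
 * (acpi_thermal_cpufreq_is_init) while registration is in effect.
 */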
void acpi_thermal_cpufreq_init(void)
{
        int i;

        for (i = 0; i < nr_cpu_ids; i++)
                if (cpu_present(i))
                        per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;

        i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
                                      CPUFREQ_POLICY_NOTIFIER);
        if (!i)
                acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
        if (acpi_thermal_cpufreq_is_init)
                cpufreq_unregister_notifier
                    (&acpi_thermal_cpufreq_notifier_block,
                     CPUFREQ_POLICY_NOTIFIER);

        acpi_thermal_cpufreq_is_init = 0;
}

#else                           /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        return 0;
}

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
        return -ENODEV;
}

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
        return -ENODEV;
}

#endif                          /* CONFIG_CPU_FREQ */

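/*
 * Adjust the processor's thermal limit by one step, or drop it entirely for
 * ACPI_PROCESSOR_LIMIT_NONE.  When incrementing, performance states are
 * reduced through cpufreq before T-state throttling is used; when
 * decrementing, throttling is released first.  Returns 1 once a decrement
 * leaves the processor fully unlimited, 0 on success, or a negative errno.
 */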
int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
{
        int result = 0;
        struct acpi_processor *pr = NULL;
        struct acpi_device *device = NULL;
        int tx = 0, max_tx_px = 0;

        if ((type < ACPI_PROCESSOR_LIMIT_NONE)
            || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
                return -EINVAL;

        result = acpi_bus_get_device(handle, &device);
        if (result)
                return result;

        pr = acpi_driver_data(device);
        if (!pr)
                return -ENODEV;

        /* Thermal limits are always relative to the current Px/Tx state. */
        if (pr->flags.throttling)
                pr->limit.thermal.tx = pr->throttling.state;

        /*
         * Our default policy is to only use throttling at the lowest
         * performance state.
         */

        tx = pr->limit.thermal.tx;

        switch (type) {

        case ACPI_PROCESSOR_LIMIT_NONE:
                do {
                        result = acpi_thermal_cpufreq_decrease(pr->id);
                } while (!result);
                tx = 0;
                break;

        case ACPI_PROCESSOR_LIMIT_INCREMENT:
                /* if going up: P-states first, T-states later */

                result = acpi_thermal_cpufreq_increase(pr->id);
                if (!result)
                        goto end;
                else if (result == -ERANGE)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "At maximum performance state\n"));

                if (pr->flags.throttling) {
                        if (tx == (pr->throttling.state_count - 1))
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                                  "At maximum throttling state\n"));
                        else
                                tx++;
                }
                break;

        case ACPI_PROCESSOR_LIMIT_DECREMENT:
                /* if going down: T-states first, P-states later */

                if (pr->flags.throttling) {
                        if (tx == 0) {
                                max_tx_px = 1;
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                                  "At minimum throttling state\n"));
                        } else {
                                tx--;
                                goto end;
                        }
                }

                result = acpi_thermal_cpufreq_decrease(pr->id);
                if (result) {
                        /*
                         * We can only get -ENODEV, 1 or 0 here.  A non-zero
                         * value means the CPU is back at full speed (or has
                         * no cpufreq support), so there is nothing further
                         * to release.
                         */
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "At minimum performance state\n"));
                        max_tx_px = 1;
                } else
                        max_tx_px = 0;

                break;
        }

      end:
        if (pr->flags.throttling) {
                pr->limit.thermal.px = 0;
                pr->limit.thermal.tx = tx;

                result = acpi_processor_apply_limit(pr);
                if (result)
                        printk(KERN_ERR PREFIX "Unable to set thermal limit\n");

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
                                  pr->limit.thermal.px, pr->limit.thermal.tx));
        } else
                result = 0;
        if (max_tx_px)
                return 1;
        else
                return result;
}

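/*
 * Limit support is reported whenever T-state throttling is available;
 * cpufreq-based limiting is handled separately above.
 */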
int acpi_processor_get_limit_info(struct acpi_processor *pr)
{
        if (!pr)
                return -EINVAL;

        if (pr->flags.throttling)
                pr->flags.limit = 1;

        return 0;
}

/* thermal cooling device callbacks */
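/*
 * The cooling device state space is the concatenation of the cpufreq
 * reduction steps (0..cpufreq_get_max_state()) and the available T-states:
 * low states are served by cpufreq alone, higher ones add throttling on top.
 */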
static int acpi_processor_max_state(struct acpi_processor *pr)
{
        int max_state = 0;

        /*
         * There exist four states according to
         * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3.
         */
        max_state += cpufreq_get_max_state(pr->id);
        if (pr->flags.throttling)
                max_state += (pr->throttling.state_count - 1);

        return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev,
                        unsigned long *state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        *state = acpi_processor_max_state(pr);
        return 0;
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
                        unsigned long *cur_state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        *cur_state = cpufreq_get_cur_state(pr->id);
        if (pr->flags.throttling)
                *cur_state += pr->throttling.state;
        return 0;
}

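/*
 * For states up to the deepest cpufreq step, any active throttling is
 * cleared and only the frequency cap is adjusted; beyond that, cpufreq is
 * pinned at its deepest step and the remainder is applied as a T-state.
 */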
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
                        unsigned long state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;
        int result = 0;
        int max_pstate;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        max_pstate = cpufreq_get_max_state(pr->id);

        if (state > acpi_processor_max_state(pr))
                return -EINVAL;

        if (state <= max_pstate) {
                if (pr->flags.throttling && pr->throttling.state)
                        result = acpi_processor_set_throttling(pr, 0);
                cpufreq_set_cur_state(pr->id, state);
        } else {
                cpufreq_set_cur_state(pr->id, max_pstate);
                result = acpi_processor_set_throttling(pr,
                                state - max_pstate);
        }
        return result;
}

struct thermal_cooling_device_ops processor_cooling_ops = {
        .get_max_state = processor_get_max_state,
        .get_cur_state = processor_get_cur_state,
        .set_cur_state = processor_set_cur_state,
};

/* /proc interface */

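/*
 * seq_file show handler for the processor limit interface: prints the
 * active, user and thermal limits as P<px>:T<tx> pairs, or "<not supported>"
 * when the processor has no limit support.
 */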
static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;

        if (!pr)
                goto end;

        if (!pr->flags.limit) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        seq_printf(seq, "active limit:            P%d:T%d\n"
                   "user limit:              P%d:T%d\n"
                   "thermal limit:           P%d:T%d\n",
                   pr->limit.state.px, pr->limit.state.tx,
                   pr->limit.user.px, pr->limit.user.tx,
                   pr->limit.thermal.px, pr->limit.thermal.tx);

      end:
        return 0;
}

static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_limit_seq_show,
                           PDE(inode)->data);
}

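/*
 * Write handler for the limit interface.  The input is expected as "px:tx";
 * only the tx part is honoured here (P-state limits are managed through
 * cpufreq), and it must lie within the processor's throttling state count.
 */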
static ssize_t acpi_processor_write_limit(struct file *file,
                                          const char __user *buffer,
                                          size_t count, loff_t *data)
{
        int result = 0;
        struct seq_file *m = file->private_data;
        struct acpi_processor *pr = m->private;
        char limit_string[25] = { '\0' };
        int px = 0;
        int tx = 0;

        if (!pr || (count > sizeof(limit_string) - 1))
                return -EINVAL;

        if (copy_from_user(limit_string, buffer, count))
                return -EFAULT;

        limit_string[count] = '\0';

        if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
                printk(KERN_ERR PREFIX "Invalid data format\n");
                return -EINVAL;
        }

        if (pr->flags.throttling) {
                if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
                        printk(KERN_ERR PREFIX "Invalid tx\n");
                        return -EINVAL;
                }
                pr->limit.user.tx = tx;
        }

        result = acpi_processor_apply_limit(pr);
        if (result)
                return result;

        return count;
}

const struct file_operations acpi_processor_limit_fops = {
        .owner = THIS_MODULE,
        .open = acpi_processor_limit_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_limit,
        .llseek = seq_lseek,
        .release = single_release,
};