linux/drivers/cpufreq/cppc_cpufreq.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 */

#define pr_fmt(fmt)     "CPPC Cpufreq: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH  48

/* Offset of the "Max Speed" field (in MHz) in the SMBIOS processor entry */
#define DMI_PROCESSOR_MAX_SPEED         0x14

/*
 * This list contains information parsed from per-CPU ACPI _CPC and _PSD
 * structures: e.g. the highest and lowest supported performance, capabilities,
 * desired performance, level requested etc. Depending on the shared_type, not
 * all CPUs will have an entry in the list.
 */
static LIST_HEAD(cpu_data_list);

static bool boost_supported;

struct cppc_workaround_oem_info {
        char oem_id[ACPI_OEM_ID_SIZE + 1];
        char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1];
        u32 oem_revision;
};

static struct cppc_workaround_oem_info wa_info[] = {
        {
                .oem_id         = "HISI  ",
                .oem_table_id   = "HIP07   ",
                .oem_revision   = 0,
        }, {
                .oem_id         = "HISI  ",
                .oem_table_id   = "HIP08   ",
                .oem_revision   = 0,
        }
};

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
        const u8 *dmi_data = (const u8 *)dm;
        u16 *mhz = (u16 *)private;

        if (dm->type == DMI_ENTRY_PROCESSOR &&
            dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
                u16 val = (u16)get_unaligned((const u16 *)
                                (dmi_data + DMI_PROCESSOR_MAX_SPEED));
                *mhz = val > *mhz ? val : *mhz;
        }
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
        u16 mhz = 0;

        dmi_walk(cppc_find_dmi_mhz, &mhz);

        /*
         * Real stupid fallback value, just in case there is no
         * actual value set.
         */
        mhz = mhz ? mhz : 1;

        return (1000 * mhz);
}

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed, we can
 * use them to convert perf to freq and vice versa.
 *
 * If the perf/freq point lies between Lowest and Nominal, we treat
 * (Low perf, Low freq) and (Nom perf, Nom freq) as 2D coordinates on a line
 * and interpolate along it.
 * For perf/freq > Nominal, we use the perf:freq ratio at Nominal for the
 * conversion.
 */
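/*
 * A worked example with hypothetical capability values (not taken from any
 * real platform): lowest_perf = 100 at lowest_freq = 600000 kHz and
 * nominal_perf = 300 at nominal_freq = 1200000 kHz give a slope of
 * (1200000 - 600000) / (300 - 100) = 3000 kHz per perf unit and an offset
 * of 1200000 - 300 * 3000 = 300000 kHz. A request for perf = 200 then maps
 * to 300000 + 200 * 3000 = 900000 kHz; the slope alone would wrongly yield
 * 600000 kHz, which is why the offset term below matters.
 */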
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
                                             unsigned int perf)
{
        struct cppc_perf_caps *caps = &cpu_data->perf_caps;
        static u64 max_khz;
        s64 retval, offset = 0;
        u64 mul, div;

        if (caps->lowest_freq && caps->nominal_freq) {
                if (perf >= caps->nominal_perf) {
                        mul = caps->nominal_freq;
                        div = caps->nominal_perf;
                } else {
                        mul = caps->nominal_freq - caps->lowest_freq;
                        div = caps->nominal_perf - caps->lowest_perf;
                        /*
                         * The (lowest, nominal) line does not necessarily
                         * pass through the origin, so the slope alone is
                         * not enough to convert correctly.
                         */
                        offset = caps->nominal_freq -
                                 (u64)caps->nominal_perf * mul / div;
                }
        } else {
                if (!max_khz)
                        max_khz = cppc_get_dmi_max_khz();
                mul = max_khz;
                div = caps->highest_perf;
        }

        retval = offset + (u64)perf * mul / div;
        if (retval >= 0)
                return retval;
        return 0;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
                                             unsigned int freq)
{
        struct cppc_perf_caps *caps = &cpu_data->perf_caps;
        static u64 max_khz;
        s64 retval, offset = 0;
        u64 mul, div;

        if (caps->lowest_freq && caps->nominal_freq) {
                if (freq >= caps->nominal_freq) {
                        mul = caps->nominal_perf;
                        div = caps->nominal_freq;
                } else {
                        /* Invert the same (lowest, nominal) line as above */
                        mul = caps->nominal_perf - caps->lowest_perf;
                        div = caps->nominal_freq - caps->lowest_freq;
                        offset = caps->nominal_perf -
                                 (u64)caps->nominal_freq * mul / div;
                }
        } else {
                if (!max_khz)
                        max_khz = cppc_get_dmi_max_khz();
                mul = caps->highest_perf;
                div = max_khz;
        }

        retval = offset + (u64)freq * mul / div;
        if (retval >= 0)
                return retval;
        return 0;
}

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
                                   unsigned int target_freq,
                                   unsigned int relation)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;
        unsigned int cpu = policy->cpu;
        struct cpufreq_freqs freqs;
        u32 desired_perf;
        int ret = 0;

        desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
        /* Return if it is exactly the same perf */
        if (desired_perf == cpu_data->perf_ctrls.desired_perf)
                return ret;

        cpu_data->perf_ctrls.desired_perf = desired_perf;
        freqs.old = policy->cur;
        freqs.new = target_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
        cpufreq_freq_transition_end(policy, &freqs, ret != 0);

        if (ret)
                pr_debug("Failed to set target on CPU:%d. ret:%d\n",
                         cpu, ret);

        return ret;
}

static int cppc_verify_policy(struct cpufreq_policy_data *policy)
{
        cpufreq_verify_within_cpu_limits(policy);
        return 0;
}

static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;

        list_del(&cpu_data->node);
        free_cpumask_var(cpu_data->shared_cpu_map);
        kfree(cpu_data);
        policy->driver_data = NULL;
}

static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;
        struct cppc_perf_caps *caps = &cpu_data->perf_caps;
        unsigned int cpu = policy->cpu;
        int ret;

        cpu_data->perf_ctrls.desired_perf = caps->lowest_perf;

        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
        if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         caps->lowest_perf, cpu, ret);

        cppc_cpufreq_put_cpu_data(policy);
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs, which do not count
 * towards frequency transition requests), so ideally we should use the PCC
 * values as a fallback if we don't have a platform-specific
 * transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_num = read_cpuid_part_number();

        switch (implementor) {
        case ARM_CPU_IMP_QCOM:
                switch (part_num) {
                case QCOM_CPU_PART_FALKOR_V1:
                case QCOM_CPU_PART_FALKOR:
                        return 10000;
                }
        }
        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif
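
/*
 * Worked example with a hypothetical value (not from any real PCCT): if the
 * platform reports a CPPC transition latency of 30000000 ns, the fallback
 * above yields a transition_delay_us of 30000000 / NSEC_PER_USEC = 30000 us,
 * i.e. governors will wait at least 30 ms between frequency requests.
 */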

static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
        struct cppc_cpudata *cpu_data;
        int ret;

        cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
        if (!cpu_data)
                goto out;

        if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
                goto free_cpu;

        ret = acpi_get_psd_map(cpu, cpu_data);
        if (ret) {
                pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
                goto free_mask;
        }

        ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
        if (ret) {
                pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
                goto free_mask;
        }

        /* Convert the lowest and nominal frequencies from MHz to kHz */
        cpu_data->perf_caps.lowest_freq *= 1000;
        cpu_data->perf_caps.nominal_freq *= 1000;

        list_add(&cpu_data->node, &cpu_data_list);

        return cpu_data;

free_mask:
        free_cpumask_var(cpu_data->shared_cpu_map);
free_cpu:
        kfree(cpu_data);
out:
        return NULL;
}

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        unsigned int cpu = policy->cpu;
        struct cppc_cpudata *cpu_data;
        struct cppc_perf_caps *caps;
        int ret;

        cpu_data = cppc_cpufreq_get_cpu_data(cpu);
        if (!cpu_data) {
                pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
                return -ENODEV;
        }
        caps = &cpu_data->perf_caps;
        policy->driver_data = cpu_data;

        /*
         * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
         * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
         */
        policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
                                               caps->lowest_nonlinear_perf);
        policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
                                               caps->nominal_perf);

        /*
         * Set cpuinfo.min_freq to Lowest to make the full range of performance
         * available if userspace wants to use any perf between lowest & lowest
         * nonlinear perf
         */
        policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
                                                            caps->lowest_perf);
        policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
                                                            caps->nominal_perf);

        policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
        policy->shared_type = cpu_data->shared_type;

        switch (policy->shared_type) {
        case CPUFREQ_SHARED_TYPE_HW:
        case CPUFREQ_SHARED_TYPE_NONE:
                /* Nothing to be done - we'll have a policy for each CPU */
                break;
        case CPUFREQ_SHARED_TYPE_ANY:
                /*
                 * All CPUs in the domain will share a policy and all cpufreq
                 * operations will use a single cppc_cpudata structure stored
                 * in policy->driver_data.
                 */
                cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
                break;
        default:
                pr_debug("Unsupported CPU co-ord type: %d\n",
                         policy->shared_type);
                ret = -EFAULT;
                goto out;
        }

        /*
         * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
         * is supported.
         */
        if (caps->highest_perf > caps->nominal_perf)
                boost_supported = true;

        /* Set policy->cur to max now. The governors will adjust later. */
        policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
        cpu_data->perf_ctrls.desired_perf = caps->highest_perf;

        ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
        if (ret) {
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                         caps->highest_perf, cpu, ret);
                goto out;
        }

        return 0;

out:
        cppc_cpufreq_put_cpu_data(policy);
        return ret;
}

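/*
 * The feedback counters may be only 32 bits wide on some platforms even
 * though they are read as 64 bit values. If t0 still fits in 32 bits and
 * t1 appears to be smaller than t0, assume the counter wrapped around once
 * and compute the delta in 32 bit arithmetic.
 */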
static inline u64 get_delta(u64 t1, u64 t0)
{
        if (t1 > t0 || t0 > ~(u32)0)
                return t1 - t0;

        return (u32)t1 - (u32)t0;
}

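/*
 * The delivered performance is derived from two snapshots of the CPPC
 * feedback counters: the reference counter ticks at a rate corresponding to
 * reference_perf, so delivered_perf = reference_perf * delta_delivered /
 * delta_reference. For example, with a hypothetical reference_perf of 100,
 * a window where delta_reference = 1000 and delta_delivered = 1500 means
 * the CPU ran at a delivered perf of 150 over that window.
 */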
static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
                                     struct cppc_perf_fb_ctrs fb_ctrs_t0,
                                     struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
        u64 delta_reference, delta_delivered;
        u64 reference_perf, delivered_perf;

        reference_perf = fb_ctrs_t0.reference_perf;

        delta_reference = get_delta(fb_ctrs_t1.reference,
                                    fb_ctrs_t0.reference);
        delta_delivered = get_delta(fb_ctrs_t1.delivered,
                                    fb_ctrs_t0.delivered);

        /*
         * Avoid a divide-by-zero if the reference counter did not move, and
         * fall back to the last requested perf if either delta is zero.
         */
        if (!delta_reference || !delta_delivered)
                delivered_perf = cpu_data->perf_ctrls.desired_perf;
        else
                delivered_perf = (reference_perf * delta_delivered) /
                                        delta_reference;

        return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
}

static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
        struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct cppc_cpudata *cpu_data;
        int ret;

        if (!policy)
                return 0;

        cpu_data = policy->driver_data;
        cpufreq_cpu_put(policy);

        ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
        if (ret)
                return ret;

        udelay(2); /* 2usec delay between sampling */

        ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t1);
        if (ret)
                return ret;

        return cppc_get_rate_from_fbctrs(cpu_data, fb_ctrs_t0, fb_ctrs_t1);
}

static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;
        struct cppc_perf_caps *caps = &cpu_data->perf_caps;
        int ret;

        if (!boost_supported) {
                pr_err("BOOST not supported by CPU or firmware\n");
                return -EINVAL;
        }

        if (state)
                policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
                                                       caps->highest_perf);
        else
                policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
                                                       caps->nominal_perf);
        policy->cpuinfo.max_freq = policy->max;

        ret = freq_qos_update_request(policy->max_freq_req, policy->max);
        if (ret < 0)
                return ret;

        return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
        struct cppc_cpudata *cpu_data = policy->driver_data;

        return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
}
cpufreq_freq_attr_ro(freqdomain_cpus);

static struct freq_attr *cppc_cpufreq_attr[] = {
        &freqdomain_cpus,
        NULL,
};

static struct cpufreq_driver cppc_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = cppc_verify_policy,
        .target = cppc_cpufreq_set_target,
        .get = cppc_cpufreq_get_rate,
        .init = cppc_cpufreq_cpu_init,
        .stop_cpu = cppc_cpufreq_stop_cpu,
        .set_boost = cppc_cpufreq_set_boost,
        .attr = cppc_cpufreq_attr,
        .name = "cppc_cpufreq",
};

/*
 * HiSilicon platforms do not support the delivered performance counter or
 * the reference performance counter. They calculate the performance through
 * a platform-specific mechanism instead, and reuse the desired performance
 * register to store the real performance computed by the platform.
 */
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct cppc_cpudata *cpu_data;
        u64 desired_perf;
        int ret;

        if (!policy)
                return 0;

        cpu_data = policy->driver_data;
        cpufreq_cpu_put(policy);

        ret = cppc_get_desired_perf(cpu, &desired_perf);
        if (ret < 0)
                return -EIO;

        return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
}

static void cppc_check_hisi_workaround(void)
{
        struct acpi_table_header *tbl;
        acpi_status status = AE_OK;
        int i;

        status = acpi_get_table(ACPI_SIG_PCCT, 0, &tbl);
        if (ACPI_FAILURE(status) || !tbl)
                return;

        for (i = 0; i < ARRAY_SIZE(wa_info); i++) {
                if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) &&
                    !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
                    wa_info[i].oem_revision == tbl->oem_revision) {
                        /* Overwrite the get() callback */
                        cppc_cpufreq_driver.get = hisi_cppc_cpufreq_get_rate;
                        break;
                }
        }

        acpi_put_table(tbl);
}

static int __init cppc_cpufreq_init(void)
{
        if (acpi_disabled || !acpi_cpc_valid())
                return -ENODEV;

        INIT_LIST_HEAD(&cpu_data_list);

        cppc_check_hisi_workaround();

        return cpufreq_register_driver(&cppc_cpufreq_driver);
}

static inline void free_cpu_data(void)
{
        struct cppc_cpudata *iter, *tmp;

        list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
                free_cpumask_var(iter->shared_cpu_map);
                list_del(&iter->node);
                kfree(iter);
        }
}

static void __exit cppc_cpufreq_exit(void)
{
        cpufreq_unregister_driver(&cppc_cpufreq_driver);

        free_cpu_data();
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] __used = {
        {ACPI_PROCESSOR_DEVICE_HID, },
        {}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);