linux/drivers/net/ipa/ipa_clock.c
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>

#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_modem.h"
#include "ipa_data.h"

/**
 * DOC: IPA Clocking
 *
 * The "IPA Clock" manages both the IPA core clock and the interconnects
 * (buses) the IPA depends on as a single logical entity.  A reference count
 * is incremented by "get" operations and decremented by "put" operations.
 * Transitions of that count from 0 to 1 result in the clock and interconnects
 * being enabled, and transitions of the count from 1 to 0 cause them to be
 * disabled.  We currently operate the core clock at a fixed clock rate, and
 * all buses at a fixed average and peak bandwidth.  As more advanced IPA
 * features are enabled, we can make better use of clock and bus scaling.
 *
 * An IPA clock reference must be held for any access to IPA hardware.
 */
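
/* Illustrative sketch (not additional API): per the note above, a caller
 * about to access IPA hardware is expected to hold a clock reference for
 * the duration of the access, along the lines of:
 *
 *        ipa_clock_get(ipa);
 *        ...access IPA registers...
 *        ipa_clock_put(ipa);
 *
 * ipa_clock_get_additional() (defined below) only takes a reference if the
 * clock is already running; it never enables the clock itself.
 */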

/**
 * struct ipa_interconnect - IPA interconnect information
 * @path:               Interconnect path
 * @average_bandwidth:  Average interconnect bandwidth (KB/second)
 * @peak_bandwidth:     Peak interconnect bandwidth (KB/second)
 */
struct ipa_interconnect {
        struct icc_path *path;
        u32 average_bandwidth;
        u32 peak_bandwidth;
};

/**
 * struct ipa_clock - IPA clocking information
 * @count:              Clocking reference count
 * @mutex:              Protects clock enable/disable
 * @core:               IPA core clock
 * @interconnect_count: Number of elements in interconnect[]
 * @interconnect:       Interconnect array
 */
struct ipa_clock {
        refcount_t count;
        struct mutex mutex; /* protects clock enable/disable */
        struct clk *core;
        u32 interconnect_count;
        struct ipa_interconnect *interconnect;
};

static int ipa_interconnect_init_one(struct device *dev,
                                     struct ipa_interconnect *interconnect,
                                     const struct ipa_interconnect_data *data)
{
        struct icc_path *path;

        path = of_icc_get(dev, data->name);
        if (IS_ERR(path)) {
                int ret = PTR_ERR(path);

                dev_err_probe(dev, ret, "error getting %s interconnect\n",
                              data->name);

                return ret;
        }

        interconnect->path = path;
        interconnect->average_bandwidth = data->average_bandwidth;
        interconnect->peak_bandwidth = data->peak_bandwidth;

        return 0;
}

static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
        icc_put(interconnect->path);
        memset(interconnect, 0, sizeof(*interconnect));
}

/* Initialize interconnects required for IPA operation */
static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev,
                                 const struct ipa_interconnect_data *data)
{
        struct ipa_interconnect *interconnect;
        u32 count;
        int ret;

        count = clock->interconnect_count;
        interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
        if (!interconnect)
                return -ENOMEM;
        clock->interconnect = interconnect;

        while (count--) {
                ret = ipa_interconnect_init_one(dev, interconnect, data++);
                if (ret)
                        goto out_unwind;
                interconnect++;
        }

        return 0;

out_unwind:
        while (interconnect-- > clock->interconnect)
                ipa_interconnect_exit_one(interconnect);
        kfree(clock->interconnect);
        clock->interconnect = NULL;

        return ret;
}

/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_clock *clock)
{
        struct ipa_interconnect *interconnect;

        interconnect = clock->interconnect + clock->interconnect_count;
        while (interconnect-- > clock->interconnect)
                ipa_interconnect_exit_one(interconnect);
        kfree(clock->interconnect);
        clock->interconnect = NULL;
}

/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
        struct ipa_interconnect *interconnect;
        struct ipa_clock *clock = ipa->clock;
        int ret;
        u32 i;

        interconnect = clock->interconnect;
        for (i = 0; i < clock->interconnect_count; i++) {
                ret = icc_set_bw(interconnect->path,
                                 interconnect->average_bandwidth,
                                 interconnect->peak_bandwidth);
                if (ret)
                        goto out_unwind;
                interconnect++;
        }

        return 0;

out_unwind:
        while (interconnect-- > clock->interconnect)
                (void)icc_set_bw(interconnect->path, 0, 0);

        return ret;
}

/* To disable an interconnect, we just set its bandwidth to 0 */
static void ipa_interconnect_disable(struct ipa *ipa)
{
        struct ipa_interconnect *interconnect;
        struct ipa_clock *clock = ipa->clock;
        int result = 0;
        u32 count;
        int ret;

        count = clock->interconnect_count;
        interconnect = clock->interconnect + count;
        while (count--) {
                interconnect--;
                ret = icc_set_bw(interconnect->path, 0, 0);
                if (ret && !result)
                        result = ret;
        }

        /* Report only the first error recorded while disabling */
        if (result)
                dev_err(&ipa->pdev->dev,
                        "error %d disabling IPA interconnects\n", result);
}

/* Turn on IPA clocks, including interconnects */
static int ipa_clock_enable(struct ipa *ipa)
{
        int ret;

        ret = ipa_interconnect_enable(ipa);
        if (ret)
                return ret;

        ret = clk_prepare_enable(ipa->clock->core);
        if (ret)
                ipa_interconnect_disable(ipa);

        return ret;
}

/* Inverse of ipa_clock_enable() */
static void ipa_clock_disable(struct ipa *ipa)
{
        clk_disable_unprepare(ipa->clock->core);
        ipa_interconnect_disable(ipa);
}

/* Get an IPA clock reference, but only if the reference count is
 * already non-zero.  Returns true if the additional reference was
 * added successfully, or false otherwise.
 */
bool ipa_clock_get_additional(struct ipa *ipa)
{
        return refcount_inc_not_zero(&ipa->clock->count);
}

/* Get an IPA clock reference.  If the reference count is non-zero, it is
 * incremented and return is immediate.  Otherwise the count is checked again
 * under protection of the mutex, and if appropriate the IPA clock is enabled.
 *
 * Incrementing the reference count is intentionally deferred until
 * after the clock is running.
 */
void ipa_clock_get(struct ipa *ipa)
{
        struct ipa_clock *clock = ipa->clock;
        int ret;

        /* If the clock is running, just bump the reference count */
        if (ipa_clock_get_additional(ipa))
                return;

        /* Otherwise get the mutex and check again */
        mutex_lock(&clock->mutex);

        /* A reference might have been added before we got the mutex. */
        if (ipa_clock_get_additional(ipa))
                goto out_mutex_unlock;

        ret = ipa_clock_enable(ipa);
        if (ret) {
                dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
                goto out_mutex_unlock;
        }

        refcount_set(&clock->count, 1);

out_mutex_unlock:
        mutex_unlock(&clock->mutex);
}

/* Attempt to remove an IPA clock reference.  If this represents the
 * last reference, disable the IPA clock under protection of the mutex.
 */
void ipa_clock_put(struct ipa *ipa)
{
        struct ipa_clock *clock = ipa->clock;

        /* If this is not the last reference there's nothing more to do */
        if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
                return;

        ipa_clock_disable(ipa);

        mutex_unlock(&clock->mutex);
}

/* Return the current IPA core clock rate */
u32 ipa_clock_rate(struct ipa *ipa)
{
        return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
}

/* Initialize IPA clocking */
struct ipa_clock *
ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
{
        struct ipa_clock *clock;
        struct clk *clk;
        int ret;

        clk = clk_get(dev, "core");
        if (IS_ERR(clk)) {
                dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");

                return ERR_CAST(clk);
        }

        ret = clk_set_rate(clk, data->core_clock_rate);
        if (ret) {
                dev_err(dev, "error %d setting core clock rate to %u\n",
                        ret, data->core_clock_rate);
                goto err_clk_put;
        }

        clock = kzalloc(sizeof(*clock), GFP_KERNEL);
        if (!clock) {
                ret = -ENOMEM;
                goto err_clk_put;
        }
        clock->core = clk;
        clock->interconnect_count = data->interconnect_count;

        ret = ipa_interconnect_init(clock, dev, data->interconnect_data);
        if (ret)
                goto err_kfree;

        mutex_init(&clock->mutex);
        refcount_set(&clock->count, 0);

        return clock;

err_kfree:
        kfree(clock);
err_clk_put:
        clk_put(clk);

        return ERR_PTR(ret);
}

/* Inverse of ipa_clock_init() */
void ipa_clock_exit(struct ipa_clock *clock)
{
        struct clk *clk = clock->core;

        WARN_ON(refcount_read(&clock->count) != 0);
        mutex_destroy(&clock->mutex);
        ipa_interconnect_exit(clock);
        kfree(clock);
        clk_put(clk);
}