/* linux/drivers/clk/imx/clk-busy.c */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright 2012 Freescale Semiconductor, Inc.
   4 * Copyright 2012 Linaro Ltd.
   5 */
   6
   7#include <linux/bits.h>
   8#include <linux/clk.h>
   9#include <linux/clk-provider.h>
  10#include <linux/io.h>
  11#include <linux/slab.h>
  12#include <linux/jiffies.h>
  13#include <linux/err.h>
  14#include "clk.h"
  15
  16static int clk_busy_wait(void __iomem *reg, u8 shift)
  17{
  18        unsigned long timeout = jiffies + msecs_to_jiffies(10);
  19
  20        while (readl_relaxed(reg) & (1 << shift))
  21                if (time_after(jiffies, timeout))
  22                        return -ETIMEDOUT;
  23
  24        return 0;
  25}
  26
/*
 * A clk_divider whose rate changes must be confirmed by polling a
 * hardware busy flag after each write to the divider field.
 */
struct clk_busy_divider {
	struct clk_divider div;		/* wrapped generic divider; must stay first for container_of() */
	const struct clk_ops *div_ops;	/* ops delegated to for recalc/round/set */
	void __iomem *reg;		/* register holding the busy flag */
	u8 shift;			/* bit position of the busy flag in @reg */
};
  33
  34static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
  35{
  36        struct clk_divider *div = to_clk_divider(hw);
  37
  38        return container_of(div, struct clk_busy_divider, div);
  39}
  40
  41static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
  42                                                  unsigned long parent_rate)
  43{
  44        struct clk_busy_divider *busy = to_clk_busy_divider(hw);
  45
  46        return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
  47}
  48
  49static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
  50                                        unsigned long *prate)
  51{
  52        struct clk_busy_divider *busy = to_clk_busy_divider(hw);
  53
  54        return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
  55}
  56
  57static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
  58                unsigned long parent_rate)
  59{
  60        struct clk_busy_divider *busy = to_clk_busy_divider(hw);
  61        int ret;
  62
  63        ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
  64        if (!ret)
  65                ret = clk_busy_wait(busy->reg, busy->shift);
  66
  67        return ret;
  68}
  69
/* Divider ops: generic divider behaviour plus a busy-flag wait on set_rate. */
static const struct clk_ops clk_busy_divider_ops = {
	.recalc_rate = clk_busy_divider_recalc_rate,
	.round_rate = clk_busy_divider_round_rate,
	.set_rate = clk_busy_divider_set_rate,
};
  75
  76struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
  77                                 void __iomem *reg, u8 shift, u8 width,
  78                                 void __iomem *busy_reg, u8 busy_shift)
  79{
  80        struct clk_busy_divider *busy;
  81        struct clk_hw *hw;
  82        struct clk_init_data init;
  83        int ret;
  84
  85        busy = kzalloc(sizeof(*busy), GFP_KERNEL);
  86        if (!busy)
  87                return ERR_PTR(-ENOMEM);
  88
  89        busy->reg = busy_reg;
  90        busy->shift = busy_shift;
  91
  92        busy->div.reg = reg;
  93        busy->div.shift = shift;
  94        busy->div.width = width;
  95        busy->div.lock = &imx_ccm_lock;
  96        busy->div_ops = &clk_divider_ops;
  97
  98        init.name = name;
  99        init.ops = &clk_busy_divider_ops;
 100        init.flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL;
 101        init.parent_names = &parent_name;
 102        init.num_parents = 1;
 103
 104        busy->div.hw.init = &init;
 105
 106        hw = &busy->div.hw;
 107
 108        ret = clk_hw_register(NULL, hw);
 109        if (ret) {
 110                kfree(busy);
 111                return ERR_PTR(ret);
 112        }
 113
 114        return hw;
 115}
 116
/*
 * A clk_mux whose parent switches must be confirmed by polling a
 * hardware busy flag after each write to the select field.
 */
struct clk_busy_mux {
	struct clk_mux mux;		/* wrapped generic mux; must stay first for container_of() */
	const struct clk_ops *mux_ops;	/* ops delegated to for get/set parent */
	void __iomem *reg;		/* register holding the busy flag */
	u8 shift;			/* bit position of the busy flag in @reg */
};
 123
 124static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
 125{
 126        struct clk_mux *mux = to_clk_mux(hw);
 127
 128        return container_of(mux, struct clk_busy_mux, mux);
 129}
 130
 131static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
 132{
 133        struct clk_busy_mux *busy = to_clk_busy_mux(hw);
 134
 135        return busy->mux_ops->get_parent(&busy->mux.hw);
 136}
 137
 138static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
 139{
 140        struct clk_busy_mux *busy = to_clk_busy_mux(hw);
 141        int ret;
 142
 143        ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
 144        if (!ret)
 145                ret = clk_busy_wait(busy->reg, busy->shift);
 146
 147        return ret;
 148}
 149
/* Mux ops: generic mux behaviour plus a busy-flag wait on set_parent. */
static const struct clk_ops clk_busy_mux_ops = {
	.get_parent = clk_busy_mux_get_parent,
	.set_parent = clk_busy_mux_set_parent,
};
 154
 155struct clk_hw *imx_clk_hw_busy_mux(const char *name, void __iomem *reg, u8 shift,
 156                             u8 width, void __iomem *busy_reg, u8 busy_shift,
 157                             const char * const *parent_names, int num_parents)
 158{
 159        struct clk_busy_mux *busy;
 160        struct clk_hw *hw;
 161        struct clk_init_data init;
 162        int ret;
 163
 164        busy = kzalloc(sizeof(*busy), GFP_KERNEL);
 165        if (!busy)
 166                return ERR_PTR(-ENOMEM);
 167
 168        busy->reg = busy_reg;
 169        busy->shift = busy_shift;
 170
 171        busy->mux.reg = reg;
 172        busy->mux.shift = shift;
 173        busy->mux.mask = BIT(width) - 1;
 174        busy->mux.lock = &imx_ccm_lock;
 175        busy->mux_ops = &clk_mux_ops;
 176
 177        init.name = name;
 178        init.ops = &clk_busy_mux_ops;
 179        init.flags = CLK_IS_CRITICAL;
 180        init.parent_names = parent_names;
 181        init.num_parents = num_parents;
 182
 183        busy->mux.hw.init = &init;
 184
 185        hw = &busy->mux.hw;
 186
 187        ret = clk_hw_register(NULL, hw);
 188        if (ret) {
 189                kfree(busy);
 190                return ERR_PTR(ret);
 191        }
 192
 193        return hw;
 194}
 195