linux/drivers/clk/clk-zynq.c
/*
 * Copyright (c) 2012 National Instruments
 *
 * Josh Cartwright <josh.cartwright@ni.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/io.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>

/*
 * Base of the SLCR (System Level Control Register) block; the "reg" values
 * read from the devicetree below are offsets into this region.
 */
static void __iomem *slcr_base;

struct zynq_pll_clk {
        struct clk_hw   hw;
        void __iomem    *pll_ctrl;
        void __iomem    *pll_cfg;
};

#define to_zynq_pll_clk(hw)     container_of(hw, struct zynq_pll_clk, hw)

/* PLL feedback divider (FDIV): multiplier field of the PLL control register, bit 12 up */
#define CTRL_PLL_FDIV(x)        ((x) >> 12)

static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
                                          unsigned long parent_rate)
{
        struct zynq_pll_clk *pll = to_zynq_pll_clk(hw);
        return parent_rate * CTRL_PLL_FDIV(ioread32(pll->pll_ctrl));
}

static const struct clk_ops zynq_pll_clk_ops = {
        .recalc_rate    = zynq_pll_recalc_rate,
};

static void __init zynq_pll_clk_setup(struct device_node *np)
{
        struct clk_init_data init;
        struct zynq_pll_clk *pll;
        const char *parent_name;
        struct clk *clk;
        u32 regs[2];
        int ret;

        ret = of_property_read_u32_array(np, "reg", regs, ARRAY_SIZE(regs));
        if (WARN_ON(ret))
                return;

        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
        if (WARN_ON(!pll))
                return;

        pll->pll_ctrl = slcr_base + regs[0];
        pll->pll_cfg  = slcr_base + regs[1];

        of_property_read_string(np, "clock-output-names", &init.name);

        init.ops = &zynq_pll_clk_ops;
        init.flags = 0;
        parent_name = of_clk_get_parent_name(np, 0);
        init.parent_names = &parent_name;
        init.num_parents = 1;

        pll->hw.init = &init;

        clk = clk_register(NULL, &pll->hw);
        if (WARN_ON(IS_ERR(clk)))
                return;

        ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
        if (WARN_ON(ret))
                return;
}

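/*
 * Illustrative devicetree node for the "xlnx,zynq-pll" compatible consumed by
 * zynq_pll_clk_setup() above.  This sketch is not part of the original file;
 * the node name, label, parent clock and register offsets are examples only.
 * The two "reg" cells are the SLCR-relative offsets of the PLL control and
 * configuration registers, and #clock-cells = <0> matches the
 * of_clk_src_simple_get provider registered above.
 *
 *	armpll: pll@100 {
 *		#clock-cells = <0>;
 *		compatible = "xlnx,zynq-pll";
 *		clocks = <&ps_clk>;
 *		reg = <0x100 0x110>;
 *		clock-output-names = "armpll";
 *	};
 */
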
struct zynq_periph_clk {
        struct clk_hw           hw;
        struct clk_onecell_data onecell_data;
        struct clk              *gates[2];
        void __iomem            *clk_ctrl;
        spinlock_t              clkact_lock;
};

#define to_zynq_periph_clk(hw)  container_of(hw, struct zynq_periph_clk, hw)

/*
 * Maps the 2-bit SRCSEL field of the peripheral clock control register
 * (bits [5:4]) onto the index of the corresponding parent in the devicetree
 * "clocks" list.
 */
static const u8 periph_clk_parent_map[] = {
        0, 0, 1, 2
};
#define PERIPH_CLK_CTRL_SRC(x)  (periph_clk_parent_map[((x) & 0x30) >> 4])
#define PERIPH_CLK_CTRL_DIV(x)  (((x) & 0x3F00) >> 8)

static unsigned long zynq_periph_recalc_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
{
        struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
        return parent_rate / PERIPH_CLK_CTRL_DIV(ioread32(periph->clk_ctrl));
}

static u8 zynq_periph_get_parent(struct clk_hw *hw)
{
        struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
        return PERIPH_CLK_CTRL_SRC(ioread32(periph->clk_ctrl));
}

static const struct clk_ops zynq_periph_clk_ops = {
        .recalc_rate    = zynq_periph_recalc_rate,
        .get_parent     = zynq_periph_get_parent,
};

static void __init zynq_periph_clk_setup(struct device_node *np)
{
        struct zynq_periph_clk *periph;
        const char *parent_names[3];
        struct clk_init_data init;
        int clk_num = 0, err;
        const char *name;
        struct clk *clk;
        u32 reg;
        int i;

        err = of_property_read_u32(np, "reg", &reg);
        if (WARN_ON(err))
                return;

        periph = kzalloc(sizeof(*periph), GFP_KERNEL);
        if (WARN_ON(!periph))
                return;

        periph->clk_ctrl = slcr_base + reg;
        spin_lock_init(&periph->clkact_lock);

        init.name = np->name;
        init.ops = &zynq_periph_clk_ops;
        init.flags = 0;
        for (i = 0; i < ARRAY_SIZE(parent_names); i++)
                parent_names[i] = of_clk_get_parent_name(np, i);
        init.parent_names = parent_names;
        init.num_parents = ARRAY_SIZE(parent_names);

        periph->hw.init = &init;

        clk = clk_register(NULL, &periph->hw);
        if (WARN_ON(IS_ERR(clk)))
                return;

        err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
        if (WARN_ON(err))
                return;

        err = of_property_read_string_index(np, "clock-output-names", 0,
                                            &name);
        if (WARN_ON(err))
                return;

        periph->gates[0] = clk_register_gate(NULL, name, np->name, 0,
                                             periph->clk_ctrl, 0, 0,
                                             &periph->clkact_lock);
        if (WARN_ON(IS_ERR(periph->gates[0])))
                return;
        clk_num++;

        /* some periph clks have 2 downstream gates */
        err = of_property_read_string_index(np, "clock-output-names", 1,
                                            &name);
        if (err != -ENODATA) {
                periph->gates[1] = clk_register_gate(NULL, name, np->name, 0,
                                                     periph->clk_ctrl, 1, 0,
                                                     &periph->clkact_lock);
                if (WARN_ON(IS_ERR(periph->gates[1])))
                        return;
                clk_num++;
        }

        periph->onecell_data.clks = periph->gates;
        periph->onecell_data.clk_num = clk_num;

        err = of_clk_add_provider(np, of_clk_src_onecell_get,
                                  &periph->onecell_data);
        if (WARN_ON(err))
                return;
}

/*
 * The CPU clock domain is modelled as a mux with four child subclocks whose
 * derived rates depend on CLK_621_TRUE.
 */

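/*
 * Worked example (hypothetical input rate, derived from the divisor table in
 * zynq_cpu_subclk_recalc_rate() below): with the 6x4x clock at 1200 MHz and
 * CLK_621_TRUE set (6:2:1 mode), 3x2x = 600 MHz, 2x = 400 MHz and 1x = 200 MHz.
 * With CLK_621_TRUE clear (4:2:1 mode) the 2x and 1x divisors become 2 and 4,
 * giving 600 MHz and 300 MHz instead.
 */
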
struct zynq_cpu_clk {
        struct clk_hw           hw;
        struct clk_onecell_data onecell_data;
        struct clk              *subclks[4];
        void __iomem            *clk_ctrl;
        spinlock_t              clkact_lock;
};

#define to_zynq_cpu_clk(hw)     container_of(hw, struct zynq_cpu_clk, hw)

/*
 * Maps the 2-bit SRCSEL field of the CPU clock control register (bits [5:4])
 * onto the index of the corresponding parent in the devicetree "clocks" list.
 */
static const u8 zynq_cpu_clk_parent_map[] = {
        1, 1, 2, 0
};
#define CPU_CLK_SRCSEL(x)       (zynq_cpu_clk_parent_map[(((x) & 0x30) >> 4)])
#define CPU_CLK_CTRL_DIV(x)     (((x) & 0x3F00) >> 8)

static u8 zynq_cpu_clk_get_parent(struct clk_hw *hw)
{
        struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
        return CPU_CLK_SRCSEL(ioread32(cpuclk->clk_ctrl));
}

static unsigned long zynq_cpu_clk_recalc_rate(struct clk_hw *hw,
                                              unsigned long parent_rate)
{
        struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
        return parent_rate / CPU_CLK_CTRL_DIV(ioread32(cpuclk->clk_ctrl));
}

static const struct clk_ops zynq_cpu_clk_ops = {
        .get_parent     = zynq_cpu_clk_get_parent,
        .recalc_rate    = zynq_cpu_clk_recalc_rate,
};

struct zynq_cpu_subclk {
        struct clk_hw   hw;
        void __iomem    *clk_621;
        enum {
                CPU_SUBCLK_6X4X,
                CPU_SUBCLK_3X2X,
                CPU_SUBCLK_2X,
                CPU_SUBCLK_1X,
        } which;
};

#define CLK_621_TRUE(x) ((x) & 1)

#define to_zynq_cpu_subclk(hw)  container_of(hw, struct zynq_cpu_subclk, hw)

static unsigned long zynq_cpu_subclk_recalc_rate(struct clk_hw *hw,
                                                 unsigned long parent_rate)
{
        unsigned long uninitialized_var(rate);
        struct zynq_cpu_subclk *subclk;
        bool is_621;

        subclk = to_zynq_cpu_subclk(hw);
        is_621 = CLK_621_TRUE(ioread32(subclk->clk_621));

        switch (subclk->which) {
        case CPU_SUBCLK_6X4X:
                rate = parent_rate;
                break;
        case CPU_SUBCLK_3X2X:
                rate = parent_rate / 2;
                break;
        case CPU_SUBCLK_2X:
                rate = parent_rate / (is_621 ? 3 : 2);
                break;
        case CPU_SUBCLK_1X:
                rate = parent_rate / (is_621 ? 6 : 4);
                break;
        }

        return rate;
}

static const struct clk_ops zynq_cpu_subclk_ops = {
        .recalc_rate    = zynq_cpu_subclk_recalc_rate,
};

static struct clk *zynq_cpu_subclk_setup(struct device_node *np, u8 which,
                                         void __iomem *clk_621)
{
        struct zynq_cpu_subclk *subclk;
        struct clk_init_data init;
        struct clk *clk;
        int err;

        err = of_property_read_string_index(np, "clock-output-names",
                                            which, &init.name);
        if (WARN_ON(err))
                goto err_read_output_name;

        subclk = kzalloc(sizeof(*subclk), GFP_KERNEL);
        if (!subclk)
                goto err_subclk_alloc;

        subclk->clk_621 = clk_621;
        subclk->which = which;

        init.ops = &zynq_cpu_subclk_ops;
        init.flags = 0;
        init.parent_names = &np->name;
        init.num_parents = 1;

        subclk->hw.init = &init;

        clk = clk_register(NULL, &subclk->hw);
        if (WARN_ON(IS_ERR(clk)))
                goto err_clk_register;

        return clk;

err_clk_register:
        kfree(subclk);
err_subclk_alloc:
err_read_output_name:
        return ERR_PTR(-EINVAL);
}

static void __init zynq_cpu_clk_setup(struct device_node *np)
{
        struct zynq_cpu_clk *cpuclk;
        const char *parent_names[3];
        struct clk_init_data init;
        void __iomem *clk_621;
        struct clk *clk;
        u32 reg[2];
        int err;
        int i;

        err = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
        if (WARN_ON(err))
                return;

        cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
        if (WARN_ON(!cpuclk))
                return;

        cpuclk->clk_ctrl = slcr_base + reg[0];
        clk_621 = slcr_base + reg[1];
        spin_lock_init(&cpuclk->clkact_lock);

        init.name = np->name;
        init.ops = &zynq_cpu_clk_ops;
        init.flags = 0;
        for (i = 0; i < ARRAY_SIZE(parent_names); i++)
                parent_names[i] = of_clk_get_parent_name(np, i);
        init.parent_names = parent_names;
        init.num_parents = ARRAY_SIZE(parent_names);

        cpuclk->hw.init = &init;

        clk = clk_register(NULL, &cpuclk->hw);
        if (WARN_ON(IS_ERR(clk)))
                return;

        err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
        if (WARN_ON(err))
                return;

        for (i = 0; i < 4; i++) {
                cpuclk->subclks[i] = zynq_cpu_subclk_setup(np, i, clk_621);
                if (WARN_ON(IS_ERR(cpuclk->subclks[i])))
                        return;
        }

        cpuclk->onecell_data.clks = cpuclk->subclks;
        cpuclk->onecell_data.clk_num = i;

        err = of_clk_add_provider(np, of_clk_src_onecell_get,
                                  &cpuclk->onecell_data);
        if (WARN_ON(err))
                return;
}

static const __initconst struct of_device_id zynq_clk_match[] = {
        { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
        { .compatible = "xlnx,zynq-pll", .data = zynq_pll_clk_setup, },
        { .compatible = "xlnx,zynq-periph-clock",
                .data = zynq_periph_clk_setup, },
        { .compatible = "xlnx,zynq-cpu-clock", .data = zynq_cpu_clk_setup, },
        {}
};

void __init xilinx_zynq_clocks_init(void __iomem *slcr)
{
        slcr_base = slcr;
        of_clk_init(zynq_clk_match);
}