linux/drivers/clk/clk-zynq.c
/*
 * Copyright (c) 2012 National Instruments
 *
 * Josh Cartwright <josh.cartwright@ni.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/io.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/clk-provider.h>

static void __iomem *slcr_base;

struct zynq_pll_clk {
        struct clk_hw   hw;
        void __iomem    *pll_ctrl;
        void __iomem    *pll_cfg;
};

#define to_zynq_pll_clk(hw)     container_of(hw, struct zynq_pll_clk, hw)

#define CTRL_PLL_FDIV(x)        ((x) >> 12)

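/*
 * PLL output rate is the parent rate multiplied by the feedback divider
 * field, which CTRL_PLL_FDIV() extracts starting at bit 12 of the PLL
 * control register.
 */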
static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
                                          unsigned long parent_rate)
{
        struct zynq_pll_clk *pll = to_zynq_pll_clk(hw);
        return parent_rate * CTRL_PLL_FDIV(ioread32(pll->pll_ctrl));
}

static const struct clk_ops zynq_pll_clk_ops = {
        .recalc_rate    = zynq_pll_recalc_rate,
};

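/*
 * Register a PLL described in the device tree.  The "reg" property holds
 * the control and config register offsets relative to the SLCR base that
 * was handed to xilinx_zynq_clocks_init(); the resulting clock is exposed
 * through a simple one-clock OF provider.
 */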
static void __init zynq_pll_clk_setup(struct device_node *np)
{
        struct clk_init_data init;
        struct zynq_pll_clk *pll;
        const char *parent_name;
        struct clk *clk;
        u32 regs[2];
        int ret;

        ret = of_property_read_u32_array(np, "reg", regs, ARRAY_SIZE(regs));
        if (WARN_ON(ret))
                return;

        pll = kzalloc(sizeof(*pll), GFP_KERNEL);
        if (WARN_ON(!pll))
                return;

        pll->pll_ctrl = slcr_base + regs[0];
        pll->pll_cfg  = slcr_base + regs[1];

        of_property_read_string(np, "clock-output-names", &init.name);

        init.ops = &zynq_pll_clk_ops;
        parent_name = of_clk_get_parent_name(np, 0);
        init.parent_names = &parent_name;
        init.num_parents = 1;

        pll->hw.init = &init;

        clk = clk_register(NULL, &pll->hw);
        if (WARN_ON(IS_ERR(clk)))
                return;

        ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
        if (WARN_ON(ret))
                return;
}
CLK_OF_DECLARE(zynq_pll, "xlnx,zynq-pll", zynq_pll_clk_setup);

struct zynq_periph_clk {
        struct clk_hw           hw;
        struct clk_onecell_data onecell_data;
        struct clk              *gates[2];
        void __iomem            *clk_ctrl;
        spinlock_t              clkact_lock;
};

#define to_zynq_periph_clk(hw)  container_of(hw, struct zynq_periph_clk, hw)

static const u8 periph_clk_parent_map[] = {
        0, 0, 1, 2
};
#define PERIPH_CLK_CTRL_SRC(x)  (periph_clk_parent_map[((x) & 0x30) >> 4])
#define PERIPH_CLK_CTRL_DIV(x)  (((x) & 0x3F00) >> 8)

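/*
 * The peripheral clock control register encodes the source select in
 * bits [5:4] (remapped to a parent index via periph_clk_parent_map) and
 * a 6-bit divider in bits [13:8].
 */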
static unsigned long zynq_periph_recalc_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
{
        struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
        return parent_rate / PERIPH_CLK_CTRL_DIV(ioread32(periph->clk_ctrl));
}

static u8 zynq_periph_get_parent(struct clk_hw *hw)
{
        struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
        return PERIPH_CLK_CTRL_SRC(ioread32(periph->clk_ctrl));
}

static const struct clk_ops zynq_periph_clk_ops = {
        .recalc_rate    = zynq_periph_recalc_rate,
        .get_parent     = zynq_periph_get_parent,
};

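/*
 * A peripheral clock node is registered as an OF provider twice: once for
 * the mux/divider itself and once (via onecell) for the one or two
 * downstream gates sitting on bits 0 and 1 of the same control register.
 */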
static void __init zynq_periph_clk_setup(struct device_node *np)
{
        struct zynq_periph_clk *periph;
        const char *parent_names[3];
        struct clk_init_data init;
        int clk_num = 0, err;
        const char *name;
        struct clk *clk;
        u32 reg;
        int i;

        err = of_property_read_u32(np, "reg", &reg);
        if (WARN_ON(err))
                return;

        periph = kzalloc(sizeof(*periph), GFP_KERNEL);
        if (WARN_ON(!periph))
                return;

        periph->clk_ctrl = slcr_base + reg;
        spin_lock_init(&periph->clkact_lock);

        init.name = np->name;
        init.ops = &zynq_periph_clk_ops;
        for (i = 0; i < ARRAY_SIZE(parent_names); i++)
                parent_names[i] = of_clk_get_parent_name(np, i);
        init.parent_names = parent_names;
        init.num_parents = ARRAY_SIZE(parent_names);

        periph->hw.init = &init;

        clk = clk_register(NULL, &periph->hw);
        if (WARN_ON(IS_ERR(clk)))
                return;

        err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
        if (WARN_ON(err))
                return;

        err = of_property_read_string_index(np, "clock-output-names", 0,
                                            &name);
        if (WARN_ON(err))
                return;

        periph->gates[0] = clk_register_gate(NULL, name, np->name, 0,
                                             periph->clk_ctrl, 0, 0,
                                             &periph->clkact_lock);
        if (WARN_ON(IS_ERR(periph->gates[0])))
                return;
        clk_num++;

        /* some periph clks have 2 downstream gates */
        err = of_property_read_string_index(np, "clock-output-names", 1,
                                            &name);
        if (err != -ENODATA) {
                periph->gates[1] = clk_register_gate(NULL, name, np->name, 0,
                                                     periph->clk_ctrl, 1, 0,
                                                     &periph->clkact_lock);
                if (WARN_ON(IS_ERR(periph->gates[1])))
                        return;
                clk_num++;
        }

        periph->onecell_data.clks = periph->gates;
        periph->onecell_data.clk_num = clk_num;

        err = of_clk_add_provider(np, of_clk_src_onecell_get,
                                  &periph->onecell_data);
        if (WARN_ON(err))
                return;
}
CLK_OF_DECLARE(zynq_periph, "xlnx,zynq-periph-clock", zynq_periph_clk_setup);

/*
 * The CPU clock domain is modelled as a mux with four child sub-clocks,
 * whose derived rates depend on CLK_621_TRUE.
 */

struct zynq_cpu_clk {
        struct clk_hw           hw;
        struct clk_onecell_data onecell_data;
        struct clk              *subclks[4];
        void __iomem            *clk_ctrl;
        spinlock_t              clkact_lock;
};

#define to_zynq_cpu_clk(hw)     container_of(hw, struct zynq_cpu_clk, hw)

static const u8 zynq_cpu_clk_parent_map[] = {
        1, 1, 2, 0
};
#define CPU_CLK_SRCSEL(x)       (zynq_cpu_clk_parent_map[(((x) & 0x30) >> 4)])
#define CPU_CLK_CTRL_DIV(x)     (((x) & 0x3F00) >> 8)

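/*
 * SRCSEL (bits [5:4]) is not a 1:1 match for the order of parents listed in
 * the device tree, so zynq_cpu_clk_parent_map translates the raw field into
 * the corresponding parent index; the divider lives in bits [13:8].
 */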
static u8 zynq_cpu_clk_get_parent(struct clk_hw *hw)
{
        struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
        return CPU_CLK_SRCSEL(ioread32(cpuclk->clk_ctrl));
}

static unsigned long zynq_cpu_clk_recalc_rate(struct clk_hw *hw,
                                              unsigned long parent_rate)
{
        struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
        return parent_rate / CPU_CLK_CTRL_DIV(ioread32(cpuclk->clk_ctrl));
}

static const struct clk_ops zynq_cpu_clk_ops = {
        .get_parent     = zynq_cpu_clk_get_parent,
        .recalc_rate    = zynq_cpu_clk_recalc_rate,
};

struct zynq_cpu_subclk {
        struct clk_hw   hw;
        void __iomem    *clk_621;
        enum {
                CPU_SUBCLK_6X4X,
                CPU_SUBCLK_3X2X,
                CPU_SUBCLK_2X,
                CPU_SUBCLK_1X,
        } which;
};

#define CLK_621_TRUE(x) ((x) & 1)

#define to_zynq_cpu_subclk(hw)  container_of(hw, struct zynq_cpu_subclk, hw)

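/*
 * The four CPU sub-clocks run at fixed ratios of the 6x/4x clock: 6:3:2:1
 * when CLK_621_TRUE is set, 4:2:2:1 otherwise, which is why the 2x and 1x
 * dividers below depend on the clk_621 register.
 */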
static unsigned long zynq_cpu_subclk_recalc_rate(struct clk_hw *hw,
                                                 unsigned long parent_rate)
{
        unsigned long uninitialized_var(rate);
        struct zynq_cpu_subclk *subclk;
        bool is_621;

        subclk = to_zynq_cpu_subclk(hw);
        is_621 = CLK_621_TRUE(ioread32(subclk->clk_621));

        switch (subclk->which) {
        case CPU_SUBCLK_6X4X:
                rate = parent_rate;
                break;
        case CPU_SUBCLK_3X2X:
                rate = parent_rate / 2;
                break;
        case CPU_SUBCLK_2X:
                rate = parent_rate / (is_621 ? 3 : 2);
                break;
        case CPU_SUBCLK_1X:
                rate = parent_rate / (is_621 ? 6 : 4);
                break;
        }

        return rate;
}

static const struct clk_ops zynq_cpu_subclk_ops = {
        .recalc_rate    = zynq_cpu_subclk_recalc_rate,
};

static struct clk *zynq_cpu_subclk_setup(struct device_node *np, u8 which,
                                         void __iomem *clk_621)
{
        struct zynq_cpu_subclk *subclk;
        struct clk_init_data init;
        struct clk *clk;
        int err;

        err = of_property_read_string_index(np, "clock-output-names",
                                            which, &init.name);
        if (WARN_ON(err))
                goto err_read_output_name;

        subclk = kzalloc(sizeof(*subclk), GFP_KERNEL);
        if (!subclk)
                goto err_subclk_alloc;

        subclk->clk_621 = clk_621;
        subclk->which = which;

        init.ops = &zynq_cpu_subclk_ops;
        init.parent_names = &np->name;
        init.num_parents = 1;

        subclk->hw.init = &init;

        clk = clk_register(NULL, &subclk->hw);
        if (WARN_ON(IS_ERR(clk)))
                goto err_clk_register;

        return clk;

err_clk_register:
        kfree(subclk);
err_subclk_alloc:
err_read_output_name:
        return ERR_PTR(-EINVAL);
}

static void __init zynq_cpu_clk_setup(struct device_node *np)
{
        struct zynq_cpu_clk *cpuclk;
        const char *parent_names[3];
        struct clk_init_data init;
        void __iomem *clk_621;
        struct clk *clk;
        u32 reg[2];
        int err;
        int i;

        err = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
        if (WARN_ON(err))
                return;

        cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
        if (WARN_ON(!cpuclk))
                return;

        cpuclk->clk_ctrl = slcr_base + reg[0];
        clk_621 = slcr_base + reg[1];
        spin_lock_init(&cpuclk->clkact_lock);

        init.name = np->name;
        init.ops = &zynq_cpu_clk_ops;
        for (i = 0; i < ARRAY_SIZE(parent_names); i++)
                parent_names[i] = of_clk_get_parent_name(np, i);
        init.parent_names = parent_names;
        init.num_parents = ARRAY_SIZE(parent_names);

        cpuclk->hw.init = &init;

        clk = clk_register(NULL, &cpuclk->hw);
        if (WARN_ON(IS_ERR(clk)))
                return;

        err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
        if (WARN_ON(err))
                return;

        for (i = 0; i < 4; i++) {
                cpuclk->subclks[i] = zynq_cpu_subclk_setup(np, i, clk_621);
                if (WARN_ON(IS_ERR(cpuclk->subclks[i])))
                        return;
        }

        cpuclk->onecell_data.clks = cpuclk->subclks;
        cpuclk->onecell_data.clk_num = i;

        err = of_clk_add_provider(np, of_clk_src_onecell_get,
                                  &cpuclk->onecell_data);
        if (WARN_ON(err))
                return;
}
CLK_OF_DECLARE(zynq_cpu, "xlnx,zynq-cpu-clock", zynq_cpu_clk_setup);

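/*
 * Record the mapped SLCR region so the CLK_OF_DECLARE setup callbacks above
 * can compute register addresses, then let of_clk_init() probe all matching
 * clock nodes in the device tree.
 */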
void __init xilinx_zynq_clocks_init(void __iomem *slcr)
{
        slcr_base = slcr;
        of_clk_init(NULL);
}