linux/drivers/clk/x86/clk-cgu-pll.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation.
 * Zhu YiXin <yixin.zhu@intel.com>
 * Rahul Tanwar <rahul.tanwar@intel.com>
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define to_lgm_clk_pll(_hw)	container_of(_hw, struct lgm_clk_pll, hw)
#define PLL_REF_DIV(x)		((x) + 0x08)

/*
 * PLL output rate formula:
 * rate = (prate * mult + (prate * frac) / frac_div) / div
 */
static unsigned long
lgm_pll_calc_rate(unsigned long prate, unsigned int mult,
		  unsigned int div, unsigned int frac, unsigned int frac_div)
{
	u64 crate, frate, rate64;

	rate64 = prate;
	crate = rate64 * mult;
	frate = rate64 * frac;
	do_div(frate, frac_div);
	crate += frate;
	do_div(crate, div);

	return crate;
}

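/*
 * Worked example for the formula above (illustrative values only):
 * with prate = 40 MHz, mult = 30, frac = 0x400000 (i.e. 0.25 once
 * divided by frac_div = 2^24) and div = 1, the resulting rate is
 * 40 MHz * 30 + 40 MHz * 0.25 = 1210 MHz.
 */

/*
 * Per the lgm_get_clk_val() shift/width arguments below, mult sits in
 * bits [11:0] and div in bits [23:18] of PLL_REF_DIV(reg), while frac
 * occupies bits [25:2] of the base PLL register.
 */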
static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned int div, mult, frac;
	unsigned long flags;

	spin_lock_irqsave(&pll->lock, flags);
	mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
	div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
	frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);
	spin_unlock_irqrestore(&pll->lock, flags);

	if (pll->type == TYPE_LJPLL)
		div *= 4;

	return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24));
}

static int lgm_pll_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;
	unsigned int ret;

	spin_lock_irqsave(&pll->lock, flags);
	ret = lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
	spin_unlock_irqrestore(&pll->lock, flags);

	return ret;
}

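/*
 * Enable sets bit 0 of the PLL register and then busy-polls the same bit
 * with readl_poll_timeout_atomic() (1 us poll interval, 100 us timeout)
 * until it reads back as set, returning -ETIMEDOUT otherwise.
 */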
static int lgm_pll_enable(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;
	u32 val;
	int ret;

	spin_lock_irqsave(&pll->lock, flags);
	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
	ret = readl_poll_timeout_atomic(pll->membase + pll->reg,
					val, (val & 0x1), 1, 100);
	spin_unlock_irqrestore(&pll->lock, flags);

	return ret;
}

static void lgm_pll_disable(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned long flags;

	spin_lock_irqsave(&pll->lock, flags);
	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
	spin_unlock_irqrestore(&pll->lock, flags);
}

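/*
 * Note that no set_rate/round_rate callbacks are provided: from the clock
 * framework's point of view these PLLs report and gate their rate but
 * cannot be reprogrammed at runtime.
 */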
static const struct clk_ops lgm_pll_ops = {
	.recalc_rate = lgm_pll_recalc_rate,
	.is_enabled = lgm_pll_is_enabled,
	.enable = lgm_pll_enable,
	.disable = lgm_pll_disable,
};

static struct clk_hw *
lgm_clk_register_pll(struct lgm_clk_provider *ctx,
		     const struct lgm_pll_clk_data *list)
{
	struct clk_init_data init = {};
	struct lgm_clk_pll *pll;
	struct device *dev = ctx->dev;
	struct clk_hw *hw;
	int ret;

	init.ops = &lgm_pll_ops;
	init.name = list->name;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	pll->membase = ctx->membase;
	pll->lock = ctx->lock;
	pll->reg = list->reg;
	pll->flags = list->flags;
	pll->type = list->type;
	pll->hw.init = &init;

	hw = &pll->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
			  const struct lgm_pll_clk_data *list,
			  unsigned int nr_clk)
{
	struct clk_hw *hw;
	int i;

	for (i = 0; i < nr_clk; i++, list++) {
		hw = lgm_clk_register_pll(ctx, list);
		if (IS_ERR(hw)) {
			dev_err(ctx->dev, "failed to register pll: %s\n",
				list->name);
			return PTR_ERR(hw);
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}
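
/*
 * Usage sketch (illustrative only): a SoC clock driver such as clk-lgm.c
 * would describe its PLLs in a table of struct lgm_pll_clk_data and hand
 * it to lgm_clk_register_plls() at probe time.  The field names below
 * follow the accesses made in this file; the clock id, register offset,
 * parent name and flags are made-up example values, not hardware data.
 */
#if 0
static const struct clk_parent_data example_pll_parents[] = {
	{ .fw_name = "osc", .name = "osc" },
};

static const struct lgm_pll_clk_data example_plls[] = {
	{
		.id		= 1,		/* hypothetical clock id */
		.name		= "example_pll",
		.parent_data	= example_pll_parents,
		.num_parents	= ARRAY_SIZE(example_pll_parents),
		.flags		= CLK_IGNORE_UNUSED,
		.type		= TYPE_LJPLL,
		.reg		= 0x20,		/* hypothetical CGU offset */
	},
};

static int example_register_plls(struct lgm_clk_provider *ctx)
{
	return lgm_clk_register_plls(ctx, example_plls,
				     ARRAY_SIZE(example_plls));
}
#endif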