linux/drivers/clk/clk.c
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
        struct dentry *d;
        int ret = -ENOMEM;

        if (!clk || !pdentry) {
                ret = -EINVAL;
                goto out;
        }

        d = debugfs_create_dir(clk->name, pdentry);
        if (!d)
                goto out;

        clk->dentry = d;

        d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
                        (u32 *)&clk->rate);
        if (!d)
                goto err_out;

        d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
                        (u32 *)&clk->flags);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->prepare_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->enable_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->notifier_count);
        if (!d)
                goto err_out;

        ret = 0;
        goto out;

err_out:
        debugfs_remove(clk->dentry);
out:
        return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
        struct clk *child;
        struct hlist_node *tmp;
        int ret = -EINVAL;

        if (!clk || !pdentry)
                goto out;

        ret = clk_debug_create_one(clk, pdentry);

        if (ret)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_debug_create_subtree(child, clk->dentry);

        ret = 0;
out:
        return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far) so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
        struct clk *parent;
        struct dentry *pdentry;
        int ret = 0;

        if (!inited)
                goto out;

        parent = clk->parent;

        /*
         * Check to see if a clk is a root clk.  Also check that it is
         * safe to add this clk to debugfs
         */
        if (!parent)
                if (clk->flags & CLK_IS_ROOT)
                        pdentry = rootdir;
                else
                        pdentry = orphandir;
        else
                if (parent->dentry)
                        pdentry = parent->dentry;
                else
                        goto out;

        ret = clk_debug_create_subtree(clk, pdentry);

out:
        return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is set up.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        rootdir = debugfs_create_dir("clk", NULL);

        if (!rootdir)
                return -ENOMEM;

        orphandir = debugfs_create_dir("orphans", rootdir);

        if (!orphandir)
                return -ENOMEM;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_debug_create_subtree(clk, rootdir);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_debug_create_subtree(clk, orphandir);

        inited = 1;

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
        struct clk *child;
        struct hlist_node *tmp;
        unsigned long flags;

        if (!clk)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_disable_unused_subtree(child);

        spin_lock_irqsave(&enable_lock, flags);

        if (clk->enable_count)
                goto unlock_out;

        if (clk->flags & CLK_IGNORE_UNUSED)
                goto unlock_out;

        if (__clk_is_enabled(clk) && clk->ops->disable)
                clk->ops->disable(clk->hw);

unlock_out:
        spin_unlock_irqrestore(&enable_lock, flags);

out:
        return;
}

static int clk_disable_unused(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_disable_unused_subtree(clk);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(clk);

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_disable_unused);
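
/*
 * Example (illustrative sketch, not a real driver): a clock that must stay
 * running even with no Linux consumer, say one feeding a coprocessor the
 * kernel never touches, can opt out of the late_initcall gating above by
 * setting CLK_IGNORE_UNUSED at registration time.  The "always_on" names
 * and my_gate_ops below are hypothetical.
 *
 *      static const char *always_on_parents[] = { "osc" };
 *
 *      static struct clk_init_data always_on_init = {
 *              .name           = "always_on_clk",
 *              .ops            = &my_gate_ops,         // hypothetical ops
 *              .parent_names   = always_on_parents,
 *              .num_parents    = 1,
 *              .flags          = CLK_IGNORE_UNUSED,    // skipped above
 *      };
 */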

/***    helper functions   ***/

inline const char *__clk_get_name(struct clk *clk)
{
        return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
        return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
        return !clk ? -EINVAL : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
        return !clk ? NULL : clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
        return !clk ? -EINVAL : clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
        return !clk ? -EINVAL : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
        unsigned long ret;

        if (!clk) {
                ret = 0;
                goto out;
        }

        ret = clk->rate;

        if (clk->flags & CLK_IS_ROOT)
                goto out;

        if (!clk->parent)
                ret = 0;

out:
        return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
        return !clk ? -EINVAL : clk->flags;
}

int __clk_is_enabled(struct clk *clk)
{
        int ret;

        if (!clk)
                return -EINVAL;

        /*
         * .is_enabled is only mandatory for clocks that gate;
         * fall back to the software usage counter if .is_enabled is missing
         */
        if (!clk->ops->is_enabled) {
                ret = clk->enable_count ? 1 : 0;
                goto out;
        }

        ret = clk->ops->is_enabled(clk->hw);
out:
        return ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
        struct clk *child;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!strcmp(clk->name, name))
                return clk;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_lookup_subtree(name, child);
                if (ret)
                        return ret;
        }

        return NULL;
}

struct clk *__clk_lookup(const char *name)
{
        struct clk *root_clk;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!name)
                return NULL;

        /* search the 'proper' clk tree first */
        hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        /* if not found, then search the orphan tree */
        hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        return NULL;
}

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(clk->prepare_count == 0))
                return;

        if (--clk->prepare_count > 0)
                return;

        WARN_ON(clk->enable_count > 0);

        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);

        __clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
        mutex_lock(&prepare_lock);
        __clk_unprepare(clk);
        mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (clk->prepare_count == 0) {
                ret = __clk_prepare(clk->parent);
                if (ret)
                        return ret;

                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        if (ret) {
                                __clk_unprepare(clk->parent);
                                return ret;
                        }
                }
        }

        clk->prepare_count++;

        return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
        int ret;

        mutex_lock(&prepare_lock);
        ret = __clk_prepare(clk);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(IS_ERR(clk)))
                return;

        if (WARN_ON(clk->enable_count == 0))
                return;

        if (--clk->enable_count > 0)
                return;

        if (clk->ops->disable)
                clk->ops->disable(clk->hw);

        __clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
        unsigned long flags;

        spin_lock_irqsave(&enable_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (WARN_ON(clk->prepare_count == 0))
                return -ESHUTDOWN;

        if (clk->enable_count == 0) {
                ret = __clk_enable(clk->parent);

                if (ret)
                        return ret;

                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        if (ret) {
                                __clk_disable(clk->parent);
                                return ret;
                        }
                }
        }

        clk->enable_count++;
        return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&enable_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
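
/*
 * Example (illustrative consumer sketch): the doc comments above require
 * clk_prepare before clk_enable and clk_disable before clk_unprepare.  A
 * typical driver pairs them like this; the "foo" names are hypothetical.
 *
 *      static int foo_power_on(struct foo_dev *foo)
 *      {
 *              int ret;
 *
 *              ret = clk_prepare(foo->clk);    // may sleep
 *              if (ret)
 *                      return ret;
 *
 *              ret = clk_enable(foo->clk);     // atomic, never sleeps
 *              if (ret)
 *                      clk_unprepare(foo->clk);
 *
 *              return ret;
 *      }
 *
 *      static void foo_power_off(struct foo_dev *foo)
 *      {
 *              clk_disable(foo->clk);          // fast part first
 *              clk_unprepare(foo->clk);        // then the sleepable part
 *      }
 */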

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long parent_rate = 0;

        if (!clk)
                return -EINVAL;

        if (!clk->ops->round_rate) {
                if (clk->flags & CLK_SET_RATE_PARENT)
                        return __clk_round_rate(clk->parent, rate);
                else
                        return clk->rate;
        }

        if (clk->parent)
                parent_rate = clk->parent->rate;

        return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long ret;

        mutex_lock(&prepare_lock);
        ret = __clk_round_rate(clk, rate);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
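
/*
 * Example (illustrative sketch): a driver can probe what rate the hardware
 * would actually deliver before committing to it.  The "foo" names are
 * hypothetical.
 *
 *      long rounded = clk_round_rate(foo->clk, 48000000);
 *
 *      if (rounded > 0 && clk_set_rate(foo->clk, rounded) == 0)
 *              dev_info(foo->dev, "running at %ld Hz\n", rounded);
 */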

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
{
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;

        cnd.clk = clk;
        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;

        list_for_each_entry(cn, &clk_notifier_list, node) {
                if (cn->clk == clk) {
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
                        break;
                }
        }

        return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
        unsigned long old_rate;
        unsigned long parent_rate = 0;
        struct hlist_node *tmp;
        struct clk *child;

        old_rate = clk->rate;

        if (clk->parent)
                parent_rate = clk->parent->rate;

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                clk->rate = parent_rate;

        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
        if (clk->notifier_count && msg)
                __clk_notify(clk, msg, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                __clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, in which case a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long rate;

        mutex_lock(&prepare_lock);

        if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(clk, 0);

        rate = __clk_get_rate(clk);
        mutex_unlock(&prepare_lock);

        return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
        struct hlist_node *tmp;
        struct clk *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;

        if (clk->ops->recalc_rate)
                new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                new_rate = parent_rate;

        /* abort the rate change if a driver returns NOTIFY_BAD */
        if (clk->notifier_count)
                ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

        if (ret == NOTIFY_BAD)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_speculate_rates(child, new_rate);
                if (ret == NOTIFY_BAD)
                        break;
        }

out:
        return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
        struct clk *child;
        struct hlist_node *tmp;

        clk->new_rate = new_rate;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                if (child->ops->recalc_rate)
                        child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
                else
                        child->new_rate = new_rate;
                clk_calc_subtree(child, child->new_rate);
        }
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
        struct clk *top = clk;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;

        /* sanity */
        if (IS_ERR_OR_NULL(clk))
                return NULL;

        /* save parent rate, if it exists */
        if (clk->parent)
                best_parent_rate = clk->parent->rate;

        /* never propagate up to the parent */
        if (!(clk->flags & CLK_SET_RATE_PARENT)) {
                if (!clk->ops->round_rate) {
                        clk->new_rate = clk->rate;
                        return NULL;
                }
                new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
                goto out;
        }

        /* need clk->parent from here on out */
        if (!clk->parent) {
                pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
                return NULL;
        }

        if (!clk->ops->round_rate) {
                top = clk_calc_new_rates(clk->parent, rate);
                new_rate = clk->parent->new_rate;

                goto out;
        }

        new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

        if (best_parent_rate != clk->parent->rate) {
                top = clk_calc_new_rates(clk->parent, best_parent_rate);

                goto out;
        }

out:
        clk_calc_subtree(clk, new_rate);

        return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
        struct hlist_node *tmp;
        struct clk *child, *fail_clk = NULL;
        int ret = NOTIFY_DONE;

        if (clk->rate == clk->new_rate)
                return NULL;

        if (clk->notifier_count) {
                ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
                if (ret == NOTIFY_BAD)
                        fail_clk = clk;
        }

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                clk = clk_propagate_rate_change(child, event);
                if (clk)
                        fail_clk = clk;
        }

        return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
        struct clk *child;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        struct hlist_node *tmp;

        old_rate = clk->rate;

        if (clk->parent)
                best_parent_rate = clk->parent->rate;

        if (clk->ops->set_rate)
                clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
        else
                clk->rate = best_parent_rate;

        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        struct clk *top, *fail_clk;
        int ret = 0;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        /* bail early if nothing to do */
        if (rate == clk->rate)
                goto out;

        if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
                ret = -EBUSY;
                goto out;
        }

        /* calculate new rates and get the topmost changed clock */
        top = clk_calc_new_rates(clk, rate);
        if (!top) {
                ret = -EINVAL;
                goto out;
        }

        /* notify that we are about to change rates */
        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
        if (fail_clk) {
                pr_warn("%s: failed to set %s rate\n", __func__,
                                fail_clk->name);
                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
                ret = -EBUSY;
                goto out;
        }

        /* change the rates */
        clk_change_rate(top);

        mutex_unlock(&prepare_lock);

        return 0;
out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
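
/*
 * Example (illustrative sketch): when CLK_SET_RATE_PARENT is set and the
 * clock's .round_rate rewrites *parent_rate, the request above walks up the
 * tree; from the consumer's side it is still a single call.  The "foo"
 * names are hypothetical.
 *
 *      ret = clk_set_rate(foo->clk, 104000000);
 *      if (ret)
 *              dev_err(foo->dev, "cannot set rate: %d\n", ret);
 *      else
 *              dev_dbg(foo->dev, "now at %lu Hz\n", clk_get_rate(foo->clk));
 */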

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
        struct clk *parent;

        mutex_lock(&prepare_lock);
        parent = __clk_get_parent(clk);
        mutex_unlock(&prepare_lock);

        return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
        struct clk *ret = NULL;
        u8 index;

        /* handle the trivial cases */

        if (!clk->num_parents)
                goto out;

        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
                        ret = clk->parent = __clk_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }

        if (!clk->ops->get_parent) {
                WARN(!clk->ops->get_parent,
                        "%s: multi-parent clocks must implement .get_parent\n",
                        __func__);
                goto out;
        }

        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
         * unnecessary and expensive calls to __clk_lookup.  We don't set
         * clk->parent here; that is done by the calling function
         */

        index = clk->ops->get_parent(clk->hw);

        if (!clk->parents)
                clk->parents =
                        kzalloc((sizeof(struct clk*) * clk->num_parents),
                                        GFP_KERNEL);

        if (!clk->parents)
                ret = __clk_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                ret = clk->parents[index] =
                        __clk_lookup(clk->parent_names[index]);
        else
                ret = clk->parents[index];

out:
        return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
        struct dentry *d;
        struct dentry *new_parent_d;
#endif

        if (!clk || !new_parent)
                return;

        hlist_del(&clk->child_node);

        if (new_parent)
                hlist_add_head(&clk->child_node, &new_parent->children);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
        if (!inited)
                goto out;

        if (new_parent)
                new_parent_d = new_parent->dentry;
        else
                new_parent_d = orphandir;

        d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
                        new_parent_d, clk->name);
        if (d)
                clk->dentry = d;
        else
                pr_debug("%s: failed to rename debugfs entry for %s\n",
                                __func__, clk->name);
out:
#endif

        clk->parent = new_parent;

        __clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk *old_parent;
        unsigned long flags;
        int ret = -EINVAL;
        u8 i;

        old_parent = clk->parent;

        if (!clk->parents)
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                                                GFP_KERNEL);

        /*
         * find index of new parent clock using cached parent ptrs,
         * or if not yet cached, use string name comparison and cache
         * them now to avoid future calls to __clk_lookup.
         */
        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents && clk->parents[i] == parent)
                        break;
                else if (!strcmp(clk->parent_names[i], parent->name)) {
                        if (clk->parents)
                                clk->parents[i] = __clk_lookup(parent->name);
                        break;
                }
        }

        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
                                __func__, parent->name, clk->name);
                goto out;
        }

        /* migrate prepare and enable */
        if (clk->prepare_count)
                __clk_prepare(parent);

        /* FIXME replace with clk_is_enabled(clk) someday */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_enable(parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        /* change clock input source */
        ret = clk->ops->set_parent(clk->hw, i);

        /* clean up old prepare and enable */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_disable(old_parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        if (clk->prepare_count)
                __clk_unprepare(old_parent);

out:
        return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = 0;

        if (!clk || !clk->ops)
                return -EINVAL;

        if (!clk->ops->set_parent)
                return -ENOSYS;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        if (clk->parent == parent)
                goto out;

        /* propagate PRE_RATE_CHANGE notifications */
        if (clk->notifier_count)
                ret = __clk_speculate_rates(clk, parent->rate);

        /* abort if a driver objects */
        if (ret == NOTIFY_STOP)
                goto out;

        /* only re-parent if the clock is not in use */
        if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
                ret = -EBUSY;
        else
                ret = __clk_set_parent(clk, parent);

        /* propagate ABORT_RATE_CHANGE if .set_parent failed */
        if (ret) {
                __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
                goto out;
        }

        /* propagate rate recalculation downstream */
        __clk_reparent(clk, parent);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
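
/*
 * Example (illustrative sketch): re-parenting a mux clock between two
 * candidate inputs, e.g. a slow always-on oscillator and a fast PLL.  All
 * names here are hypothetical.
 *
 *      struct clk *mux = clk_get(dev, "uart_mux");
 *      struct clk *pll = clk_get(dev, "pll_out");
 *      int ret;
 *
 *      ret = clk_set_parent(mux, pll);
 *      if (ret == -EBUSY)
 *              ;       // CLK_SET_PARENT_GATE set and the mux still prepared
 */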

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev:        device initializing this clk, placeholder for now
 * @clk:        clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
        int i, ret = 0;
        struct clk *orphan;
        struct hlist_node *tmp, *tmp2;

        if (!clk)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* check to see if a clock with this name is already registered */
        if (__clk_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);
                ret = -EEXIST;
                goto out;
        }

        /* check that clk_ops are sane.  See Documentation/clk.txt */
        if (clk->ops->set_rate &&
                        !(clk->ops->round_rate && clk->ops->recalc_rate)) {
                pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        if (clk->ops->set_parent && !clk->ops->get_parent) {
                pr_warning("%s: %s must implement .get_parent & .set_parent\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        /* throw a WARN if any entries in parent_names are NULL */
        for (i = 0; i < clk->num_parents; i++)
                WARN(!clk->parent_names[i],
                                "%s: invalid NULL in %s's .parent_names\n",
                                __func__, clk->name);

        /*
         * Allocate an array of struct clk *'s to avoid unnecessary string
         * look-ups of clk's possible parents.  This can fail for clocks passed
         * in to clk_init during early boot; thus any access to clk->parents[]
         * must always check for a NULL pointer and try to populate it if
         * necessary.
         *
         * If clk->parents is not NULL we skip this entire block.  This allows
         * for clock drivers to statically initialize clk->parents.
         */
        if (clk->num_parents > 1 && !clk->parents) {
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                GFP_KERNEL);
                /*
                 * __clk_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
                 */
                if (clk->parents)
                        for (i = 0; i < clk->num_parents; i++)
                                clk->parents[i] =
                                        __clk_lookup(clk->parent_names[i]);
        }

        clk->parent = __clk_init_parent(clk);

        /*
         * Populate clk->parent if parent has already been __clk_init'd.  If
         * parent has not yet been __clk_init'd then place clk in the orphan
         * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
         * clk list.
         *
         * Every time a new clk is clk_init'd then we walk the list of orphan
         * clocks and re-parent any that are children of the clock currently
         * being clk_init'd.
         */
        if (clk->parent)
                hlist_add_head(&clk->child_node,
                                &clk->parent->children);
        else if (clk->flags & CLK_IS_ROOT)
                hlist_add_head(&clk->child_node, &clk_root_list);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

        /*
         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
         * simple clocks and lazy developers the default fallback is to use the
         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw,
                                __clk_get_rate(clk->parent));
        else if (clk->parent)
                clk->rate = clk->parent->rate;
        else
                clk->rate = 0;

        /*
         * walk the list of orphan clocks and reparent any that are children of
         * this clock
         */
        hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
                                __clk_reparent(orphan, clk);
                                break;
                        }

        /*
         * optional platform-specific magic
         *
         * The .init callback is not used by any of the basic clock types, but
         * exists for weird hardware that must perform initialization magic.
         * Please consider other ways of solving initialization problems before
         * using this callback, as its use is discouraged.
         */
        if (clk->ops->init)
                clk->ops->init(clk->hw);

        clk_debug_register(clk);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns the struct clk on success, or an ERR_PTR-encoded error code on
 * failure.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
        int ret;
        struct clk *clk;

        clk = hw->clk;
        clk->name = hw->init->name;
        clk->ops = hw->init->ops;
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->parent_names = hw->init->parent_names;
        clk->num_parents = hw->init->num_parents;

        ret = __clk_init(dev, clk);
        if (ret)
                return ERR_PTR(ret);

        return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in conjunction
 * with the rest of the clock API.  In the event of an error clk_register
 * will return an error code; drivers must test for an error code after
 * calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
        int i, ret;
        struct clk *clk;

        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk) {
                pr_err("%s: could not allocate clk\n", __func__);
                ret = -ENOMEM;
                goto fail_out;
        }

        clk->name = kstrdup(hw->init->name, GFP_KERNEL);
        if (!clk->name) {
                pr_err("%s: could not allocate clk->name\n", __func__);
                ret = -ENOMEM;
                goto fail_name;
        }
        clk->ops = hw->init->ops;
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->num_parents = hw->init->num_parents;
        hw->clk = clk;

        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
                        GFP_KERNEL);

        if (!clk->parent_names) {
                pr_err("%s: could not allocate clk->parent_names\n", __func__);
                ret = -ENOMEM;
                goto fail_parent_names;
        }

        /* copy each string name in case parent_names is __initdata */
        for (i = 0; i < clk->num_parents; i++) {
                clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
                                                GFP_KERNEL);
                if (!clk->parent_names[i]) {
                        pr_err("%s: could not copy parent_names\n", __func__);
                        ret = -ENOMEM;
                        goto fail_parent_names_copy;
                }
        }

        ret = __clk_init(dev, clk);
        if (!ret)
                return clk;

fail_parent_names_copy:
        while (--i >= 0)
                kfree(clk->parent_names[i]);
        kfree(clk->parent_names);
fail_parent_names:
        kfree(clk->name);
fail_name:
        kfree(clk);
fail_out:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
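
/*
 * Example (illustrative sketch): registering a clock from platform code.
 * hw->init must point at a struct clk_init_data laid out exactly as
 * clk_register reads it above; the ops structure and names here are
 * hypothetical.
 *
 *      static const char *pll_parents[] = { "osc24m" };
 *
 *      static struct clk_init_data pll_init = {
 *              .name           = "pll1",
 *              .ops            = &my_pll_ops,  // hypothetical clk_ops
 *              .parent_names   = pll_parents,
 *              .num_parents    = ARRAY_SIZE(pll_parents),
 *      };
 *
 *      static struct clk_hw pll_hw = { .init = &pll_init };
 *
 *      struct clk *pll = clk_register(NULL, &pll_hw);
 *      if (IS_ERR(pll))
 *              pr_err("pll1 registration failed: %ld\n", PTR_ERR(pll));
 */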

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would deadlock by re-acquiring the prepare_lock mutex.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn;
        int ret = -ENOMEM;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        /* if clk wasn't in the notifier list, allocate new clk_notifier */
        if (cn->clk != clk) {
                cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
                if (!cn)
                        goto out;

                cn->clk = clk;
                srcu_init_notifier_head(&cn->notifier_head);

                list_add(&cn->node, &clk_notifier_list);
        }

        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

        clk->notifier_count++;

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
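
/*
 * Example (illustrative sketch): a notifier callback receives the message
 * type and a struct clk_notifier_data, as dispatched by __clk_notify above.
 * Returning NOTIFY_BAD from a PRE_RATE_CHANGE callback vetoes the change.
 * The "foo" names and the 200 MHz limit are hypothetical.
 *
 *      static int foo_clk_notify(struct notifier_block *nb,
 *                                unsigned long msg, void *data)
 *      {
 *              struct clk_notifier_data *cnd = data;
 *
 *              if (msg == PRE_RATE_CHANGE && cnd->new_rate > 200000000)
 *                      return NOTIFY_BAD;      // veto rates we cannot handle
 *
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_nb = {
 *              .notifier_call = foo_clk_notify,
 *      };
 *
 *      clk_notifier_register(foo->clk, &foo_nb);
 */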

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and free the memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn = NULL;
        int ret = -EINVAL;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

                clk->notifier_count--;

                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
                        srcu_cleanup_notifier_head(&cn->notifier_head);
                        kfree(cn);
                }

        } else {
                ret = -ENOENT;
        }

        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
        struct list_head link;

        struct device_node *node;
        struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
        void *data;
};

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
                                     void *data)
{
        return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
        struct clk_onecell_data *clk_data = data;
        unsigned int idx = clkspec->args[0];

        if (idx >= clk_data->clk_num) {
                pr_err("%s: invalid clock index %d\n", __func__, idx);
                return ERR_PTR(-EINVAL);
        }

        return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
                        struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
                                                   void *data),
                        void *data)
{
        struct of_clk_provider *cp;

        cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        cp->node = of_node_get(np);
        cp->data = data;
        cp->get = clk_src_get;

        mutex_lock(&of_clk_lock);
        list_add(&cp->link, &of_clk_providers);
        mutex_unlock(&of_clk_lock);
        pr_debug("Added clock from %s\n", np->full_name);

        return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
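
/*
 * Example (illustrative sketch): a provider exposing several clocks through
 * one device tree node, using of_clk_src_onecell_get so that a consumer's
 * "clocks = <&myclks 2>;" phandle+index resolves to clks[2].  The "my_"
 * names and NR_MY_CLKS are hypothetical.
 *
 *      static struct clk *my_clks[NR_MY_CLKS];
 *
 *      static struct clk_onecell_data my_clk_data = {
 *              .clks           = my_clks,
 *              .clk_num        = ARRAY_SIZE(my_clks),
 *      };
 *
 *      of_clk_add_provider(np, of_clk_src_onecell_get, &my_clk_data);
 */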

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
        struct of_clk_provider *cp;

        mutex_lock(&of_clk_lock);
        list_for_each_entry(cp, &of_clk_providers, link) {
                if (cp->node == np) {
                        list_del(&cp->link);
                        of_node_put(cp->node);
                        kfree(cp);
                        break;
                }
        }
        mutex_unlock(&of_clk_lock);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
        struct of_clk_provider *provider;
        struct clk *clk = ERR_PTR(-ENOENT);

        /* Check if we have such a provider in our list */
        mutex_lock(&of_clk_lock);
        list_for_each_entry(provider, &of_clk_providers, link) {
                if (provider->node == clkspec->np)
                        clk = provider->get(clkspec, provider->data);
                if (!IS_ERR(clk))
                        break;
        }
        mutex_unlock(&of_clk_lock);

        return clk;
}

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
        struct of_phandle_args clkspec;
        const char *clk_name;
        int rc;

        if (index < 0)
                return NULL;

        rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
                                        &clkspec);
        if (rc)
                return NULL;

        if (of_property_read_string_index(clkspec.np, "clock-output-names",
                                          clkspec.args_count ? clkspec.args[0] : 0,
                                          &clk_name) < 0)
                clk_name = clkspec.np->name;

        of_node_put(clkspec.np);
        return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
        struct device_node *np;

        for_each_matching_node(np, matches) {
                const struct of_device_id *match = of_match_node(matches, np);
                of_clk_init_cb_t clk_init_cb = match->data;
                clk_init_cb(np);
        }
}
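
/*
 * Example (illustrative sketch): platform boot code passes of_clk_init a
 * match table whose .data members are of_clk_init_cb_t callbacks, each
 * invoked with the matching device node as above.  The compatible strings
 * and setup functions here are hypothetical.
 *
 *      static const struct of_device_id my_clk_match[] __initconst = {
 *              { .compatible = "myvendor,pll",  .data = my_pll_setup },
 *              { .compatible = "myvendor,gate", .data = my_gate_setup },
 *              { }
 *      };
 *
 *      void __init my_soc_init_clocks(void)
 *      {
 *              of_clk_init(my_clk_match);
 *      }
 */
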
#endif