linux/drivers/clk/clk.c
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
        struct dentry *d;
        int ret = -ENOMEM;

        if (!clk || !pdentry) {
                ret = -EINVAL;
                goto out;
        }

        d = debugfs_create_dir(clk->name, pdentry);
        if (!d)
                goto out;

        clk->dentry = d;

        d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
                        (u32 *)&clk->rate);
        if (!d)
                goto err_out;

        d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
                        (u32 *)&clk->flags);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->prepare_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->enable_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->notifier_count);
        if (!d)
                goto err_out;

        ret = 0;
        goto out;

err_out:
        debugfs_remove(clk->dentry);
out:
        return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
        struct clk *child;
        struct hlist_node *tmp;
        int ret = -EINVAL;

        if (!clk || !pdentry)
                goto out;

        ret = clk_debug_create_one(clk, pdentry);

        if (ret)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_debug_create_subtree(child, clk->dentry);

        ret = 0;
out:
        return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
        struct clk *parent;
        struct dentry *pdentry;
        int ret = 0;

        if (!inited)
                goto out;

        parent = clk->parent;

        /*
         * Check to see if a clk is a root clk.  Also check that it is
         * safe to add this clk to debugfs
         */
        if (!parent) {
                if (clk->flags & CLK_IS_ROOT)
                        pdentry = rootdir;
                else
                        pdentry = orphandir;
        } else {
                if (parent->dentry)
                        pdentry = parent->dentry;
                else
                        goto out;
        }

        ret = clk_debug_create_subtree(clk, pdentry);

out:
        return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        rootdir = debugfs_create_dir("clk", NULL);

        if (!rootdir)
                return -ENOMEM;

        orphandir = debugfs_create_dir("orphans", rootdir);

        if (!orphandir)
                return -ENOMEM;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_debug_create_subtree(clk, rootdir);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_debug_create_subtree(clk, orphandir);

        inited = 1;

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif
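
/*
 * The debugfs layout produced above looks roughly like the sketch below.
 * This is an illustration only: "pll1" and "pll1_div" are hypothetical
 * clock names, while the per-clock files match those created in
 * clk_debug_create_one:
 *
 *      /sys/kernel/debug/clk/
 *          orphans/
 *          pll1/
 *              clk_rate
 *              clk_flags
 *              clk_prepare_count
 *              clk_enable_count
 *              clk_notifier_count
 *              pll1_div/
 *                  clk_rate
 *                  ...
 */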

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
        struct clk *child;
        struct hlist_node *tmp;
        unsigned long flags;

        if (!clk)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_disable_unused_subtree(child);

        spin_lock_irqsave(&enable_lock, flags);

        if (clk->enable_count)
                goto unlock_out;

        if (clk->flags & CLK_IGNORE_UNUSED)
                goto unlock_out;

        if (__clk_is_enabled(clk) && clk->ops->disable)
                clk->ops->disable(clk->hw);

unlock_out:
        spin_unlock_irqrestore(&enable_lock, flags);

out:
        return;
}

static int clk_disable_unused(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_disable_unused_subtree(clk);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(clk);

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_disable_unused);

/***    helper functions   ***/

inline const char *__clk_get_name(struct clk *clk)
{
        return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
        return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
        return !clk ? -EINVAL : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
        return !clk ? NULL : clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
        return !clk ? -EINVAL : clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
        return !clk ? -EINVAL : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
        unsigned long ret;

        if (!clk) {
                ret = 0;
                goto out;
        }

        ret = clk->rate;

        if (clk->flags & CLK_IS_ROOT)
                goto out;

        if (!clk->parent)
                ret = 0;

out:
        return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
        return !clk ? -EINVAL : clk->flags;
}

int __clk_is_enabled(struct clk *clk)
{
        int ret;

        if (!clk)
                return -EINVAL;

        /*
         * .is_enabled is only mandatory for clocks that gate;
         * fall back to the software usage counter if .is_enabled is missing
         */
        if (!clk->ops->is_enabled) {
                ret = clk->enable_count ? 1 : 0;
                goto out;
        }

        ret = clk->ops->is_enabled(clk->hw);
out:
        return ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
        struct clk *child;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!strcmp(clk->name, name))
                return clk;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_lookup_subtree(name, child);
                if (ret)
                        return ret;
        }

        return NULL;
}

struct clk *__clk_lookup(const char *name)
{
        struct clk *root_clk;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!name)
                return NULL;

        /* search the 'proper' clk tree first */
        hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        /* if not found, then search the orphan tree */
        hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        return NULL;
}

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(clk->prepare_count == 0))
                return;

        if (--clk->prepare_count > 0)
                return;

        WARN_ON(clk->enable_count > 0);

        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);

        __clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
        mutex_lock(&prepare_lock);
        __clk_unprepare(clk);
        mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (clk->prepare_count == 0) {
                ret = __clk_prepare(clk->parent);
                if (ret)
                        return ret;

                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        if (ret) {
                                __clk_unprepare(clk->parent);
                                return ret;
                        }
                }
        }

        clk->prepare_count++;

        return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
        int ret;

        mutex_lock(&prepare_lock);
        ret = __clk_prepare(clk);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(IS_ERR(clk)))
                return;

        if (WARN_ON(clk->enable_count == 0))
                return;

        if (--clk->enable_count > 0)
                return;

        if (clk->ops->disable)
                clk->ops->disable(clk->hw);

        __clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
        unsigned long flags;

        spin_lock_irqsave(&enable_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (WARN_ON(clk->prepare_count == 0))
                return -ESHUTDOWN;

        if (clk->enable_count == 0) {
                ret = __clk_enable(clk->parent);

                if (ret)
                        return ret;

                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        if (ret) {
                                __clk_disable(clk->parent);
                                return ret;
                        }
                }
        }

        clk->enable_count++;
        return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&enable_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
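
/*
 * A sketch of typical consumer usage of the prepare/enable pair.  This is
 * illustrative only: "uart" is a hypothetical clock name and the error
 * labels are assumed to exist in the caller:
 *
 *      struct clk *clk;
 *      int ret;
 *
 *      clk = clk_get(dev, "uart");
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *
 *      ret = clk_prepare(clk);         (may sleep: process context only)
 *      if (ret)
 *              goto err_put;
 *
 *      ret = clk_enable(clk);          (atomic: safe from IRQ context)
 *      if (ret)
 *              goto err_unprepare;
 *
 *      ...
 *
 *      clk_disable(clk);               (tear down in reverse order)
 *      clk_unprepare(clk);
 *      clk_put(clk);
 */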

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk.  Does not query the hardware.  If
 * clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long rate;

        mutex_lock(&prepare_lock);
        rate = __clk_get_rate(clk);
        mutex_unlock(&prepare_lock);

        return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long parent_rate = 0;

        if (!clk)
                return -EINVAL;

        if (!clk->ops->round_rate) {
                if (clk->flags & CLK_SET_RATE_PARENT)
                        return __clk_round_rate(clk->parent, rate);
                else
                        return clk->rate;
        }

        if (clk->parent)
                parent_rate = clk->parent->rate;

        return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the .round_rate
 * operation then either the parent's rounded rate (with CLK_SET_RATE_PARENT
 * set) or clk's cached rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long ret;

        mutex_lock(&prepare_lock);
        ret = __clk_round_rate(clk, rate);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
{
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;

        cnd.clk = clk;
        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;

        list_for_each_entry(cn, &clk_notifier_list, node) {
                if (cn->clk == clk) {
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
                        break;
                }
        }

        return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
        unsigned long old_rate;
        unsigned long parent_rate = 0;
        struct hlist_node *tmp;
        struct clk *child;

        old_rate = clk->rate;

        if (clk->parent)
                parent_rate = clk->parent->rate;

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                clk->rate = parent_rate;

        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
        if (clk->notifier_count && msg)
                __clk_notify(clk, msg, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                __clk_recalc_rates(child, msg);
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
        struct hlist_node *tmp;
        struct clk *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;

        if (clk->ops->recalc_rate)
                new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                new_rate = parent_rate;

        /* abort the rate change if a driver returns NOTIFY_BAD */
        if (clk->notifier_count)
                ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

        if (ret == NOTIFY_BAD)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_speculate_rates(child, new_rate);
                if (ret == NOTIFY_BAD)
                        break;
        }

out:
        return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
        struct clk *child;
        struct hlist_node *tmp;

        clk->new_rate = new_rate;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                if (child->ops->recalc_rate)
                        child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
                else
                        child->new_rate = new_rate;
                clk_calc_subtree(child, child->new_rate);
        }
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
        struct clk *top = clk;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;

        /* sanity */
        if (IS_ERR_OR_NULL(clk))
                return NULL;

        /* save parent rate, if it exists */
        if (clk->parent)
                best_parent_rate = clk->parent->rate;

        /* never propagate up to the parent */
        if (!(clk->flags & CLK_SET_RATE_PARENT)) {
                if (!clk->ops->round_rate) {
                        clk->new_rate = clk->rate;
                        return NULL;
                }
                new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
                goto out;
        }

        /* need clk->parent from here on out */
        if (!clk->parent) {
                pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
                return NULL;
        }

        if (!clk->ops->round_rate) {
                top = clk_calc_new_rates(clk->parent, rate);
                new_rate = clk->parent->new_rate;

                goto out;
        }

        new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

        if (best_parent_rate != clk->parent->rate) {
                top = clk_calc_new_rates(clk->parent, best_parent_rate);

                goto out;
        }

out:
        clk_calc_subtree(clk, new_rate);

        return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
        struct hlist_node *tmp;
        struct clk *child, *fail_clk = NULL;
        int ret = NOTIFY_DONE;

        if (clk->rate == clk->new_rate)
                return NULL;

        if (clk->notifier_count) {
                ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
                if (ret == NOTIFY_BAD)
                        fail_clk = clk;
        }

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                clk = clk_propagate_rate_change(child, event);
                if (clk)
                        fail_clk = clk;
        }

        return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
        struct clk *child;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        struct hlist_node *tmp;

        old_rate = clk->rate;

        if (clk->parent)
                best_parent_rate = clk->parent->rate;

        if (clk->ops->set_rate)
                clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
        else
                clk->rate = best_parent_rate;

        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        struct clk *top, *fail_clk;
        int ret = 0;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        /* bail early if nothing to do */
        if (rate == clk->rate)
                goto out;

        if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
                ret = -EBUSY;
                goto out;
        }

        /* calculate new rates and get the topmost changed clock */
        top = clk_calc_new_rates(clk, rate);
        if (!top) {
                ret = -EINVAL;
                goto out;
        }

        /* notify that we are about to change rates */
        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
        if (fail_clk) {
                pr_warn("%s: failed to set %s rate\n", __func__,
                                fail_clk->name);
                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
                ret = -EBUSY;
                goto out;
        }

        /* change the rates */
        clk_change_rate(top);

        mutex_unlock(&prepare_lock);

        return 0;
out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
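
/*
 * A sketch of an illustrative rate-change sequence (the 48MHz target is
 * arbitrary):
 *
 *      long rounded;
 *      int ret;
 *
 *      rounded = clk_round_rate(clk, 48000000);
 *      if (rounded > 0)
 *              ret = clk_set_rate(clk, rounded);
 *
 * Rounding first lets a driver learn what the hardware can actually provide
 * before committing to the change.
 */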

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
        struct clk *parent;

        mutex_lock(&prepare_lock);
        parent = __clk_get_parent(clk);
        mutex_unlock(&prepare_lock);

        return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
        struct clk *ret = NULL;
        u8 index;

        /* handle the trivial cases */

        if (!clk->num_parents)
                goto out;

        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
                        clk->parent = __clk_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }

        if (!clk->ops->get_parent) {
                WARN(1,
                        "%s: multi-parent clocks must implement .get_parent\n",
                        __func__);
                goto out;
        }

        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
         * unnecessary and expensive calls to __clk_lookup.  We don't set
         * clk->parent here; that is done by the calling function
         */

        index = clk->ops->get_parent(clk->hw);

        if (!clk->parents)
                clk->parents =
                        kzalloc((sizeof(struct clk *) * clk->num_parents),
                                        GFP_KERNEL);

        if (!clk->parents)
                ret = __clk_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                ret = clk->parents[index] =
                        __clk_lookup(clk->parent_names[index]);
        else
                ret = clk->parents[index];

out:
        return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
        struct dentry *d;
        struct dentry *new_parent_d;
#endif

        if (!clk || !new_parent)
                return;

        hlist_del(&clk->child_node);

        if (new_parent)
                hlist_add_head(&clk->child_node, &new_parent->children);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
        if (!inited)
                goto out;

        if (new_parent)
                new_parent_d = new_parent->dentry;
        else
                new_parent_d = orphandir;

        d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
                        new_parent_d, clk->name);
        if (d)
                clk->dentry = d;
        else
                pr_debug("%s: failed to rename debugfs entry for %s\n",
                                __func__, clk->name);
out:
#endif

        clk->parent = new_parent;

        __clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk *old_parent;
        unsigned long flags;
        int ret = -EINVAL;
        u8 i;

        old_parent = clk->parent;

        if (!clk->parents)
                clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
                                                                GFP_KERNEL);

        /*
         * find index of new parent clock using cached parent ptrs,
         * or if not yet cached, use string name comparison and cache
         * them now to avoid future calls to __clk_lookup.
         */
        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents && clk->parents[i] == parent)
                        break;
                else if (!strcmp(clk->parent_names[i], parent->name)) {
                        if (clk->parents)
                                clk->parents[i] = __clk_lookup(parent->name);
                        break;
                }
        }

        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
                                __func__, parent->name, clk->name);
                goto out;
        }

        /* migrate prepare and enable */
        if (clk->prepare_count)
                __clk_prepare(parent);

        /* FIXME replace with clk_is_enabled(clk) someday */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_enable(parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        /* change clock input source */
        ret = clk->ops->set_parent(clk->hw, i);

        /* clean up old prepare and enable */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_disable(old_parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        if (clk->prepare_count)
                __clk_unprepare(old_parent);

out:
        return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, debugfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = 0;

        if (!clk || !clk->ops)
                return -EINVAL;

        if (!clk->ops->set_parent)
                return -ENOSYS;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        if (clk->parent == parent)
                goto out;

        /* propagate PRE_RATE_CHANGE notifications */
        if (clk->notifier_count)
                ret = __clk_speculate_rates(clk, parent->rate);

        /* abort if a driver objects */
        if (ret == NOTIFY_STOP)
                goto out;

        /* only re-parent if the clock is not in use */
        if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
                ret = -EBUSY;
        else
                ret = __clk_set_parent(clk, parent);

        /* propagate ABORT_RATE_CHANGE if .set_parent failed */
        if (ret) {
                __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
                goto out;
        }

        /* propagate rate recalculation downstream */
        __clk_reparent(clk, parent);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
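
/*
 * A sketch of an illustrative mux re-parenting sequence ("mux" and "pll2"
 * are hypothetical clock handles).  With CLK_SET_PARENT_GATE set, the mux
 * must be unprepared first or clk_set_parent returns -EBUSY:
 *
 *      clk_disable(mux);
 *      clk_unprepare(mux);
 *      ret = clk_set_parent(mux, pll2);
 *      if (!ret) {
 *              clk_prepare(mux);
 *              clk_enable(mux);
 *      }
 */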

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev:        device initializing this clk, placeholder for now
 * @clk:        clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
        int i, ret = 0;
        struct clk *orphan;
        struct hlist_node *tmp, *tmp2;

        if (!clk)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* check to see if a clock with this name is already registered */
        if (__clk_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);
                ret = -EEXIST;
                goto out;
        }

        /* check that clk_ops are sane.  See Documentation/clk.txt */
        if (clk->ops->set_rate &&
                        !(clk->ops->round_rate && clk->ops->recalc_rate)) {
                pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        if (clk->ops->set_parent && !clk->ops->get_parent) {
                pr_warning("%s: %s must implement .get_parent & .set_parent\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        /* throw a WARN if any entries in parent_names are NULL */
        for (i = 0; i < clk->num_parents; i++)
                WARN(!clk->parent_names[i],
                                "%s: invalid NULL in %s's .parent_names\n",
                                __func__, clk->name);

        /*
         * Allocate an array of struct clk *'s to avoid unnecessary string
         * look-ups of clk's possible parents.  This can fail for clocks passed
         * in to clk_init during early boot; thus any access to clk->parents[]
         * must always check for a NULL pointer and try to populate it if
         * necessary.
         *
         * If clk->parents is not NULL we skip this entire block.  This allows
         * for clock drivers to statically initialize clk->parents.
         */
        if (clk->num_parents > 1 && !clk->parents) {
                clk->parents = kzalloc((sizeof(struct clk *) * clk->num_parents),
                                GFP_KERNEL);
                /*
                 * __clk_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
                 */
                if (clk->parents)
                        for (i = 0; i < clk->num_parents; i++)
                                clk->parents[i] =
                                        __clk_lookup(clk->parent_names[i]);
        }

        clk->parent = __clk_init_parent(clk);

        /*
         * Populate clk->parent if parent has already been __clk_init'd.  If
         * parent has not yet been __clk_init'd then place clk in the orphan
         * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
         * clk list.
         *
         * Every time a new clk is clk_init'd then we walk the list of orphan
         * clocks and re-parent any that are children of the clock currently
         * being clk_init'd.
         */
        if (clk->parent)
                hlist_add_head(&clk->child_node,
                                &clk->parent->children);
        else if (clk->flags & CLK_IS_ROOT)
                hlist_add_head(&clk->child_node, &clk_root_list);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

        /*
         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
         * simple clocks and lazy developers the default fallback is to use the
         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw,
                                __clk_get_rate(clk->parent));
        else if (clk->parent)
                clk->rate = clk->parent->rate;
        else
                clk->rate = 0;

        /*
         * walk the list of orphan clocks and reparent any that are children of
         * this clock
         */
        hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
                                __clk_reparent(orphan, clk);
                                break;
                        }

        /*
         * optional platform-specific magic
         *
         * The .init callback is not used by any of the basic clock types, but
         * exists for weird hardware that must perform initialization magic.
         * Please consider other ways of solving initialization problems before
         * using this callback, as its use is discouraged.
         */
        if (clk->ops->init)
                clk->ops->init(clk->hw);

        clk_debug_register(clk);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk. None of the fields
 * of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.  It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.  Returns 0
 * on success, otherwise an error code.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
        int ret;
        struct clk *clk;

        clk = hw->clk;
        clk->name = hw->init->name;
        clk->ops = hw->init->ops;
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->parent_names = hw->init->parent_names;
        clk->num_parents = hw->init->num_parents;

        ret = __clk_init(dev, clk);
        if (ret)
                return ERR_PTR(ret);

        return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
        int i, ret;
        struct clk *clk;

        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk) {
                pr_err("%s: could not allocate clk\n", __func__);
                ret = -ENOMEM;
                goto fail_out;
        }

        clk->name = kstrdup(hw->init->name, GFP_KERNEL);
        if (!clk->name) {
                pr_err("%s: could not allocate clk->name\n", __func__);
                ret = -ENOMEM;
                goto fail_name;
        }
        clk->ops = hw->init->ops;
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->num_parents = hw->init->num_parents;
        hw->clk = clk;

        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kzalloc((sizeof(char *) * clk->num_parents),
                        GFP_KERNEL);

        if (!clk->parent_names) {
                pr_err("%s: could not allocate clk->parent_names\n", __func__);
                ret = -ENOMEM;
                goto fail_parent_names;
        }

        /* copy each string name in case parent_names is __initdata */
        for (i = 0; i < clk->num_parents; i++) {
                clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
                                                GFP_KERNEL);
                if (!clk->parent_names[i]) {
                        pr_err("%s: could not copy parent_names\n", __func__);
                        ret = -ENOMEM;
                        goto fail_parent_names_copy;
                }
        }

        ret = __clk_init(dev, clk);
        if (!ret)
                return clk;

fail_parent_names_copy:
        while (--i >= 0)
                kfree(clk->parent_names[i]);
        kfree(clk->parent_names);
fail_parent_names:
        kfree(clk->name);
fail_name:
        kfree(clk);
fail_out:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
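
/*
 * A sketch of an illustrative registration.  The "foo" names are
 * hypothetical and foo_gate_ops is a driver-provided clk_ops; the
 * clk_init_data fields match those consumed above:
 *
 *      static const char *foo_parents[] = { "foo_parent" };
 *      static struct clk_init_data foo_init = {
 *              .name = "foo_gate",
 *              .ops = &foo_gate_ops,
 *              .parent_names = foo_parents,
 *              .num_parents = 1,
 *      };
 *
 *      foo->hw.init = &foo_init;
 *      clk = clk_register(dev, &foo->hw);
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 */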

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this would deadlock by recursively acquiring the prepare_lock mutex.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn;
        int ret = -ENOMEM;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        /* if clk wasn't in the notifier list, allocate new clk_notifier */
        if (cn->clk != clk) {
                cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
                if (!cn)
                        goto out;

                cn->clk = clk;
                srcu_init_notifier_head(&cn->notifier_head);

                list_add(&cn->node, &clk_notifier_list);
        }

        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

        clk->notifier_count++;

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
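
/*
 * A sketch of an illustrative notifier callback (foo_max_rate and the veto
 * policy are hypothetical):
 *
 *      static int foo_clk_notify(struct notifier_block *nb,
 *                                unsigned long event, void *data)
 *      {
 *              struct clk_notifier_data *cnd = data;
 *
 *              if (event == PRE_RATE_CHANGE && cnd->new_rate > foo_max_rate)
 *                      return NOTIFY_BAD;      (veto the rate change)
 *
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_nb = {
 *              .notifier_call = foo_clk_notify,
 *      };
 *      ...
 *      clk_notifier_register(clk, &foo_nb);
 */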

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn = NULL;
        int ret = -EINVAL;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

                clk->notifier_count--;

                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
                        srcu_cleanup_notifier_head(&cn->notifier_head);
                        kfree(cn);
                }

        } else {
                ret = -ENOENT;
        }

        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
        struct list_head link;

        struct device_node *node;
        struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
        void *data;
};

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
                                     void *data)
{
        return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
                        struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
                                                   void *data),
                        void *data)
{
        struct of_clk_provider *cp;

        cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        cp->node = of_node_get(np);
        cp->data = data;
        cp->get = clk_src_get;

        mutex_lock(&of_clk_lock);
        list_add(&cp->link, &of_clk_providers);
        mutex_unlock(&of_clk_lock);
        pr_debug("Added clock from %s\n", np->full_name);

        return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
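
/*
 * A sketch of an illustrative provider registration for a single
 * fixed-rate clock ("osc" and its 24MHz rate are hypothetical, and
 * clk_register_fixed_rate is assumed to be available from the basic
 * clock types):
 *
 *      clk = clk_register_fixed_rate(NULL, "osc", NULL, CLK_IS_ROOT,
 *                                      24000000);
 *      of_clk_add_provider(np, of_clk_src_simple_get, clk);
 */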

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
        struct of_clk_provider *cp;

        mutex_lock(&of_clk_lock);
        list_for_each_entry(cp, &of_clk_providers, link) {
                if (cp->node == np) {
                        list_del(&cp->link);
                        of_node_put(cp->node);
                        kfree(cp);
                        break;
                }
        }
        mutex_unlock(&of_clk_lock);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
        struct of_clk_provider *provider;
        struct clk *clk = ERR_PTR(-ENOENT);

        /* Check if we have such a provider in our array */
        mutex_lock(&of_clk_lock);
        list_for_each_entry(provider, &of_clk_providers, link) {
                if (provider->node == clkspec->np)
                        clk = provider->get(clkspec, provider->data);
                if (!IS_ERR(clk))
                        break;
        }
        mutex_unlock(&of_clk_lock);

        return clk;
}

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
        struct of_phandle_args clkspec;
        const char *clk_name;
        int rc;

        if (index < 0)
                return NULL;

        rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
                                        &clkspec);
        if (rc)
                return NULL;

        if (of_property_read_string_index(clkspec.np, "clock-output-names",
                                          clkspec.args_count ? clkspec.args[0] : 0,
                                          &clk_name) < 0)
                clk_name = clkspec.np->name;

        of_node_put(clkspec.np);
        return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
        struct device_node *np;

        for_each_matching_node(np, matches) {
                const struct of_device_id *match = of_match_node(matches, np);
                of_clk_init_cb_t clk_init_cb = match->data;
                clk_init_cb(np);
        }
}
#endif
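
/*
 * A sketch of an illustrative of_clk_init match table.  The compatible
 * string and the of_fixed_clk_setup callback are assumptions for this
 * example; real platforms supply their own entries:
 *
 *      static const struct of_device_id clk_match[] __initconst = {
 *              { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
 *              {}
 *      };
 *
 *      of_clk_init(clk_match);
 */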