linux/drivers/clk/clk.c
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***        debugfs support        ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
        struct dentry *d;
        int ret = -ENOMEM;

        if (!clk || !pdentry) {
                ret = -EINVAL;
                goto out;
        }

        d = debugfs_create_dir(clk->name, pdentry);
        if (!d)
                goto out;

        clk->dentry = d;

        d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
                        (u32 *)&clk->rate);
        if (!d)
                goto err_out;

        d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
                        (u32 *)&clk->flags);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->prepare_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->enable_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->notifier_count);
        if (!d)
                goto err_out;

        ret = 0;
        goto out;

err_out:
        debugfs_remove(clk->dentry);
out:
        return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
        struct clk *child;
        struct hlist_node *tmp;
        int ret = -EINVAL;

        if (!clk || !pdentry)
                goto out;

        ret = clk_debug_create_one(clk, pdentry);

        if (ret)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_debug_create_subtree(child, clk->dentry);

        ret = 0;
out:
        return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
        struct clk *parent;
        struct dentry *pdentry;
        int ret = 0;

        if (!inited)
                goto out;

        parent = clk->parent;

        /*
         * Check to see if a clk is a root clk.  Also check that it is
         * safe to add this clk to debugfs
         */
        if (!parent)
                if (clk->flags & CLK_IS_ROOT)
                        pdentry = rootdir;
                else
                        pdentry = orphandir;
        else
                if (parent->dentry)
                        pdentry = parent->dentry;
                else
                        goto out;

        ret = clk_debug_create_subtree(clk, pdentry);

out:
        return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        rootdir = debugfs_create_dir("clk", NULL);

        if (!rootdir)
                return -ENOMEM;

        orphandir = debugfs_create_dir("orphans", rootdir);

        if (!orphandir)
                return -ENOMEM;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_debug_create_subtree(clk, rootdir);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_debug_create_subtree(clk, orphandir);

        inited = 1;

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
        struct clk *child;
        struct hlist_node *tmp;
        unsigned long flags;

        if (!clk)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_disable_unused_subtree(child);

        spin_lock_irqsave(&enable_lock, flags);

        if (clk->enable_count)
                goto unlock_out;

        if (clk->flags & CLK_IGNORE_UNUSED)
                goto unlock_out;

        /*
         * some gate clocks have special needs during the disable-unused
         * sequence.  call .disable_unused if available, otherwise fall
         * back to .disable
         */
        if (__clk_is_enabled(clk)) {
                if (clk->ops->disable_unused)
                        clk->ops->disable_unused(clk->hw);
                else if (clk->ops->disable)
                        clk->ops->disable(clk->hw);
        }

unlock_out:
        spin_unlock_irqrestore(&enable_lock, flags);

out:
        return;
}

static int clk_disable_unused(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_disable_unused_subtree(clk);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(clk);

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_disable_unused);

/***    helper functions   ***/

inline const char *__clk_get_name(struct clk *clk)
{
        return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
        return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
        return !clk ? 0 : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
        return !clk ? NULL : clk->parent;
}

inline unsigned int __clk_get_enable_count(struct clk *clk)
{
        return !clk ? 0 : clk->enable_count;
}

inline unsigned int __clk_get_prepare_count(struct clk *clk)
{
        return !clk ? 0 : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
        unsigned long ret;

        if (!clk) {
                ret = 0;
                goto out;
        }

        ret = clk->rate;

        if (clk->flags & CLK_IS_ROOT)
                goto out;

        if (!clk->parent)
                ret = 0;

out:
        return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
        return !clk ? 0 : clk->flags;
}

bool __clk_is_enabled(struct clk *clk)
{
        int ret;

        if (!clk)
                return false;

        /*
         * .is_enabled is only mandatory for clocks that gate; fall back to
         * the software usage counter if .is_enabled is missing
         */
        if (!clk->ops->is_enabled) {
                ret = clk->enable_count ? 1 : 0;
                goto out;
        }

        ret = clk->ops->is_enabled(clk->hw);
out:
        return !!ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
        struct clk *child;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!strcmp(clk->name, name))
                return clk;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_lookup_subtree(name, child);
                if (ret)
                        return ret;
        }

        return NULL;
}

struct clk *__clk_lookup(const char *name)
{
        struct clk *root_clk;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!name)
                return NULL;

        /* search the 'proper' clk tree first */
        hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        /* if not found, then search the orphan tree */
        hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        return NULL;
}

/***        clk api        ***/

void __clk_unprepare(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(clk->prepare_count == 0))
                return;

        if (--clk->prepare_count > 0)
                return;

        WARN_ON(clk->enable_count > 0);

        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);

        __clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
        mutex_lock(&prepare_lock);
        __clk_unprepare(clk);
        mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (clk->prepare_count == 0) {
                ret = __clk_prepare(clk->parent);
                if (ret)
                        return ret;

                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        if (ret) {
                                __clk_unprepare(clk->parent);
                                return ret;
                        }
                }
        }

        clk->prepare_count++;

        return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
{
        int ret;

        mutex_lock(&prepare_lock);
        ret = __clk_prepare(clk);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(IS_ERR(clk)))
                return;

        if (WARN_ON(clk->enable_count == 0))
                return;

        if (--clk->enable_count > 0)
                return;

        if (clk->ops->disable)
                clk->ops->disable(clk->hw);

        __clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
        unsigned long flags;

        spin_lock_irqsave(&enable_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (WARN_ON(clk->prepare_count == 0))
                return -ESHUTDOWN;

        if (clk->enable_count == 0) {
                ret = __clk_enable(clk->parent);

                if (ret)
                        return ret;

                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        if (ret) {
                                __clk_disable(clk->parent);
                                return ret;
                        }
                }
        }

        clk->enable_count++;
        return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, a negative error
 * code otherwise.
 */
int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&enable_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
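
/*
 * Illustrative sketch (not part of the original file): how a consumer is
 * expected to pair the prepare/enable and disable/unprepare halves of the
 * API documented above.  The "uart" con_id and the surrounding driver
 * context are hypothetical.
 */
#if 0
static int example_consumer(struct device *dev)
{
        struct clk *clk;
        int ret;

        clk = clk_get(dev, "uart");     /* hypothetical con_id */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* slow part: clk_prepare may sleep, call from non-atomic context */
        ret = clk_prepare(clk);
        if (ret)
                goto err_put;

        /* fast part: clk_enable never sleeps, safe with interrupts off */
        ret = clk_enable(clk);
        if (ret)
                goto err_unprepare;

        /* ... program and use the hardware ... */

        clk_disable(clk);       /* reverse order: disable before unprepare */
        clk_unprepare(clk);
        clk_put(clk);
        return 0;

err_unprepare:
        clk_unprepare(clk);
err_put:
        clk_put(clk);
        return ret;
}
#endif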

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long parent_rate = 0;

        if (!clk)
                return 0;

        if (!clk->ops->round_rate) {
                if (clk->flags & CLK_SET_RATE_PARENT)
                        return __clk_round_rate(clk->parent, rate);
                else
                        return clk->rate;
        }

        if (clk->parent)
                parent_rate = clk->parent->rate;

        return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then clk's cached rate is returned instead (or, with
 * CLK_SET_RATE_PARENT, the request is passed up to the parent).
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long ret;

        mutex_lock(&prepare_lock);
        ret = __clk_round_rate(clk, rate);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
{
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;

        cnd.clk = clk;
        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;

        list_for_each_entry(cn, &clk_notifier_list, node) {
                if (cn->clk == clk) {
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
                        break;
                }
        }

        return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
        unsigned long old_rate;
        unsigned long parent_rate = 0;
        struct hlist_node *tmp;
        struct clk *child;

        old_rate = clk->rate;

        if (clk->parent)
                parent_rate = clk->parent->rate;

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                clk->rate = parent_rate;

        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
        if (clk->notifier_count && msg)
                __clk_notify(clk, msg, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                __clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long rate;

        mutex_lock(&prepare_lock);

        if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(clk, 0);

        rate = __clk_get_rate(clk);
        mutex_unlock(&prepare_lock);

        return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
        struct hlist_node *tmp;
        struct clk *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;

        if (clk->ops->recalc_rate)
                new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                new_rate = parent_rate;

        /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
        if (clk->notifier_count)
                ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

        if (ret & NOTIFY_STOP_MASK)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_speculate_rates(child, new_rate);
                if (ret & NOTIFY_STOP_MASK)
                        break;
        }

out:
        return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
        struct clk *child;
        struct hlist_node *tmp;

        clk->new_rate = new_rate;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                if (child->ops->recalc_rate)
                        child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
                else
                        child->new_rate = new_rate;
                clk_calc_subtree(child, child->new_rate);
        }
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
        struct clk *top = clk;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;

        /* sanity */
        if (IS_ERR_OR_NULL(clk))
                return NULL;

        /* save parent rate, if it exists */
        if (clk->parent)
                best_parent_rate = clk->parent->rate;

        /* never propagate up to the parent */
        if (!(clk->flags & CLK_SET_RATE_PARENT)) {
                if (!clk->ops->round_rate) {
                        clk->new_rate = clk->rate;
                        return NULL;
                }
                new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
                goto out;
        }

        /* need clk->parent from here on out */
        if (!clk->parent) {
                pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
                return NULL;
        }

        if (!clk->ops->round_rate) {
                top = clk_calc_new_rates(clk->parent, rate);
                new_rate = clk->parent->new_rate;

                goto out;
        }

        new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

        if (best_parent_rate != clk->parent->rate) {
                top = clk_calc_new_rates(clk->parent, best_parent_rate);

                goto out;
        }

out:
        clk_calc_subtree(clk, new_rate);

        return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
        struct hlist_node *tmp;
        struct clk *child, *fail_clk = NULL;
        int ret = NOTIFY_DONE;

        if (clk->rate == clk->new_rate)
                return NULL;

        if (clk->notifier_count) {
                ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
                if (ret == NOTIFY_BAD)
                        fail_clk = clk;
        }

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                clk = clk_propagate_rate_change(child, event);
                if (clk)
                        fail_clk = clk;
        }

        return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
        struct clk *child;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        struct hlist_node *tmp;

        old_rate = clk->rate;

        if (clk->parent)
                best_parent_rate = clk->parent->rate;

        if (clk->ops->set_rate)
                clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
        else
                clk->rate = best_parent_rate;

        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        struct clk *top, *fail_clk;
        int ret = 0;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        /* bail early if nothing to do */
        if (rate == clk->rate)
                goto out;

        if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
                ret = -EBUSY;
                goto out;
        }

        /* calculate new rates and get the topmost changed clock */
        top = clk_calc_new_rates(clk, rate);
        if (!top) {
                ret = -EINVAL;
                goto out;
        }

        /* notify that we are about to change rates */
        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
        if (fail_clk) {
                pr_warn("%s: failed to set %s rate\n", __func__,
                                fail_clk->name);
                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
                ret = -EBUSY;
                goto out;
        }

        /* change the rates */
        clk_change_rate(top);

        mutex_unlock(&prepare_lock);

        return 0;
out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
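
/*
 * Illustrative sketch (not part of the original file): rounding a target
 * rate before applying it, per the clk_set_rate semantics described above.
 * The 48 MHz target and the pr_info message are hypothetical.
 */
#if 0
static int example_set_rate(struct clk *clk)
{
        unsigned long target = 48000000;        /* hypothetical target rate */
        long rounded;
        int ret;

        /* ask what the hardware can actually provide for this target */
        rounded = clk_round_rate(clk, target);
        if (rounded <= 0)
                return -EINVAL;

        /* may propagate upward if clk has CLK_SET_RATE_PARENT set */
        ret = clk_set_rate(clk, rounded);
        if (ret)
                return ret;

        pr_info("requested %lu Hz, got %lu Hz\n", target, clk_get_rate(clk));
        return 0;
}
#endif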

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
        struct clk *parent;

        mutex_lock(&prepare_lock);
        parent = __clk_get_parent(clk);
        mutex_unlock(&prepare_lock);

        return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
        struct clk *ret = NULL;
        u8 index;

        /* handle the trivial cases */

        if (!clk->num_parents)
                goto out;

        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
                        ret = clk->parent = __clk_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }

        if (!clk->ops->get_parent) {
                WARN(!clk->ops->get_parent,
                        "%s: multi-parent clocks must implement .get_parent\n",
                        __func__);
                goto out;
        }

        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
         * unnecessary and expensive calls to __clk_lookup.  We don't set
         * clk->parent here; that is done by the calling function
         */

        index = clk->ops->get_parent(clk->hw);

        if (!clk->parents)
                clk->parents =
                        kzalloc((sizeof(struct clk*) * clk->num_parents),
                                        GFP_KERNEL);

        if (!clk->parents)
                ret = __clk_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                ret = clk->parents[index] =
                        __clk_lookup(clk->parent_names[index]);
        else
                ret = clk->parents[index];

out:
        return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
        struct dentry *d;
        struct dentry *new_parent_d;
#endif

        if (!clk || !new_parent)
                return;

        hlist_del(&clk->child_node);

        if (new_parent)
                hlist_add_head(&clk->child_node, &new_parent->children);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
        if (!inited)
                goto out;

        if (new_parent)
                new_parent_d = new_parent->dentry;
        else
                new_parent_d = orphandir;

        d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
                        new_parent_d, clk->name);
        if (d)
                clk->dentry = d;
        else
                pr_debug("%s: failed to rename debugfs entry for %s\n",
                                __func__, clk->name);
out:
#endif

        clk->parent = new_parent;

        __clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk *old_parent;
        unsigned long flags;
        int ret = -EINVAL;
        u8 i;

        old_parent = clk->parent;

        if (!clk->parents)
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                                                GFP_KERNEL);

        /*
         * find index of new parent clock using cached parent ptrs,
         * or if not yet cached, use string name comparison and cache
         * them now to avoid future calls to __clk_lookup.
         */
        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents && clk->parents[i] == parent)
                        break;
                else if (!strcmp(clk->parent_names[i], parent->name)) {
                        if (clk->parents)
                                clk->parents[i] = __clk_lookup(parent->name);
                        break;
                }
        }

        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
                                __func__, parent->name, clk->name);
                goto out;
        }

        /* migrate prepare and enable */
        if (clk->prepare_count)
                __clk_prepare(parent);

        /* FIXME replace with clk_is_enabled(clk) someday */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_enable(parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        /* change clock input source */
        ret = clk->ops->set_parent(clk->hw, i);

        /* clean up old prepare and enable */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_disable(old_parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        if (clk->prepare_count)
                __clk_unprepare(old_parent);

out:
        return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, debugfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, a negative error code otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = 0;

        if (!clk || !clk->ops)
                return -EINVAL;

        if (!clk->ops->set_parent)
                return -ENOSYS;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        if (clk->parent == parent)
                goto out;

        /* propagate PRE_RATE_CHANGE notifications */
        if (clk->notifier_count)
                ret = __clk_speculate_rates(clk, parent->rate);

        /* abort if a driver objects (NOTIFY_BAD or NOTIFY_STOP) */
        if (ret & NOTIFY_STOP_MASK)
                goto out;

        /* only re-parent if the clock is not in use */
        if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
                ret = -EBUSY;
        else
                ret = __clk_set_parent(clk, parent);

        /* propagate ABORT_RATE_CHANGE if .set_parent failed */
        if (ret) {
                __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
                goto out;
        }

        /* propagate rate recalculation downstream */
        __clk_reparent(clk, parent);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
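
/*
 * Illustrative sketch (not part of the original file): re-parenting a mux
 * clock.  Both clock names are hypothetical; a real consumer would normally
 * obtain the clocks via clk_get() rather than __clk_lookup().
 */
#if 0
static int example_switch_mux(void)
{
        struct clk *mux = __clk_lookup("hypothetical_mux");
        struct clk *pll = __clk_lookup("hypothetical_pll2");

        if (!mux || !pll)
                return -ENODEV;

        /*
         * If the mux has CLK_SET_PARENT_GATE set, it must be unprepared
         * (prepare_count == 0) for this call to succeed.
         */
        return clk_set_parent(mux, pll);
}
#endif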

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev:        device initializing this clk, placeholder for now
 * @clk:        clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
        int i, ret = 0;
        struct clk *orphan;
        struct hlist_node *tmp, *tmp2;

        if (!clk)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* check to see if a clock with this name is already registered */
        if (__clk_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);
                ret = -EEXIST;
                goto out;
        }

        /* check that clk_ops are sane.  See Documentation/clk.txt */
        if (clk->ops->set_rate &&
                        !(clk->ops->round_rate && clk->ops->recalc_rate)) {
                pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        if (clk->ops->set_parent && !clk->ops->get_parent) {
                pr_warning("%s: %s must implement .get_parent & .set_parent\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        /* throw a WARN if any entries in parent_names are NULL */
        for (i = 0; i < clk->num_parents; i++)
                WARN(!clk->parent_names[i],
                                "%s: invalid NULL in %s's .parent_names\n",
                                __func__, clk->name);

        /*
         * Allocate an array of struct clk *'s to avoid unnecessary string
         * look-ups of clk's possible parents.  This can fail for clocks passed
         * in to clk_init during early boot; thus any access to clk->parents[]
         * must always check for a NULL pointer and try to populate it if
         * necessary.
         *
         * If clk->parents is not NULL we skip this entire block.  This allows
         * for clock drivers to statically initialize clk->parents.
         */
        if (clk->num_parents > 1 && !clk->parents) {
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                GFP_KERNEL);
                /*
                 * __clk_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
                 */
                if (clk->parents)
                        for (i = 0; i < clk->num_parents; i++)
                                clk->parents[i] =
                                        __clk_lookup(clk->parent_names[i]);
        }

        clk->parent = __clk_init_parent(clk);

        /*
         * Populate clk->parent if parent has already been __clk_init'd.  If
         * parent has not yet been __clk_init'd then place clk in the orphan
         * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
         * clk list.
         *
         * Every time a new clk is clk_init'd then we walk the list of orphan
         * clocks and re-parent any that are children of the clock currently
         * being clk_init'd.
         */
        if (clk->parent)
                hlist_add_head(&clk->child_node,
                                &clk->parent->children);
        else if (clk->flags & CLK_IS_ROOT)
                hlist_add_head(&clk->child_node, &clk_root_list);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

        /*
         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
         * simple clocks and lazy developers the default fallback is to use the
         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw,
                                __clk_get_rate(clk->parent));
        else if (clk->parent)
                clk->rate = clk->parent->rate;
        else
                clk->rate = 0;

        /*
         * walk the list of orphan clocks and reparent any that are children of
         * this clock
         */
        hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
                if (orphan->ops->get_parent) {
                        i = orphan->ops->get_parent(orphan->hw);
                        if (!strcmp(clk->name, orphan->parent_names[i]))
                                __clk_reparent(orphan, clk);
                        continue;
                }

                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
                                __clk_reparent(orphan, clk);
                                break;
                        }
        }

        /*
         * optional platform-specific magic
         *
         * The .init callback is not used by any of the basic clock types, but
         * exists for weird hardware that must perform initialization magic.
         * Please consider other ways of solving initialization problems before
         * using this callback, as its use is discouraged.
         */
        if (clk->ops->init)
                clk->ops->init(clk->hw);

        clk_debug_register(clk);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk. None of the fields
 * of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.  It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.  Returns a
 * struct clk * on success, or an ERR_PTR-encoded error code otherwise.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
        int ret;
        struct clk *clk;

        clk = hw->clk;
        clk->name = hw->init->name;
        clk->ops = hw->init->ops;
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->parent_names = hw->init->parent_names;
        clk->num_parents = hw->init->num_parents;

        ret = __clk_init(dev, clk);
        if (ret)
                return ERR_PTR(ret);

        return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
{
        int i, ret;

        clk->name = kstrdup(hw->init->name, GFP_KERNEL);
        if (!clk->name) {
                pr_err("%s: could not allocate clk->name\n", __func__);
                ret = -ENOMEM;
                goto fail_name;
        }
        clk->ops = hw->init->ops;
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->num_parents = hw->init->num_parents;
        hw->clk = clk;

        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
                        GFP_KERNEL);

        if (!clk->parent_names) {
                pr_err("%s: could not allocate clk->parent_names\n", __func__);
                ret = -ENOMEM;
                goto fail_parent_names;
        }


        /* copy each string name in case parent_names is __initdata */
        for (i = 0; i < clk->num_parents; i++) {
                clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
                                                GFP_KERNEL);
                if (!clk->parent_names[i]) {
                        pr_err("%s: could not copy parent_names\n", __func__);
                        ret = -ENOMEM;
                        goto fail_parent_names_copy;
                }
        }

        ret = __clk_init(dev, clk);
        if (!ret)
                return 0;

fail_parent_names_copy:
        while (--i >= 0)
                kfree(clk->parent_names[i]);
        kfree(clk->parent_names);
fail_parent_names:
        kfree(clk->name);
fail_name:
        return ret;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with the
 * rest of the clock API.  In the event of an error clk_register will return an
 * error code; drivers must test for an error code after calling clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
        int ret;
        struct clk *clk;

        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk) {
                pr_err("%s: could not allocate clk\n", __func__);
                ret = -ENOMEM;
                goto fail_out;
        }

        ret = _clk_register(dev, hw, clk);
        if (!ret)
                return clk;

        kfree(clk);
fail_out:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
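
/*
 * Illustrative sketch (not part of the original file): a minimal provider
 * registering one gate-style clock through clk_register().  The ops bodies,
 * clock names and register accesses are hypothetical; struct clk_init_data
 * comes from linux/clk-provider.h.
 */
#if 0
static int example_gate_enable(struct clk_hw *hw)
{
        /* ... set a hypothetical enable bit ... */
        return 0;
}

static void example_gate_disable(struct clk_hw *hw)
{
        /* ... clear the hypothetical enable bit ... */
}

static const struct clk_ops example_gate_ops = {
        .enable         = example_gate_enable,
        .disable        = example_gate_disable,
};

static struct clk *example_register(struct device *dev)
{
        static const char *parents[] = { "hypothetical_parent" };
        struct clk_init_data init = {
                .name           = "hypothetical_gate",
                .ops            = &example_gate_ops,
                .parent_names   = parents,
                .num_parents    = ARRAY_SIZE(parents),
                .flags          = 0,
        };
        struct clk_hw *hw;

        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
        if (!hw)
                return ERR_PTR(-ENOMEM);
        hw->init = &init;       /* only read back during clk_register() */

        return clk_register(dev, hw);   /* check the result with IS_ERR() */
}
#endif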

/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
        clk_unregister(res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
        struct clk *clk;
        int ret;

        clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
        if (!clk)
                return ERR_PTR(-ENOMEM);

        ret = _clk_register(dev, hw, clk);
        if (!ret) {
                devres_add(dev, clk);
        } else {
                devres_free(clk);
                clk = ERR_PTR(ret);
        }

        return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

static int devm_clk_match(struct device *dev, void *res, void *data)
{
        struct clk *c = res;
        if (WARN_ON(!c))
                return 0;
        return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
        WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/***        clk rate change notifiers        ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would recursively take the prepare_lock mutex and deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn;
        int ret = -ENOMEM;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        /* if clk wasn't in the notifier list, allocate new clk_notifier */
        if (cn->clk != clk) {
                cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
                if (!cn)
                        goto out;

                cn->clk = clk;
                srcu_init_notifier_head(&cn->notifier_head);

                list_add(&cn->node, &clk_notifier_list);
        }

        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

        clk->notifier_count++;

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
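
/*
 * Illustrative sketch (not part of the original file): a rate-change
 * notifier callback following the PRE/POST/ABORT semantics documented
 * above.  The 100 MHz limit is hypothetical.
 */
#if 0
static int example_clk_notifier_cb(struct notifier_block *nb,
                                   unsigned long event, void *data)
{
        struct clk_notifier_data *cnd = data;

        switch (event) {
        case PRE_RATE_CHANGE:
                /* veto rates the (hypothetical) hardware cannot handle */
                if (cnd->new_rate > 100000000)
                        return NOTIFY_BAD;
                return NOTIFY_OK;
        case POST_RATE_CHANGE:
                pr_info("rate changed: %lu -> %lu\n",
                                cnd->old_rate, cnd->new_rate);
                return NOTIFY_OK;
        case ABORT_RATE_CHANGE:
        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block example_clk_nb = {
        .notifier_call = example_clk_notifier_cb,
};
/* registered with: clk_notifier_register(clk, &example_clk_nb); */
#endif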

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn = NULL;
        int ret = -EINVAL;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

                clk->notifier_count--;

                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
                        srcu_cleanup_notifier_head(&cn->notifier_head);
                        kfree(cn);
                }

        } else {
                ret = -ENOENT;
        }

        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
        struct list_head link;

        struct device_node *node;
        struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
        void *data;
};

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
                                     void *data)
{
        return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
        struct clk_onecell_data *clk_data = data;
        unsigned int idx = clkspec->args[0];

        if (idx >= clk_data->clk_num) {
                pr_err("%s: invalid clock index %u\n", __func__, idx);
                return ERR_PTR(-EINVAL);
        }

        return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
                        struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
                                                   void *data),
                        void *data)
{
        struct of_clk_provider *cp;

        cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        cp->node = of_node_get(np);
        cp->data = data;
        cp->get = clk_src_get;

        mutex_lock(&of_clk_lock);
        list_add(&cp->link, &of_clk_providers);
        mutex_unlock(&of_clk_lock);
        pr_debug("Added clock from %s\n", np->full_name);

        return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);
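
/*
 * Illustrative sketch (not part of the original file): exposing an array of
 * clocks to the device tree with of_clk_src_onecell_get.  The array size and
 * setup context are hypothetical.
 */
#if 0
static struct clk *example_clks[2];     /* filled in by clk_register() calls */

static struct clk_onecell_data example_clk_data = {
        .clks           = example_clks,
        .clk_num        = ARRAY_SIZE(example_clks),
};

static void example_add_provider(struct device_node *np)
{
        /*
         * Consumers may now reference these clocks from the DT with
         * "clocks = <&provider N>;"; the cell N is decoded by
         * of_clk_src_onecell_get as an index into example_clks.
         */
        of_clk_add_provider(np, of_clk_src_onecell_get, &example_clk_data);
}
#endif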

/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
        struct of_clk_provider *cp;

        mutex_lock(&of_clk_lock);
        list_for_each_entry(cp, &of_clk_providers, link) {
                if (cp->node == np) {
                        list_del(&cp->link);
                        of_node_put(cp->node);
                        kfree(cp);
                        break;
                }
        }
        mutex_unlock(&of_clk_lock);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
        struct of_clk_provider *provider;
        struct clk *clk = ERR_PTR(-ENOENT);

        /* Check if we have such a provider in our array */
        mutex_lock(&of_clk_lock);
        list_for_each_entry(provider, &of_clk_providers, link) {
                if (provider->node == clkspec->np)
                        clk = provider->get(clkspec, provider->data);
                if (!IS_ERR(clk))
                        break;
        }
        mutex_unlock(&of_clk_lock);

        return clk;
}

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
        struct of_phandle_args clkspec;
        const char *clk_name;
        int rc;

        if (index < 0)
                return NULL;

        rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
                                        &clkspec);
        if (rc)
                return NULL;

        if (of_property_read_string_index(clkspec.np, "clock-output-names",
                                          clkspec.args_count ? clkspec.args[0] : 0,
                                          &clk_name) < 0)
                clk_name = clkspec.np->name;

        of_node_put(clkspec.np);
        return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions
 */
void __init of_clk_init(const struct of_device_id *matches)
{
        struct device_node *np;

        for_each_matching_node(np, matches) {
                const struct of_device_id *match = of_match_node(matches, np);
                of_clk_init_cb_t clk_init_cb = match->data;
                clk_init_cb(np);
        }
}
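
/*
 * Illustrative sketch (not part of the original file): early machine code
 * handing clock setup to of_clk_init with a match table.  The compatible
 * string and setup callback are hypothetical.
 */
#if 0
static void __init example_clk_setup(struct device_node *np)
{
        /* ... register this node's clocks and call of_clk_add_provider ... */
}

static const struct of_device_id example_clk_match[] __initconst = {
        { .compatible = "vendor,example-clock", .data = example_clk_setup },
        { /* sentinel */ }
};

static void __init example_init_time(void)
{
        of_clk_init(example_clk_match);
}
#endif
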
#endif