// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock framework.  See
 * Documentation/driver-api/clk.rst for more details.
 */
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static struct hlist_head *all_lists[] = {
	&clk_root_list,
	&clk_orphan_list,
	NULL,
};

/***    private data structures    ***/

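/*
 * struct clk_parent_map - describes one possible parent of a clk
 *
 * @hw and @core cache direct references once they are known; @fw_name and
 * @index identify the parent through the device tree "clocks"/"clock-names"
 * properties; @name is a fallback used for a global lookup by clock name.
 * @core is filled in lazily, the first time the parent is actually resolved.
 */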
struct clk_parent_map {
	const struct clk_hw	*hw;
	struct clk_core		*core;
	const char		*fw_name;
	const char		*name;
	int			index;
};

struct clk_core {
	const char		*name;
	const struct clk_ops	*ops;
	struct clk_hw		*hw;
	struct module		*owner;
	struct device		*dev;
	struct device_node	*of_node;
	struct clk_core		*parent;
	struct clk_parent_map	*parents;
	u8			num_parents;
	u8			new_parent_index;
	unsigned long		rate;
	unsigned long		req_rate;
	unsigned long		new_rate;
	struct clk_core		*new_parent;
	struct clk_core		*new_child;
	unsigned long		flags;
	bool			orphan;
	bool			rpm_enabled;
	unsigned int		enable_count;
	unsigned int		prepare_count;
	unsigned int		protect_count;
	unsigned long		min_rate;
	unsigned long		max_rate;
	unsigned long		accuracy;
	int			phase;
	struct clk_duty		duty;
	struct hlist_head	children;
	struct hlist_node	child_node;
	struct hlist_head	clks;
	unsigned int		notifier_count;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*dentry;
	struct hlist_node	debug_node;
#endif
	struct kref		ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
	struct clk_core	*core;
	struct device *dev;
	const char *dev_id;
	const char *con_id;
	unsigned long min_rate;
	unsigned long max_rate;
	unsigned int exclusive_count;
	struct hlist_node clks_node;
};

/***           runtime pm          ***/
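/*
 * Providers registered from a device with runtime PM enabled set
 * core->rpm_enabled. These helpers resume the provider before its registers
 * are touched and drop the reference afterwards; for all other clocks they
 * are no-ops.
 */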
static int clk_pm_runtime_get(struct clk_core *core)
{
	int ret;

	if (!core->rpm_enabled)
		return 0;

	ret = pm_runtime_get_sync(core->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(core->dev);
		return ret;
	}
	return 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
	if (!core->rpm_enabled)
		return;

	pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
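/*
 * Both the prepare mutex and the enable spinlock are reentrant for the
 * task that currently holds them: the owner and a reference count are
 * tracked so that nested calls from within the framework (e.g. a clk op
 * calling back into the core) do not deadlock.
 */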
static void clk_prepare_lock(void)
{
	if (!mutex_trylock(&prepare_lock)) {
		if (prepare_owner == current) {
			prepare_refcnt++;
			return;
		}
		mutex_lock(&prepare_lock);
	}
	WARN_ON_ONCE(prepare_owner != NULL);
	WARN_ON_ONCE(prepare_refcnt != 0);
	prepare_owner = current;
	prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
	WARN_ON_ONCE(prepare_owner != current);
	WARN_ON_ONCE(prepare_refcnt == 0);

	if (--prepare_refcnt)
		return;
	prepare_owner = NULL;
	mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
	__acquires(enable_lock)
{
	unsigned long flags;

	/*
	 * On UP systems, spin_trylock_irqsave() always returns true, even if
	 * we already hold the lock. So, in that case, we rely only on
	 * reference counting.
	 */
	if (!IS_ENABLED(CONFIG_SMP) ||
	    !spin_trylock_irqsave(&enable_lock, flags)) {
		if (enable_owner == current) {
			enable_refcnt++;
			__acquire(enable_lock);
			if (!IS_ENABLED(CONFIG_SMP))
				local_save_flags(flags);
			return flags;
		}
		spin_lock_irqsave(&enable_lock, flags);
	}
	WARN_ON_ONCE(enable_owner != NULL);
	WARN_ON_ONCE(enable_refcnt != 0);
	enable_owner = current;
	enable_refcnt = 1;
	return flags;
}

static void clk_enable_unlock(unsigned long flags)
	__releases(enable_lock)
{
	WARN_ON_ONCE(enable_owner != current);
	WARN_ON_ONCE(enable_refcnt == 0);

	if (--enable_refcnt) {
		__release(enable_lock);
		return;
	}
	enable_owner = NULL;
	spin_unlock_irqrestore(&enable_lock, flags);
}

static bool clk_core_rate_is_protected(struct clk_core *core)
{
	return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_prepared is optional for clocks that can prepare
	 * fall back to software usage counter if it is missing
	 */
	if (!core->ops->is_prepared)
		return core->prepare_count;

	if (!clk_pm_runtime_get(core)) {
		ret = core->ops->is_prepared(core->hw);
		clk_pm_runtime_put(core);
	}

	return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
	bool ret = false;

	/*
	 * .is_enabled is only mandatory for clocks that gate
	 * fall back to software usage counter if .is_enabled is missing
	 */
	if (!core->ops->is_enabled)
		return core->enable_count;

	/*
	 * Check if clock controller's device is runtime active before
	 * calling .is_enabled callback. If not, assume that clock is
	 * disabled, because we might be called from atomic context, from
	 * which pm_runtime_get() is not allowed.
	 * This function is called mainly from clk_disable_unused_subtree,
	 * which ensures proper runtime pm activation of controller while
	 * taking enable spinlock, but the below check is needed if one tries
	 * to call it from other places.
	 */
	if (core->rpm_enabled) {
		pm_runtime_get_noresume(core->dev);
		if (!pm_runtime_active(core->dev)) {
			ret = false;
			goto done;
		}
	}

	ret = core->ops->is_enabled(core->hw);
done:
	if (core->rpm_enabled)
		pm_runtime_put(core->dev);

	return ret;
}

/***        helper functions       ***/

const char *__clk_get_name(const struct clk *clk)
{
	return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
	return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
	return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
	return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
					     struct clk_core *core)
{
	struct clk_core *child;
	struct clk_core *ret;

	if (!strcmp(core->name, name))
		return core;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
	struct clk_core *root_clk;
	struct clk_core *ret;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
			    const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
				   const char *name,
				   struct of_phandle_args *out_args)
{
	return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
	return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
	const char *name = core->parents[p_index].fw_name;
	int index = core->parents[p_index].index;
	struct clk_hw *hw = ERR_PTR(-ENOENT);
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
	struct of_phandle_args clkspec;

	if (np && (name || index >= 0) &&
	    !of_parse_clkspec(np, index, name, &clkspec)) {
		hw = of_clk_get_hw_from_clkspec(&clkspec);
		of_node_put(clkspec.np);
	} else if (name) {
		/*
		 * If the DT search above couldn't find the provider fallback to
		 * looking up via clkdev based clk_lookups.
		 */
		hw = clk_find_hw(dev_id, name);
	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);

	return hw->core;
}

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
	struct clk_parent_map *entry = &core->parents[index];
	struct clk_core *parent;

	if (entry->hw) {
		parent = entry->hw->core;
		/*
		 * We have a direct reference but it isn't registered yet?
		 * Something went wrong, so ask the caller to try again
		 * later once the parent has been registered.
		 */
		if (!parent)
			parent = ERR_PTR(-EPROBE_DEFER);
	} else {
		parent = clk_core_get(core, index);
		if (PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

	/* Only cache it if it's not an error */
	if (!IS_ERR(parent))
		entry->core = parent;
}
444
445static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
446 u8 index)
447{
448 if (!core || index >= core->num_parents || !core->parents)
449 return NULL;
450
451 if (!core->parents[index].core)
452 clk_core_fill_parent_index(core, index);
453
454 return core->parents[index].core;
455}
456
457struct clk_hw *
458clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
459{
460 struct clk_core *parent;
461
462 parent = clk_core_get_parent_by_index(hw->core, index);
463
464 return !parent ? NULL : parent->hw;
465}
466EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
467
468unsigned int __clk_get_enable_count(struct clk *clk)
469{
470 return !clk ? 0 : clk->core->enable_count;
471}
472
473static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
474{
475 if (!core)
476 return 0;
477
478 if (!core->num_parents || core->parent)
479 return core->rate;
480
481
482
483
484
485
486 return 0;
487}
488
489unsigned long clk_hw_get_rate(const struct clk_hw *hw)
490{
491 return clk_core_get_rate_nolock(hw->core);
492}
493EXPORT_SYMBOL_GPL(clk_hw_get_rate);
494
495static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
496{
497 if (!core)
498 return 0;
499
500 return core->accuracy;
501}
502
503unsigned long clk_hw_get_flags(const struct clk_hw *hw)
504{
505 return hw->core->flags;
506}
507EXPORT_SYMBOL_GPL(clk_hw_get_flags);
508
509bool clk_hw_is_prepared(const struct clk_hw *hw)
510{
511 return clk_core_is_prepared(hw->core);
512}
513EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
514
515bool clk_hw_rate_is_protected(const struct clk_hw *hw)
516{
517 return clk_core_rate_is_protected(hw->core);
518}
519EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
520
521bool clk_hw_is_enabled(const struct clk_hw *hw)
522{
523 return clk_core_is_enabled(hw->core);
524}
525EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
526
527bool __clk_is_enabled(struct clk *clk)
528{
529 if (!clk)
530 return false;
531
532 return clk_core_is_enabled(clk->core);
533}
534EXPORT_SYMBOL_GPL(__clk_is_enabled);
535
static bool mux_is_better_rate(unsigned long rate, unsigned long now,
			       unsigned long best, unsigned long flags)
{
	if (flags & CLK_MUX_ROUND_CLOSEST)
		return abs(now - rate) < abs(best - rate);

	return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
				 struct clk_rate_request *req,
				 unsigned long flags)
{
	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
	int i, num_parents, ret;
	unsigned long best = 0;
	struct clk_rate_request parent_req = *req;

	/* if NO_REPARENT flag set, pass through to current parent */
	if (core->flags & CLK_SET_RATE_NO_REPARENT) {
		parent = core->parent;
		if (core->flags & CLK_SET_RATE_PARENT) {
			ret = __clk_determine_rate(parent ? parent->hw : NULL,
						   &parent_req);
			if (ret)
				return ret;

			best = parent_req.rate;
		} else if (parent) {
			best = clk_core_get_rate_nolock(parent);
		} else {
			best = clk_core_get_rate_nolock(core);
		}

		goto out;
	}

	/* find the parent that can provide the fastest rate <= rate */
	num_parents = core->num_parents;
	for (i = 0; i < num_parents; i++) {
		parent = clk_core_get_parent_by_index(core, i);
		if (!parent)
			continue;

		if (core->flags & CLK_SET_RATE_PARENT) {
			parent_req = *req;
			ret = __clk_determine_rate(parent->hw, &parent_req);
			if (ret)
				continue;
		} else {
			parent_req.rate = clk_core_get_rate_nolock(parent);
		}

		if (mux_is_better_rate(req->rate, parent_req.rate,
				       best, flags)) {
			best_parent = parent;
			best = parent_req.rate;
		}
	}

	if (!best_parent)
		return -EINVAL;

out:
	if (best_parent)
		req->best_parent_hw = best_parent->hw;
	req->best_parent_rate = best;
	req->rate = best;

	return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
	struct clk_core *core = clk_core_lookup(name);

	return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
				    unsigned long *min_rate,
				    unsigned long *max_rate)
{
	struct clk *clk_user;

	lockdep_assert_held(&prepare_lock);

	*min_rate = core->min_rate;
	*max_rate = core->max_rate;

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*min_rate = max(*min_rate, clk_user->min_rate);

	hlist_for_each_entry(clk_user, &core->clks, clks_node)
		*max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
			   unsigned long max_rate)
{
	hw->core->min_rate = min_rate;
	hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);

/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding best parent to provide a given frequency. This can be used
 * directly as a determine_rate callback (e.g. for a mux), or from a more
 * complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, -EERROR value on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
			     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);

static void clk_core_rate_unprotect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->protect_count == 0,
		 "%s already unprotected\n", core->name))
		return;

	if (--core->protect_count > 0)
		return;

	clk_core_rate_unprotect(core->parent);
}

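/*
 * clk_core_rate_nuke_protect - temporarily drop all rate protection
 *
 * Zeroes the protect count of @core (propagating the release up the parent
 * chain) and returns the previous count (or -EINVAL for a NULL clk) so it
 * can be re-applied later with clk_core_rate_restore_protect().
 */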
static int clk_core_rate_nuke_protect(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return -EINVAL;

	if (core->protect_count == 0)
		return 0;

	ret = core->protect_count;
	core->protect_count = 1;
	clk_core_rate_unprotect(core);

	return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not
 * return error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
	if (!clk)
		return;

	clk_prepare_lock();

	/*
	 * if there is something wrong with this consumer protect count, stop
	 * here before messing with the provider
	 */
	if (WARN_ON(clk->exclusive_count <= 0))
		goto out;

	clk_core_rate_unprotect(clk->core);
	clk->exclusive_count--;
out:
	clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

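/*
 * clk_core_rate_protect - mark a clk (and its parent chain) as rate-protected
 *
 * Increments the protect count of @core. The first reference also protects
 * the whole parent chain, so no ancestor can change rate underneath the
 * protected clock.
 */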
static void clk_core_rate_protect(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (core->protect_count == 0)
		clk_core_rate_protect(core->parent);

	core->protect_count++;
}

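/*
 * clk_core_rate_restore_protect - re-apply a protect count previously
 * removed with clk_core_rate_nuke_protect().
 */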
static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (count == 0)
		return;

	clk_core_rate_protect(core);
	core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity is claimed
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up parent chain
 * also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, -EERROR otherwise
 */
int clk_rate_exclusive_get(struct clk *clk)
{
	if (!clk)
		return 0;

	clk_prepare_lock();
	clk_core_rate_protect(clk->core);
	clk->exclusive_count++;
	clk_prepare_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);

static void clk_core_unprepare(struct clk_core *core)
{
	lockdep_assert_held(&prepare_lock);

	if (!core)
		return;

	if (WARN(core->prepare_count == 0,
		 "%s already unprepared\n", core->name))
		return;

	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Unpreparing critical %s\n", core->name))
		return;

	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_unprotect(core);

	if (--core->prepare_count > 0)
		return;

	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

	trace_clk_unprepare(core);

	if (core->ops->unprepare)
		core->ops->unprepare(core->hw);

	clk_pm_runtime_put(core);

	trace_clk_unprepare_complete(core);
	clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
	clk_prepare_lock();
	clk_core_unprepare(core);
	clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2c.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->prepare_count == 0) {
		ret = clk_pm_runtime_get(core);
		if (ret)
			return ret;

		ret = clk_core_prepare(core->parent);
		if (ret)
			goto runtime_put;

		trace_clk_prepare(core);

		if (core->ops->prepare)
			ret = core->ops->prepare(core->hw);

		trace_clk_prepare_complete(core);

		if (ret)
			goto unprepare;
	}

	core->prepare_count++;

	/*
	 * CLK_SET_RATE_GATE is a special case of clock protection
	 * Instead of a consumer claiming exclusive rate control, it is
	 * actually the provider which prevents any consumer from making any
	 * operation which could result in a rate change or rate glitch while
	 * the clock is prepared.
	 */
	if (core->flags & CLK_SET_RATE_GATE)
		clk_core_rate_protect(core);

	return 0;
unprepare:
	clk_core_unprepare(core->parent);
runtime_put:
	clk_pm_runtime_put(core);
	return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
	int ret;

	clk_prepare_lock();
	ret = clk_core_prepare(core);
	clk_prepare_unlock();

	return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2c.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
	lockdep_assert_held(&enable_lock);

	if (!core)
		return;

	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
		return;

	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
		 "Disabling critical %s\n", core->name))
		return;

	if (--core->enable_count > 0)
		return;

	trace_clk_disable_rcuidle(core);

	if (core->ops->disable)
		core->ops->disable(core->hw);

	trace_clk_disable_complete_rcuidle(core);

	clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
	unsigned long flags;

	flags = clk_enable_lock();
	clk_core_disable(core);
	clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It is
 * this reason that clk_unprepare and clk_disable are not mutually exclusive.
 * In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;

	clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
	int ret = 0;

	lockdep_assert_held(&enable_lock);

	if (!core)
		return 0;

	if (WARN(core->prepare_count == 0,
		 "Enabling unprepared %s\n", core->name))
		return -ESHUTDOWN;

	if (core->enable_count == 0) {
		ret = clk_core_enable(core->parent);

		if (ret)
			return ret;

		trace_clk_enable_rcuidle(core);

		if (core->ops->enable)
			ret = core->ops->enable(core->hw);

		trace_clk_enable_complete_rcuidle(core);

		if (ret) {
			clk_core_disable(core->parent);
			return ret;
		}
	}

	core->enable_count++;
	return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
	unsigned long flags;
	int ret;

	flags = clk_enable_lock();
	ret = clk_core_enable(core);
	clk_enable_unlock(flags);

	return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables
 * the gate clocks based on the enable_count. This is done in cases
 * where the clock context is lost and based on the enable_count
 * the clock either needs to be enabled/disabled. This
 * helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
	struct clk_core *core = hw->core;

	if (core->enable_count)
		core->ops->enable(hw);
	else
		core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);

static int clk_core_save_context(struct clk_core *core)
{
	struct clk_core *child;
	int ret = 0;

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = clk_core_save_context(child);
		if (ret < 0)
			return ret;
	}

	if (core->ops && core->ops->save_context)
		ret = core->ops->save_context(core->hw);

	return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
	struct clk_core *child;

	if (core->ops && core->ops->restore_context)
		core->ops->restore_context(core->hw);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock register for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code.  Returns 0 on success.
 */
int clk_save_context(void)
{
	struct clk_core *clk;
	int ret;

	hlist_for_each_entry(clk, &clk_root_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
		ret = clk_core_save_context(clk);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 */
void clk_restore_context(void)
{
	struct clk_core *core;

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_core_restore_context(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, -EERROR
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
	if (!clk)
		return 0;

	return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);

/**
 * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it
 * @clk: clock source
 *
 * Returns true if clk_prepare() implicitly enables the clock, effectively
 * making clk_enable()/clk_disable() no-ops, false otherwise.
 *
 * This is of interest mainly to power management code where actually
 * disabling the clock also requires unpreparing it to have any material
 * effect.
 *
 * Regardless of the value returned here, the caller must always invoke
 * clk_enable() or clk_prepare_enable() and counterparts for usage counts
 * to be right.
 */
bool clk_is_enabled_when_prepared(struct clk *clk)
{
	return clk && !(clk->core->ops->enable && clk->core->ops->disable);
}
EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);

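/*
 * Convenience wrappers combining the prepare and enable (or disable and
 * unprepare) steps, each taken under its own lock.
 */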
static int clk_core_prepare_enable(struct clk_core *core)
{
	int ret;

	ret = clk_core_prepare_lock(core);
	if (ret)
		return ret;

	ret = clk_core_enable_lock(core);
	if (ret)
		clk_core_unprepare_lock(core);

	return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
	clk_core_disable_lock(core);
	clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_unprepare_unused_subtree(child);

	if (core->prepare_count)
		return;

	if (core->flags & CLK_IGNORE_UNUSED)
		return;

	if (clk_pm_runtime_get(core))
		return;

	if (clk_core_is_prepared(core)) {
		trace_clk_unprepare(core);
		if (core->ops->unprepare_unused)
			core->ops->unprepare_unused(core->hw);
		else if (core->ops->unprepare)
			core->ops->unprepare(core->hw);
		trace_clk_unprepare_complete(core);
	}

	clk_pm_runtime_put(core);
}

static void __init clk_disable_unused_subtree(struct clk_core *core)
{
	struct clk_core *child;
	unsigned long flags;

	lockdep_assert_held(&prepare_lock);

	hlist_for_each_entry(child, &core->children, child_node)
		clk_disable_unused_subtree(child);

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(core->parent);

	if (clk_pm_runtime_get(core))
		goto unprepare_out;

	flags = clk_enable_lock();

	if (core->enable_count)
		goto unlock_out;

	if (core->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	/*
	 * some gate clocks have special needs during the disable-unused
	 * sequence.  call .disable_unused if available, otherwise fall
	 * back to .disable
	 */
	if (clk_core_is_enabled(core)) {
		trace_clk_disable(core);
		if (core->ops->disable_unused)
			core->ops->disable_unused(core->hw);
		else if (core->ops->disable)
			core->ops->disable(core->hw);
		trace_clk_disable_complete(core);
	}

unlock_out:
	clk_enable_unlock(flags);
	clk_pm_runtime_put(core);
unprepare_out:
	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
	clk_ignore_unused = true;
	return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
{
	struct clk_core *core;

	if (clk_ignore_unused) {
		pr_warn("clk: Not disabling unused clocks\n");
		return 0;
	}

	clk_prepare_lock();

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(core);

	hlist_for_each_entry(core, &clk_root_list, child_node)
		clk_unprepare_unused_subtree(core);

	hlist_for_each_entry(core, &clk_orphan_list, child_node)
		clk_unprepare_unused_subtree(core);

	clk_prepare_unlock();

	return 0;
}
late_initcall_sync(clk_disable_unused);

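/*
 * clk_core_determine_round_nolock - round a rate request for @core
 *
 * Queries the provider through .determine_rate (preferred) or .round_rate.
 * A rate-protected clock is pinned to its current rate.
 */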
static int clk_core_determine_round_nolock(struct clk_core *core,
					   struct clk_rate_request *req)
{
	long rate;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/*
	 * At this point, core protection will be disabled
	 * - if the provider is not protected at all
	 * - if the calling consumer is the only one which has exclusivity
	 *   over the provider
	 */
	if (clk_core_rate_is_protected(core)) {
		req->rate = core->rate;
	} else if (core->ops->determine_rate) {
		return core->ops->determine_rate(core->hw, req);
	} else if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
	} else {
		return -EINVAL;
	}

	return 0;
}

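/*
 * clk_core_init_rate_req - seed a rate request with the current parent's hw
 * pointer and rate, or with NULL/0 for a root or orphan clk.
 */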
static void clk_core_init_rate_req(struct clk_core * const core,
				   struct clk_rate_request *req)
{
	struct clk_core *parent;

	if (WARN_ON(!core || !req))
		return;

	parent = core->parent;
	if (parent) {
		req->best_parent_hw = parent->hw;
		req->best_parent_rate = parent->rate;
	} else {
		req->best_parent_hw = NULL;
		req->best_parent_rate = 0;
	}
}

static bool clk_core_can_round(struct clk_core * const core)
{
	return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
				      struct clk_rate_request *req)
{
	lockdep_assert_held(&prepare_lock);

	if (!core) {
		req->rate = 0;
		return 0;
	}

	clk_core_init_rate_req(core, req);

	if (clk_core_can_round(core))
		return clk_core_determine_round_nolock(core, req);
	else if (core->flags & CLK_SET_RATE_PARENT)
		return clk_core_round_rate_nolock(core->parent, req);

	req->rate = core->rate;
	return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	if (!hw) {
		req->rate = 0;
		return 0;
	}

	return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

/**
 * clk_hw_round_rate - round the given rate for a hw clk
 * @hw: the hw clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use.
 *
 * Context: prepare_lock must be held.
 *          For clk providers to call from within clk_ops such as .round_rate,
 *          .determine_rate.
 *
 * Return: returns rounded rate of hw clk if clk supports round_rate operation
 *         else returns the parent rate.
 */
unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
	int ret;
	struct clk_rate_request req;

	clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(hw->core, &req);
	if (ret)
		return 0;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use which is then returned.  If clk doesn't support round_rate operation
 * then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	struct clk_rate_request req;
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
	req.rate = rate;

	ret = clk_core_round_rate_nolock(clk->core, &req);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	if (ret)
		return ret;

	return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
			unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk->core == core) {
			cnd.clk = cn->clk;
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
						       &cnd);
			if (ret & NOTIFY_STOP_MASK)
				return ret;
		}
	}

	return ret;
}

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
	unsigned long parent_accuracy = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	if (core->parent)
		parent_accuracy = core->parent->accuracy;

	if (core->ops->recalc_accuracy)
		core->accuracy = core->ops->recalc_accuracy(core->hw,
							    parent_accuracy);
	else
		core->accuracy = parent_accuracy;

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
		__clk_recalc_accuracies(core);

	return clk_core_get_accuracy_no_lock(core);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
 * issued.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
	long accuracy;

	if (!clk)
		return 0;

	clk_prepare_lock();
	accuracy = clk_core_get_accuracy_recalc(clk->core);
	clk_prepare_unlock();

	return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
				unsigned long parent_rate)
{
	unsigned long rate = parent_rate;

	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
		rate = core->ops->recalc_rate(core->hw, parent_rate);
		clk_pm_runtime_put(core);
	}
	return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct clk_core *child;

	lockdep_assert_held(&prepare_lock);

	old_rate = core->rate;

	if (core->parent)
		parent_rate = core->parent->rate;

	core->rate = clk_recalc(core, parent_rate);

	/*
	 * A zero msg means the caller only wants the cached rates updated,
	 * without firing any notifiers.
	 */
	if (core->notifier_count && msg)
		__clk_notify(core, msg, old_rate, core->rate);

	hlist_for_each_entry(child, &core->children, child_node)
		__clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
		__clk_recalc_rates(core, 0);

	return clk_core_get_rate_nolock(core);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
 * is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	if (!clk)
		return 0;

	clk_prepare_lock();
	rate = clk_core_get_rate_recalc(clk->core);
	clk_prepare_unlock();

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

static int clk_fetch_parent_index(struct clk_core *core,
				  struct clk_core *parent)
{
	int i;

	if (!parent)
		return -EINVAL;

	for (i = 0; i < core->num_parents; i++) {
		/* Found it first try! */
		if (core->parents[i].core == parent)
			return i;

		/* Something else is here, so keep looking */
		if (core->parents[i].core)
			continue;

		/* Maybe core hasn't been cached but the hw is all we know? */
		if (core->parents[i].hw) {
			if (core->parents[i].hw == parent->hw)
				break;

			/* Didn't match, but we're expecting a clk_hw */
			continue;
		}

		/* Maybe it hasn't been cached (clk_set_parent() path) */
		if (parent == clk_core_get(core, i))
			break;

		/* Fallback to comparing globally unique names */
		if (core->parents[i].name &&
		    !strcmp(parent->name, core->parents[i].name))
			break;
	}

	if (i == core->num_parents)
		return -EINVAL;

	core->parents[i].core = parent;
	return i;
}

/**
 * clk_hw_get_parent_index - return the index of the parent clock
 * @hw: clk_hw associated with the clk being consumed
 *
 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
 * clock does not have a current parent.
 */
int clk_hw_get_parent_index(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);

	if (WARN_ON(parent == NULL))
		return -EINVAL;

	return clk_fetch_parent_index(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);

/*
 * Update the orphan status of @core and all its children.
 */
static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
{
	struct clk_core *child;

	core->orphan = is_orphan;

	hlist_for_each_entry(child, &core->children, child_node)
		clk_core_update_orphan_status(child, is_orphan);
}

static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
{
	bool was_orphan = core->orphan;

	hlist_del(&core->child_node);

	if (new_parent) {
		bool becomes_orphan = new_parent->orphan;

		/* avoid duplicate POST_RATE_CHANGE notifications */
		if (new_parent->new_child == core)
			new_parent->new_child = NULL;

		hlist_add_head(&core->child_node, &new_parent->children);

		if (was_orphan != becomes_orphan)
			clk_core_update_orphan_status(core, becomes_orphan);
	} else {
		hlist_add_head(&core->child_node, &clk_orphan_list);
		if (!was_orphan)
			clk_core_update_orphan_status(core, true);
	}

	core->parent = new_parent;
}

static struct clk_core *__clk_set_parent_before(struct clk_core *core,
					   struct clk_core *parent)
{
	unsigned long flags;
	struct clk_core *old_parent = core->parent;

	/*
	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
	 *
	 * 2. Migrate prepare state between parents and prevent race with
	 * clk_enable().
	 *
	 * If the clock is not prepared, then a race with
	 * clk_enable/disable() is impossible since we already have the
	 * prepare lock (future calls to clk_enable() need to be preceded by
	 * a clk_prepare()).
	 *
	 * If the clock is prepared, migrate the prepared state to the new
	 * parent and also protect against a race with clk_enable() by
	 * forcing the clock and the new parent on.  This ensures that all
	 * future calls to clk_enable() are practically NOPs with respect to
	 * hardware and software states.
	 *
	 * See also: Comment for clk_set_parent() below.
	 */

	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_prepare_enable(old_parent);
		clk_core_prepare_enable(parent);
	}

	/* migrate prepare count if > 0 */
	if (core->prepare_count) {
		clk_core_prepare_enable(parent);
		clk_core_enable_lock(core);
	}

	/* update the clk tree topology */
	flags = clk_enable_lock();
	clk_reparent(core, parent);
	clk_enable_unlock(flags);

	return old_parent;
}

static void __clk_set_parent_after(struct clk_core *core,
				   struct clk_core *parent,
				   struct clk_core *old_parent)
{
	/*
	 * Finish the migration of prepare state and undo the changes done
	 * for preventing a race with clk_enable().
	 */
	if (core->prepare_count) {
		clk_core_disable_lock(core);
		clk_core_disable_unprepare(old_parent);
	}

	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
	if (core->flags & CLK_OPS_PARENT_ENABLE) {
		clk_core_disable_unprepare(parent);
		clk_core_disable_unprepare(old_parent);
	}
}

static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
			    u8 p_index)
{
	unsigned long flags;
	int ret = 0;
	struct clk_core *old_parent;

	old_parent = __clk_set_parent_before(core, parent);

	trace_clk_set_parent(core, parent);

	/* change clock input source */
	if (parent && core->ops->set_parent)
		ret = core->ops->set_parent(core->hw, p_index);

	trace_clk_set_parent_complete(core, parent);

	if (ret) {
		flags = clk_enable_lock();
		clk_reparent(core, old_parent);
		clk_enable_unlock(flags);
		__clk_set_parent_after(core, old_parent, parent);

		return ret;
	}

	__clk_set_parent_after(core, parent, old_parent);

	return 0;
}

/**
 * __clk_speculate_rates
 * @core: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 */
static int __clk_speculate_rates(struct clk_core *core,
				 unsigned long parent_rate)
{
	struct clk_core *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	lockdep_assert_held(&prepare_lock);

	new_rate = clk_recalc(core, parent_rate);

	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
	if (core->notifier_count)
		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);

	if (ret & NOTIFY_STOP_MASK) {
		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
			 __func__, core->name, ret);
		goto out;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
			     struct clk_core *new_parent, u8 p_index)
{
	struct clk_core *child;

	core->new_rate = new_rate;
	core->new_parent = new_parent;
	core->new_parent_index = p_index;
	/* include clk in new parent's PRE_RATE_CHANGE notifications */
	core->new_child = NULL;
	if (new_parent && new_parent != core->parent)
		new_parent->new_child = core;

	hlist_for_each_entry(child, &core->children, child_node) {
		child->new_rate = clk_recalc(child, new_rate);
		clk_calc_subtree(child, child->new_rate, NULL, 0);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk_core *clk_calc_new_rates(struct clk_core *core,
					   unsigned long rate)
{
	struct clk_core *top = core;
	struct clk_core *old_parent, *parent;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;
	unsigned long min_rate;
	unsigned long max_rate;
	int p_index = 0;
	long ret;

	/* sanity */
	if (IS_ERR_OR_NULL(core))
		return NULL;

	/* save parent rate, if it exists */
	parent = old_parent = core->parent;
	if (parent)
		best_parent_rate = parent->rate;

	clk_core_get_boundaries(core, &min_rate, &max_rate);

	/* find the closest rate and parent clk/rate */
	if (clk_core_can_round(core)) {
		struct clk_rate_request req;

		req.rate = rate;
		req.min_rate = min_rate;
		req.max_rate = max_rate;

		clk_core_init_rate_req(core, &req);

		ret = clk_core_determine_round_nolock(core, &req);
		if (ret < 0)
			return NULL;

		best_parent_rate = req.best_parent_rate;
		new_rate = req.rate;
		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;

		if (new_rate < min_rate || new_rate > max_rate)
			return NULL;
	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
		/* pass-through clock without adjustable parent */
		core->new_rate = core->rate;
		return NULL;
	} else {
		/* pass-through clock with adjustable parent */
		top = clk_calc_new_rates(parent, rate);
		new_rate = parent->new_rate;
		goto out;
	}

	/* some clocks must be gated to change parent */
	if (parent != old_parent &&
	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
		pr_debug("%s: %s not gated but wants to reparent\n",
			 __func__, core->name);
		return NULL;
	}

	/* try finding the new parent index */
	if (parent && core->num_parents > 1) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return NULL;
		}
	}

	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
	    best_parent_rate != parent->rate)
		top = clk_calc_new_rates(parent, best_parent_rate);

out:
	clk_calc_subtree(core, new_rate, parent, p_index);

	return top;
}

/*
 * Notify about rate changes in a subtree. Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
						  unsigned long event)
{
	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
	int ret = NOTIFY_DONE;

	if (core->rate == core->new_rate)
		return NULL;

	if (core->notifier_count) {
		ret = __clk_notify(core, event, core->rate, core->new_rate);
		if (ret & NOTIFY_STOP_MASK)
			fail_clk = core;
	}

	hlist_for_each_entry(child, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		tmp_clk = clk_propagate_rate_change(child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child) {
		tmp_clk = clk_propagate_rate_change(core->new_child, event);
		if (tmp_clk)
			fail_clk = tmp_clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk_core *core)
{
	struct clk_core *child;
	struct hlist_node *tmp;
	unsigned long old_rate;
	unsigned long best_parent_rate = 0;
	bool skip_set_rate = false;
	struct clk_core *old_parent;
	struct clk_core *parent = NULL;

	old_rate = core->rate;

	if (core->new_parent) {
		parent = core->new_parent;
		best_parent_rate = core->new_parent->rate;
	} else if (core->parent) {
		parent = core->parent;
		best_parent_rate = core->parent->rate;
	}

	if (clk_pm_runtime_get(core))
		return;

	if (core->flags & CLK_SET_RATE_UNGATE) {
		clk_core_prepare(core);
		clk_core_enable_lock(core);
	}

	if (core->new_parent && core->new_parent != core->parent) {
		old_parent = __clk_set_parent_before(core, core->new_parent);
		trace_clk_set_parent(core, core->new_parent);

		if (core->ops->set_rate_and_parent) {
			skip_set_rate = true;
			core->ops->set_rate_and_parent(core->hw, core->new_rate,
						       best_parent_rate,
						       core->new_parent_index);
		} else if (core->ops->set_parent) {
			core->ops->set_parent(core->hw, core->new_parent_index);
		}

		trace_clk_set_parent_complete(core, core->new_parent);
		__clk_set_parent_after(core, core->new_parent, old_parent);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_prepare_enable(parent);

	trace_clk_set_rate(core, core->new_rate);

	if (!skip_set_rate && core->ops->set_rate)
		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);

	trace_clk_set_rate_complete(core, core->new_rate);

	core->rate = clk_recalc(core, best_parent_rate);

	if (core->flags & CLK_SET_RATE_UNGATE) {
		clk_core_disable_lock(core);
		clk_core_unprepare(core);
	}

	if (core->flags & CLK_OPS_PARENT_ENABLE)
		clk_core_disable_unprepare(parent);

	if (core->notifier_count && old_rate != core->rate)
		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);

	if (core->flags & CLK_RECALC_NEW_RATES)
		(void)clk_calc_new_rates(core, core->new_rate);

	/*
	 * Use safe iteration, as change_rate can actually swap parents
	 * for certain clock types.
	 */
	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
		/* Skip children who will be reparented to another clock */
		if (child->new_parent && child->new_parent != core)
			continue;
		clk_change_rate(child);
	}

	/* handle the new child who might not be in core->children yet */
	if (core->new_child)
		clk_change_rate(core->new_child);

	clk_pm_runtime_put(core);
}

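/*
 * clk_core_req_round_rate_nolock - simulate clk_round_rate() with the
 * caller's own rate protection temporarily lifted. Returns the rate the
 * clock would run at if it could be set freely, or 0 if rounding fails.
 */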
static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
						    unsigned long req_rate)
{
	int ret, cnt;
	struct clk_rate_request req;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	/* simulate what the rate would be if it could be freely set */
	cnt = clk_core_rate_nuke_protect(core);
	if (cnt < 0)
		return cnt;

	clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
	req.rate = req_rate;

	ret = clk_core_round_rate_nolock(core, &req);

	/* restore the protection */
	clk_core_rate_restore_protect(core, cnt);

	return ret ? 0 : req.rate;
}

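/*
 * clk_core_set_rate_nolock - the bulk of the rate change machinery: round
 * the requested rate, compute the new rates for the affected subtree, fire
 * PRE_RATE_CHANGE notifiers and finally program the hardware top-down.
 */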
static int clk_core_set_rate_nolock(struct clk_core *core,
				    unsigned long req_rate)
{
	struct clk_core *top, *fail_clk;
	unsigned long rate;
	int ret = 0;

	if (!core)
		return 0;

	rate = clk_core_req_round_rate_nolock(core, req_rate);

	/* bail early if nothing to do */
	if (rate == clk_core_get_rate_nolock(core))
		return 0;

	/* fail on a direct rate change request for a protected provider */
	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(core, req_rate);
	if (!top)
		return -EINVAL;

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_debug("%s: failed to set %s rate\n", __func__,
			 fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto err;
	}

	/* change the rates */
	clk_change_rate(top);

	core->req_rate = req_rate;
err:
	clk_pm_runtime_put(core);

	return ret;
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_rate_nolock(clk->core, rate);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

/**
 * clk_set_rate_exclusive - specify a new rate and get exclusive control
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
 * within a critical section
 *
 * This can be used initially to ensure that at least 1 consumer is
 * satisfied when several consumers are competing for exclusivity over the
 * same clock provider.
 *
 * The exclusivity is not applied if setting the rate failed.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put().
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
	int ret;

	if (!clk)
		return 0;

	/* prevent racing with updates to the clock topology */
	clk_prepare_lock();

	/*
	 * The temporary protection removal is not here, on purpose
	 * This function is meant to be used instead of clk_rate_protect,
	 * so before the consumer code path protect the clock provider
	 */

	ret = clk_core_set_rate_nolock(clk->core, rate);
	if (!ret) {
		clk_core_rate_protect(clk->core);
		clk->exclusive_count++;
	}

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);

/**
 * clk_set_rate_range - set a rate range for a clock source
 * @clk: clock source
 * @min: desired minimum clock rate in Hz, inclusive
 * @max: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
	int ret = 0;
	unsigned long old_min, old_max, rate;

	if (!clk)
		return 0;

	trace_clk_set_rate_range(clk->core, min, max);

	if (min > max) {
		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
		       __func__, clk->core->name, clk->dev_id, clk->con_id,
		       min, max);
		return -EINVAL;
	}

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	/* Save the current values in case we need to rollback the change */
	old_min = clk->min_rate;
	old_max = clk->max_rate;
	clk->min_rate = min;
	clk->max_rate = max;

	rate = clk_core_get_rate_nolock(clk->core);
	if (rate < min || rate > max) {
		/*
		 * FIXME:
		 * We are in a bit of trouble here, the current rate is outside
		 * the requested range. We are going to try to request an
		 * appropriate range boundary but there is a catch. It may fail
		 * for the usual reason (clock broken, clock protected, etc)
		 * but also because:
		 * - round_rate() was not favorable and fell on the wrong
		 *   side of the boundary
		 * - the determine_rate() callback does not really check for
		 *   this corner case when determining the rate
		 */
		if (rate < min)
			rate = min;
		else
			rate = max;

		ret = clk_core_set_rate_nolock(clk->core, rate);
		if (ret) {
			/* rollback the changes */
			clk->min_rate = old_min;
			clk->max_rate = old_max;
		}
	}

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_range);

/**
 * clk_set_min_rate - set a minimum clock rate for a clock source
 * @clk: clock source
 * @rate: desired minimum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_min_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	trace_clk_set_min_rate(clk->core, rate);

	return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);

/**
 * clk_set_max_rate - set a maximum clock rate for a clock source
 * @clk: clock source
 * @rate: desired maximum clock rate in Hz, inclusive
 *
 * Returns success (0) or negative errno.
 */
int clk_set_max_rate(struct clk *clk, unsigned long rate)
{
	if (!clk)
		return 0;

	trace_clk_set_max_rate(clk->core, rate);

	return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	if (!clk)
		return NULL;

	clk_prepare_lock();
	/* TODO: Create a per-user clk and change callers to call clk_put */
	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
	clk_prepare_unlock();

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

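/*
 * __clk_init_parent - resolve the initial parent of @core by asking the
 * hardware (via .get_parent) which of the possible parents is selected;
 * defaults to index 0 for single-parent clocks.
 */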
static struct clk_core *__clk_init_parent(struct clk_core *core)
{
	u8 index = 0;

	if (core->num_parents > 1 && core->ops->get_parent)
		index = core->ops->get_parent(core->hw);

	return clk_core_get_parent_by_index(core, index);
}

static void clk_core_reparent(struct clk_core *core,
			      struct clk_core *new_parent)
{
	clk_reparent(core, new_parent);
	__clk_recalc_accuracies(core);
	__clk_recalc_rates(core, POST_RATE_CHANGE);
}

void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
{
	if (!hw)
		return;

	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
}

/**
 * clk_has_parent - check if a clock is a possible parent for another
 * @clk: clock source
 * @parent: parent clock source
 *
 * This function can be used in drivers that need to check that a clock can be
 * the parent of another without actually changing the parent.
 *
 * Returns true if @parent is a possible parent for @clk, false otherwise.
 */
bool clk_has_parent(struct clk *clk, struct clk *parent)
{
	struct clk_core *core, *parent_core;
	int i;

	/* NULL clocks should be nops, so return success if either is NULL. */
	if (!clk || !parent)
		return true;

	core = clk->core;
	parent_core = parent->core;

	/* Optimize for the case where the parent is already the parent. */
	if (core->parent == parent_core)
		return true;

	for (i = 0; i < core->num_parents; i++)
		if (!strcmp(core->parents[i].name, parent_core->name))
			return true;

	return false;
}
EXPORT_SYMBOL_GPL(clk_has_parent);

static int clk_core_set_parent_nolock(struct clk_core *core,
				      struct clk_core *parent)
{
	int ret = 0;
	int p_index = 0;
	unsigned long p_rate = 0;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (core->parent == parent)
		return 0;

	/* verify ops for multi-parent clks */
	if (core->num_parents > 1 && !core->ops->set_parent)
		return -EPERM;

	/* check that we are allowed to re-parent if the clock is in use */
	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
		return -EBUSY;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	/* try finding the new parent index */
	if (parent) {
		p_index = clk_fetch_parent_index(core, parent);
		if (p_index < 0) {
			pr_debug("%s: clk %s can not be parent of clk %s\n",
				 __func__, parent->name, core->name);
			return p_index;
		}
		p_rate = parent->rate;
	}

	ret = clk_pm_runtime_get(core);
	if (ret)
		return ret;

	/* propagate PRE_RATE_CHANGE notifications */
	ret = __clk_speculate_rates(core, p_rate);

	/* abort if a driver objects */
	if (ret & NOTIFY_STOP_MASK)
		goto runtime_put;

	/* do the re-parent */
	ret = __clk_set_parent(core, parent, p_index);

	/* propagate rate and accuracy recalculation accordingly */
	if (ret) {
		__clk_recalc_rates(core, ABORT_RATE_CHANGE);
	} else {
		__clk_recalc_rates(core, POST_RATE_CHANGE);
		__clk_recalc_accuracies(core);
	}

runtime_put:
	clk_pm_runtime_put(core);

	return ret;
}

int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
{
	return clk_core_set_parent_nolock(hw->core, parent->core);
}
EXPORT_SYMBOL_GPL(clk_hw_set_parent);

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk is in
 * prepared state, the clk will get enabled for the duration of this call. If
 * that's not acceptable for a specific clk (Eg: the consumer can't handle
 * that, the reparenting is glitchy in hardware, etc), use the
 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
 *
 * After successfully changing clk's parent clk_set_parent will update the
 * clk topology, sysfs topology and propagate rate recalculation via
 * __clk_recalc_rates.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_parent_nolock(clk->core,
					 parent ? parent->core : NULL);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
	int ret = -EINVAL;

	lockdep_assert_held(&prepare_lock);

	if (!core)
		return 0;

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_phase(core, degrees);

	if (core->ops->set_phase) {
		ret = core->ops->set_phase(core->hw, degrees);
		if (!ret)
			core->phase = degrees;
	}

	trace_clk_set_phase_complete(core, degrees);

	return ret;
}

/**
 * clk_set_phase - adjust the phase shift of a clock signal
 * @clk: clock signal source
 * @degrees: number of degrees the signal is shifted
 *
 * Shifts the phase of a clock signal by the specified
 * degrees. Returns 0 on success, -EERROR otherwise.
 *
 * This function makes no distinction about the input or reference
 * signal that we adjust the clock signal phase against. For example
 * phase locked-loop clock signal generators we may shift phase with
 * respect to feedback clock signal input, but for other cases the
 * clock phase may be shifted with respect to some other, unspecified
 * signal.
 *
 * Additionally the concept of phase shift does not propagate through
 * the clock tree hierarchy, which sets it apart from clock rates and
 * clock accuracy. A parent clock phase attribute does not have an
 * impact on the phase attribute of a child clock.
 */
int clk_set_phase(struct clk *clk, int degrees)
{
	int ret;

	if (!clk)
		return 0;

	/* sanity check degrees */
	degrees %= 360;
	if (degrees < 0)
		degrees += 360;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_phase_nolock(clk->core, degrees);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_phase);

static int clk_core_get_phase(struct clk_core *core)
{
	int ret;

	lockdep_assert_held(&prepare_lock);
	if (!core->ops->get_phase)
		return 0;

	/* Always try to update cached phase if possible */
	ret = core->ops->get_phase(core->hw);
	if (ret >= 0)
		core->phase = ret;

	return ret;
}

/**
 * clk_get_phase - return the phase shift of a clock signal
 * @clk: clock signal source
 *
 * Returns the phase shift of a clock node in degrees, otherwise returns
 * -EERROR.
 */
int clk_get_phase(struct clk *clk)
{
	int ret;

	if (!clk)
		return 0;

	clk_prepare_lock();
	ret = clk_core_get_phase(clk->core);
	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_get_phase);

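/*
 * Duty cycle handling: the cached struct clk_duty is refreshed from the
 * provider when possible and falls back to the parent's value (or a 50%
 * default) otherwise.
 */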
static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
{
	/* Assume a default value of 50% */
	core->duty.num = 1;
	core->duty.den = 2;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);

static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
{
	struct clk_duty *duty = &core->duty;
	int ret = 0;

	if (!core->ops->get_duty_cycle)
		return clk_core_update_duty_cycle_parent_nolock(core);

	ret = core->ops->get_duty_cycle(core->hw, duty);
	if (ret)
		goto reset;

	/* Don't trust the clock provider too much */
	if (duty->den == 0 || duty->num > duty->den) {
		ret = -EINVAL;
		goto reset;
	}

	return 0;

reset:
	clk_core_reset_duty_cycle_nolock(core);
	return ret;
}

static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & CLK_DUTY_CYCLE_PARENT) {
		ret = clk_core_update_duty_cycle_nolock(core->parent);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	} else {
		clk_core_reset_duty_cycle_nolock(core);
	}

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty);

static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
					  struct clk_duty *duty)
{
	int ret;

	lockdep_assert_held(&prepare_lock);

	if (clk_core_rate_is_protected(core))
		return -EBUSY;

	trace_clk_set_duty_cycle(core, duty);

	if (!core->ops->set_duty_cycle)
		return clk_core_set_duty_cycle_parent_nolock(core, duty);

	ret = core->ops->set_duty_cycle(core->hw, duty);
	if (!ret)
		memcpy(&core->duty, duty, sizeof(*duty));

	trace_clk_set_duty_cycle_complete(core, duty);

	return ret;
}

static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
						 struct clk_duty *duty)
{
	int ret = 0;

	if (core->parent &&
	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
	}

	return ret;
}

/**
 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @num: numerator of the duty cycle ratio to be applied
 * @den: denominator of the duty cycle ratio to be applied
 *
 * Apply the duty cycle ratio if the ratio is valid and the clock can
 * perform this operation
 *
 * Returns (0) on success, a negative errno otherwise.
 */
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
{
	int ret;
	struct clk_duty duty;

	if (!clk)
		return 0;

	/* sanity check the ratio */
	if (den == 0 || num > den)
		return -EINVAL;

	duty.num = num;
	duty.den = den;

	clk_prepare_lock();

	if (clk->exclusive_count)
		clk_core_rate_unprotect(clk->core);

	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);

	if (clk->exclusive_count)
		clk_core_rate_protect(clk->core);

	clk_prepare_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_duty_cycle);

static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
					  unsigned int scale)
{
	struct clk_duty *duty = &core->duty;
	int ret;

	clk_prepare_lock();

	ret = clk_core_update_duty_cycle_nolock(core);
	if (!ret)
		ret = mult_frac(scale, duty->num, duty->den);

	clk_prepare_unlock();

	return ret;
}

/**
 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
 * @clk: clock signal source
 * @scale: scaling factor to be applied to represent the ratio as an integer
 *
 * Returns the duty cycle ratio of a clock node multiplied by the provided
 * scaling factor, or negative errno on error.
 */
2885int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2886{
2887 if (!clk)
2888 return 0;
2889
2890 return clk_core_get_scaled_duty_cycle(clk->core, scale);
2891}
2892EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
2893
/**
 * clk_is_match - check if two clk's point to the same hardware clock
 * @p: clk compared against q
 * @q: clk compared against p
 *
 * Returns true if the two struct clk pointers both point to the same hardware
 * clock node. Put differently, returns true if @p and @q share the same
 * struct clk_core object.
 *
 * Returns false otherwise. Note that two NULL clks are treated as matching.
 */
2905bool clk_is_match(const struct clk *p, const struct clk *q)
2906{
	/* trivial case: identical struct clk's or both NULL */
2908 if (p == q)
2909 return true;
2910
	/* true if clk->core pointers match. Avoid dereferencing garbage */
2912 if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2913 if (p->core == q->core)
2914 return true;
2915
2916 return false;
2917}
2918EXPORT_SYMBOL_GPL(clk_is_match);
2919
/***        debugfs support        ***/

2922#ifdef CONFIG_DEBUG_FS
2923#include <linux/debugfs.h>
2924
2925static struct dentry *rootdir;
2926static int inited = 0;
2927static DEFINE_MUTEX(clk_debug_lock);
2928static HLIST_HEAD(clk_debug_list);
2929
2930static struct hlist_head *orphan_list[] = {
2931 &clk_orphan_list,
2932 NULL,
2933};
2934
2935static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2936 int level)
2937{
2938 int phase;
2939
2940 seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
2941 level * 3 + 1, "",
2942 30 - level * 3, c->name,
2943 c->enable_count, c->prepare_count, c->protect_count,
2944 clk_core_get_rate_recalc(c),
2945 clk_core_get_accuracy_recalc(c));
2946
2947 phase = clk_core_get_phase(c);
2948 if (phase >= 0)
2949 seq_printf(s, "%5d", phase);
2950 else
2951 seq_puts(s, "-----");
2952
2953 seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
2954
2955 if (c->ops->is_enabled)
2956 seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
2957 else if (!c->ops->enable)
2958 seq_printf(s, " %9c\n", 'Y');
2959 else
2960 seq_printf(s, " %9c\n", '?');
2961}
2962
2963static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2964 int level)
2965{
2966 struct clk_core *child;
2967
2968 clk_summary_show_one(s, c, level);
2969
2970 hlist_for_each_entry(child, &c->children, child_node)
2971 clk_summary_show_subtree(s, child, level + 1);
2972}
2973
2974static int clk_summary_show(struct seq_file *s, void *data)
2975{
2976 struct clk_core *c;
2977 struct hlist_head **lists = (struct hlist_head **)s->private;
2978
2979 seq_puts(s, " enable prepare protect duty hardware\n");
2980 seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
2981 seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
2982
2983 clk_prepare_lock();
2984
2985 for (; *lists; lists++)
2986 hlist_for_each_entry(c, *lists, child_node)
2987 clk_summary_show_subtree(s, c, 0);
2988
2989 clk_prepare_unlock();
2990
2991 return 0;
2992}
2993DEFINE_SHOW_ATTRIBUTE(clk_summary);
2994
2995static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2996{
2997 int phase;
2998 unsigned long min_rate, max_rate;
2999
3000 clk_core_get_boundaries(c, &min_rate, &max_rate);
3001
	/* This should be JSON format, i.e. elements separated with a comma */
3003 seq_printf(s, "\"%s\": { ", c->name);
3004 seq_printf(s, "\"enable_count\": %d,", c->enable_count);
3005 seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
3006 seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3007 seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
3008 seq_printf(s, "\"min_rate\": %lu,", min_rate);
3009 seq_printf(s, "\"max_rate\": %lu,", max_rate);
3010 seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
3011 phase = clk_core_get_phase(c);
3012 if (phase >= 0)
3013 seq_printf(s, "\"phase\": %d,", phase);
3014 seq_printf(s, "\"duty_cycle\": %u",
3015 clk_core_get_scaled_duty_cycle(c, 100000));
3016}
3017
3018static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
3019{
3020 struct clk_core *child;
3021
3022 clk_dump_one(s, c, level);
3023
3024 hlist_for_each_entry(child, &c->children, child_node) {
3025 seq_putc(s, ',');
3026 clk_dump_subtree(s, child, level + 1);
3027 }
3028
3029 seq_putc(s, '}');
3030}
3031
3032static int clk_dump_show(struct seq_file *s, void *data)
3033{
3034 struct clk_core *c;
3035 bool first_node = true;
3036 struct hlist_head **lists = (struct hlist_head **)s->private;
3037
3038 seq_putc(s, '{');
3039 clk_prepare_lock();
3040
3041 for (; *lists; lists++) {
3042 hlist_for_each_entry(c, *lists, child_node) {
3043 if (!first_node)
3044 seq_putc(s, ',');
3045 first_node = false;
3046 clk_dump_subtree(s, c, 0);
3047 }
3048 }
3049
3050 clk_prepare_unlock();
3051
3052 seq_puts(s, "}\n");
3053 return 0;
3054}
3055DEFINE_SHOW_ATTRIBUTE(clk_dump);
3056
3057#undef CLOCK_ALLOW_WRITE_DEBUGFS
3058#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, therefore don't provide any real compile time
 * configuration option for this feature.
 * People who want to use this will need to modify the source code directly.
 */
3064static int clk_rate_set(void *data, u64 val)
3065{
3066 struct clk_core *core = data;
3067 int ret;
3068
3069 clk_prepare_lock();
3070 ret = clk_core_set_rate_nolock(core, val);
3071 clk_prepare_unlock();
3072
3073 return ret;
3074}
3075
3076#define clk_rate_mode 0644
3077
3078static int clk_prepare_enable_set(void *data, u64 val)
3079{
3080 struct clk_core *core = data;
3081 int ret = 0;
3082
3083 if (val)
3084 ret = clk_prepare_enable(core->hw->clk);
3085 else
3086 clk_disable_unprepare(core->hw->clk);
3087
3088 return ret;
3089}
3090
3091static int clk_prepare_enable_get(void *data, u64 *val)
3092{
3093 struct clk_core *core = data;
3094
3095 *val = core->enable_count && core->prepare_count;
3096 return 0;
3097}
3098
3099DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
3100 clk_prepare_enable_set, "%llu\n");
3101
3102#else
3103#define clk_rate_set NULL
3104#define clk_rate_mode 0444
3105#endif
3106
3107static int clk_rate_get(void *data, u64 *val)
3108{
3109 struct clk_core *core = data;
3110
3111 *val = core->rate;
3112 return 0;
3113}
3114
3115DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3116
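/* Human-readable names for each clk framework flag, for debugfs clk_flags. */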
3117static const struct {
3118 unsigned long flag;
3119 const char *name;
3120} clk_flags[] = {
3121#define ENTRY(f) { f, #f }
3122 ENTRY(CLK_SET_RATE_GATE),
3123 ENTRY(CLK_SET_PARENT_GATE),
3124 ENTRY(CLK_SET_RATE_PARENT),
3125 ENTRY(CLK_IGNORE_UNUSED),
3126 ENTRY(CLK_GET_RATE_NOCACHE),
3127 ENTRY(CLK_SET_RATE_NO_REPARENT),
3128 ENTRY(CLK_GET_ACCURACY_NOCACHE),
3129 ENTRY(CLK_RECALC_NEW_RATES),
3130 ENTRY(CLK_SET_RATE_UNGATE),
3131 ENTRY(CLK_IS_CRITICAL),
3132 ENTRY(CLK_OPS_PARENT_ENABLE),
3133 ENTRY(CLK_DUTY_CYCLE_PARENT),
3134#undef ENTRY
3135};
3136
3137static int clk_flags_show(struct seq_file *s, void *data)
3138{
3139 struct clk_core *core = s->private;
3140 unsigned long flags = core->flags;
3141 unsigned int i;
3142
3143 for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3144 if (flags & clk_flags[i].flag) {
3145 seq_printf(s, "%s\n", clk_flags[i].name);
3146 flags &= ~clk_flags[i].flag;
3147 }
3148 }
3149 if (flags) {
		/* Unknown flags */
3151 seq_printf(s, "0x%lx\n", flags);
3152 }
3153
3154 return 0;
3155}
3156DEFINE_SHOW_ATTRIBUTE(clk_flags);
3157
3158static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3159 unsigned int i, char terminator)
3160{
3161 struct clk_core *parent;
3162
	/*
	 * Go through the following options to fetch a parent's name.
	 *
	 * 1. Fetch the registered parent clock and use its name
	 * 2. Use the global (fallback) name if specified
	 * 3. Use the local fw_name if provided
	 * 4. Fetch parent clock's clock-output-name if DT index was set
	 *
	 * This may still fail in some cases, such as when the parent is
	 * specified directly via a struct clk_hw pointer, but it isn't
	 * registered (yet).
	 */
3175 parent = clk_core_get_parent_by_index(core, i);
3176 if (parent)
3177 seq_puts(s, parent->name);
3178 else if (core->parents[i].name)
3179 seq_puts(s, core->parents[i].name);
3180 else if (core->parents[i].fw_name)
3181 seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3182 else if (core->parents[i].index >= 0)
3183 seq_puts(s,
3184 of_clk_get_parent_name(core->of_node,
3185 core->parents[i].index));
3186 else
3187 seq_puts(s, "(missing)");
3188
3189 seq_putc(s, terminator);
3190}
3191
3192static int possible_parents_show(struct seq_file *s, void *data)
3193{
3194 struct clk_core *core = s->private;
3195 int i;
3196
3197 for (i = 0; i < core->num_parents - 1; i++)
3198 possible_parent_show(s, core, i, ' ');
3199
3200 possible_parent_show(s, core, i, '\n');
3201
3202 return 0;
3203}
3204DEFINE_SHOW_ATTRIBUTE(possible_parents);
3205
3206static int current_parent_show(struct seq_file *s, void *data)
3207{
3208 struct clk_core *core = s->private;
3209
3210 if (core->parent)
3211 seq_printf(s, "%s\n", core->parent->name);
3212
3213 return 0;
3214}
3215DEFINE_SHOW_ATTRIBUTE(current_parent);
3216
3217static int clk_duty_cycle_show(struct seq_file *s, void *data)
3218{
3219 struct clk_core *core = s->private;
3220 struct clk_duty *duty = &core->duty;
3221
3222 seq_printf(s, "%u/%u\n", duty->num, duty->den);
3223
3224 return 0;
3225}
3226DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3227
3228static int clk_min_rate_show(struct seq_file *s, void *data)
3229{
3230 struct clk_core *core = s->private;
3231 unsigned long min_rate, max_rate;
3232
3233 clk_prepare_lock();
3234 clk_core_get_boundaries(core, &min_rate, &max_rate);
3235 clk_prepare_unlock();
3236 seq_printf(s, "%lu\n", min_rate);
3237
3238 return 0;
3239}
3240DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3241
3242static int clk_max_rate_show(struct seq_file *s, void *data)
3243{
3244 struct clk_core *core = s->private;
3245 unsigned long min_rate, max_rate;
3246
3247 clk_prepare_lock();
3248 clk_core_get_boundaries(core, &min_rate, &max_rate);
3249 clk_prepare_unlock();
3250 seq_printf(s, "%lu\n", max_rate);
3251
3252 return 0;
3253}
3254DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3255
3256static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3257{
3258 struct dentry *root;
3259
3260 if (!core || !pdentry)
3261 return;
3262
3263 root = debugfs_create_dir(core->name, pdentry);
3264 core->dentry = root;
3265
3266 debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3267 &clk_rate_fops);
3268 debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3269 debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3270 debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3271 debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3272 debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3273 debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3274 debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3275 debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3276 debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3277 debugfs_create_file("clk_duty_cycle", 0444, root, core,
3278 &clk_duty_cycle_fops);
3279#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3280 debugfs_create_file("clk_prepare_enable", 0644, root, core,
3281 &clk_prepare_enable_fops);
3282#endif
3283
3284 if (core->num_parents > 0)
3285 debugfs_create_file("clk_parent", 0444, root, core,
				    &current_parent_fops);
3287
3288 if (core->num_parents > 1)
3289 debugfs_create_file("clk_possible_parents", 0444, root, core,
3290 &possible_parents_fops);
3291
3292 if (core->ops->debug_init)
3293 core->ops->debug_init(core->hw, core->dentry);
3294}
3295
/**
 * clk_debug_register - add a clk node to the debugfs clk directory
 * @core: the clk being added to the debugfs clk directory
 *
 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk directory
 * will be created lazily by clk_debug_init as part of a late_initcall.
 */
3304static void clk_debug_register(struct clk_core *core)
3305{
3306 mutex_lock(&clk_debug_lock);
3307 hlist_add_head(&core->debug_node, &clk_debug_list);
3308 if (inited)
3309 clk_debug_create_one(core, rootdir);
3310 mutex_unlock(&clk_debug_lock);
3311}
3312
/**
 * clk_debug_unregister - remove a clk node from the debugfs clk directory
 * @core: the clk being removed from the debugfs clk directory
 *
 * Dynamically removes a clk and all its child nodes from the
 * debugfs clk directory if clk->dentry points to debugfs created by
 * clk_debug_register in __clk_core_init.
 */
3321static void clk_debug_unregister(struct clk_core *core)
3322{
3323 mutex_lock(&clk_debug_lock);
3324 hlist_del_init(&core->debug_node);
3325 debugfs_remove_recursive(core->dentry);
3326 core->dentry = NULL;
3327 mutex_unlock(&clk_debug_lock);
3328}
3329
/**
 * clk_debug_init - lazily populate the debugfs clk directory
 *
 * clks are often initialized very early during boot before memory can be
 * dynamically allocated and well before debugfs is setup. This function
 * populates the debugfs clk directory once at boot-time when we know that
 * debugfs is setup. It should only be called once at boot-time, all other
 * clks added dynamically will be done so with clk_debug_register.
 */
3339static int __init clk_debug_init(void)
3340{
3341 struct clk_core *core;
3342
3343 rootdir = debugfs_create_dir("clk", NULL);
3344
3345 debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3346 &clk_summary_fops);
3347 debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3348 &clk_dump_fops);
3349 debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3350 &clk_summary_fops);
3351 debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3352 &clk_dump_fops);
3353
3354 mutex_lock(&clk_debug_lock);
3355 hlist_for_each_entry(core, &clk_debug_list, debug_node)
3356 clk_debug_create_one(core, rootdir);
3357
3358 inited = 1;
3359 mutex_unlock(&clk_debug_lock);
3360
3361 return 0;
3362}
3363late_initcall(clk_debug_init);
3364#else
3365static inline void clk_debug_register(struct clk_core *core) { }
3366static inline void clk_debug_unregister(struct clk_core *core)
3367{
3368}
3369#endif
3370
3371static void clk_core_reparent_orphans_nolock(void)
3372{
3373 struct clk_core *orphan;
3374 struct hlist_node *tmp2;
3375
	/*
	 * walk the list of orphan clocks and reparent any that newly find a
	 * parent.
	 */
3380 hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3381 struct clk_core *parent = __clk_init_parent(orphan);
3382
		/*
		 * We need to use __clk_set_parent_before() and _after() to
		 * properly migrate any prepare/enable count of the orphan
		 * clock. This is important for CLK_IS_CRITICAL clocks, which
		 * are enabled during init but might not have a parent yet.
		 */
3389 if (parent) {
			/* update the clk tree topology */
3391 __clk_set_parent_before(orphan, parent);
3392 __clk_set_parent_after(orphan, parent, NULL);
3393 __clk_recalc_accuracies(orphan);
3394 __clk_recalc_rates(orphan, 0);
3395 }
3396 }
3397}
3398
/**
 * __clk_core_init - initialize the data structures in a struct clk_core
 * @core:	clk_core being initialized
 *
 * Initializes the lists in struct clk_core, queries the hardware for the
 * parent and rate and sets them both.
 */
3406static int __clk_core_init(struct clk_core *core)
3407{
3408 int ret;
3409 struct clk_core *parent;
3410 unsigned long rate;
3411 int phase;
3412
3413 if (!core)
3414 return -EINVAL;
3415
3416 clk_prepare_lock();
3417
3418 ret = clk_pm_runtime_get(core);
3419 if (ret)
3420 goto unlock;
3421
	/* check to see if a clock with this name is already registered */
3423 if (clk_core_lookup(core->name)) {
3424 pr_debug("%s: clk %s already initialized\n",
3425 __func__, core->name);
3426 ret = -EEXIST;
3427 goto out;
3428 }
3429
	/* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3431 if (core->ops->set_rate &&
3432 !((core->ops->round_rate || core->ops->determine_rate) &&
3433 core->ops->recalc_rate)) {
3434 pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3435 __func__, core->name);
3436 ret = -EINVAL;
3437 goto out;
3438 }
3439
3440 if (core->ops->set_parent && !core->ops->get_parent) {
3441 pr_err("%s: %s must implement .get_parent & .set_parent\n",
3442 __func__, core->name);
3443 ret = -EINVAL;
3444 goto out;
3445 }
3446
3447 if (core->num_parents > 1 && !core->ops->get_parent) {
		pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
3449 __func__, core->name);
3450 ret = -EINVAL;
3451 goto out;
3452 }
3453
3454 if (core->ops->set_rate_and_parent &&
3455 !(core->ops->set_parent && core->ops->set_rate)) {
3456 pr_err("%s: %s must implement .set_parent & .set_rate\n",
3457 __func__, core->name);
3458 ret = -EINVAL;
3459 goto out;
3460 }
3461
	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic for
	 * CCF to get an accurate view of the clock for any other callbacks.
	 * It may also be used when the clock needs to perform dynamic
	 * allocations. Such allocations must be freed in the terminate()
	 * callback.
	 * This callback shall not be used to initialize the parameters state,
	 * such as rate, parent, etc ...
	 *
	 * If it exists, this callback should be called before any other
	 * callback of the clock.
	 */
3476 if (core->ops->init) {
3477 ret = core->ops->init(core->hw);
3478 if (ret)
3479 goto out;
3480 }
3481
3482 parent = core->parent = __clk_init_parent(core);
3483
	/*
	 * Populate core->parent if parent has already been clk_core_init'd. If
	 * parent has not yet been clk_core_init'd then place clk in the orphan
	 * list.  If clk doesn't have any parents then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
3494 if (parent) {
3495 hlist_add_head(&core->child_node, &parent->children);
3496 core->orphan = parent->orphan;
3497 } else if (!core->num_parents) {
3498 hlist_add_head(&core->child_node, &clk_root_list);
3499 core->orphan = false;
3500 } else {
3501 hlist_add_head(&core->child_node, &clk_orphan_list);
3502 core->orphan = true;
3503 }
3504
	/*
	 * Set clk's accuracy.  The preferred method is to use
	 * .recalc_accuracy. For simple clocks and lazy developers the default
	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
	 * parent (or is orphaned) then accuracy is set to zero (perfect
	 * clock).
	 */
3512 if (core->ops->recalc_accuracy)
3513 core->accuracy = core->ops->recalc_accuracy(core->hw,
3514 clk_core_get_accuracy_no_lock(parent));
3515 else if (parent)
3516 core->accuracy = parent->accuracy;
3517 else
3518 core->accuracy = 0;
3519
	/*
	 * Set clk's phase, which is cached by clk_core_get_phase(). A
	 * negative return here is treated as an initialization failure,
	 * since there is no sensible fallback for a missing phase.
	 */
3525 phase = clk_core_get_phase(core);
3526 if (phase < 0) {
3527 ret = phase;
3528 pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
3529 core->name);
3530 goto out;
3531 }
3532
	/*
	 * Set clk's duty cycle.
	 */
3536 clk_core_update_duty_cycle_nolock(core);
3537
	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
3544 if (core->ops->recalc_rate)
3545 rate = core->ops->recalc_rate(core->hw,
3546 clk_core_get_rate_nolock(parent));
3547 else if (parent)
3548 rate = parent->rate;
3549 else
3550 rate = 0;
3551 core->rate = core->req_rate = rate;
3552
	/*
	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
	 * don't get accidentally disabled when walking the orphan tree and
	 * reparenting clocks.
	 */
3558 if (core->flags & CLK_IS_CRITICAL) {
3559 ret = clk_core_prepare(core);
3560 if (ret) {
3561 pr_warn("%s: critical clk '%s' failed to prepare\n",
3562 __func__, core->name);
3563 goto out;
3564 }
3565
3566 ret = clk_core_enable_lock(core);
3567 if (ret) {
3568 pr_warn("%s: critical clk '%s' failed to enable\n",
3569 __func__, core->name);
3570 clk_core_unprepare(core);
3571 goto out;
3572 }
3573 }
3574
3575 clk_core_reparent_orphans_nolock();
3576
3577
3578 kref_init(&core->ref);
3579out:
3580 clk_pm_runtime_put(core);
3581unlock:
3582 if (ret)
3583 hlist_del_init(&core->child_node);
3584
3585 clk_prepare_unlock();
3586
3587 if (!ret)
3588 clk_debug_register(core);
3589
3590 return ret;
3591}
3592
/**
 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
 * @core: clk to add consumer to
 * @clk: consumer to link to a clk
 */
3598static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3599{
3600 clk_prepare_lock();
3601 hlist_add_head(&clk->clks_node, &core->clks);
3602 clk_prepare_unlock();
3603}
3604
/**
 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
 * @clk: consumer to unlink
 */
3609static void clk_core_unlink_consumer(struct clk *clk)
3610{
3611 lockdep_assert_held(&prepare_lock);
3612 hlist_del(&clk->clks_node);
3613}
3614
/**
 * alloc_clk - Allocate a clk consumer, but leave it unlinked to the clk_core
 * @core: clk to allocate a consumer for
 * @dev_id: string describing device name
 * @con_id: connection ID string on device
 *
 * Returns: clk consumer left unlinked from the consumer list
 */
3623static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3624 const char *con_id)
3625{
3626 struct clk *clk;
3627
3628 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3629 if (!clk)
3630 return ERR_PTR(-ENOMEM);
3631
3632 clk->core = core;
3633 clk->dev_id = dev_id;
3634 clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3635 clk->max_rate = ULONG_MAX;
3636
3637 return clk;
3638}
3639
/**
 * free_clk - Free a clk consumer
 * @clk: clk consumer to free
 *
 * Note, this assumes the clk has been unlinked from the clk_core consumer
 * list.
 */
3647static void free_clk(struct clk *clk)
3648{
3649 kfree_const(clk->con_id);
3650 kfree(clk);
3651}
3652
/**
 * clk_hw_create_clk: Allocate and link a clk consumer to a clk_core given
 * a clk_hw
 * @dev: clk consumer device
 * @hw: clk_hw associated with the clk being consumed
 * @dev_id: string describing device name
 * @con_id: connection ID string on device
 *
 * This is the main function used to create a clk pointer for use by clk
 * consumers. It connects a consumer to the clk_core and clk_hw structures
 * used by the framework and clk provider respectively.
 */
3665struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3666 const char *dev_id, const char *con_id)
3667{
3668 struct clk *clk;
3669 struct clk_core *core;
3670
	/* This is to allow this function to be chained to others */
3672 if (IS_ERR_OR_NULL(hw))
3673 return ERR_CAST(hw);
3674
3675 core = hw->core;
3676 clk = alloc_clk(core, dev_id, con_id);
3677 if (IS_ERR(clk))
3678 return clk;
3679 clk->dev = dev;
3680
3681 if (!try_module_get(core->owner)) {
3682 free_clk(clk);
3683 return ERR_PTR(-ENOENT);
3684 }
3685
3686 kref_get(&core->ref);
3687 clk_core_link_consumer(core, clk);
3688
3689 return clk;
3690}
3691
/**
 * clk_hw_get_clk - get clk consumer given a clk_hw
 * @hw: clk_hw associated with the clk being consumed
 * @con_id: connection ID string on device
 *
 * Returns: new clk consumer
 * This is the function to be used by providers which need
 * to get a consumer clk and act on the clock element.
 * Calls to this function must be balanced with calls to clk_put().
 */
3702struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
3703{
3704 struct device *dev = hw->core->dev;
3705
3706 return clk_hw_create_clk(dev, hw, dev_name(dev), con_id);
3707}
3708EXPORT_SYMBOL(clk_hw_get_clk);
3709
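/* Duplicate @src into *@dst_p; a NULL @src is an error only if @must_exist. */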
3710static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3711{
3712 const char *dst;
3713
3714 if (!src) {
3715 if (must_exist)
3716 return -EINVAL;
3717 return 0;
3718 }
3719
3720 *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3721 if (!dst)
3722 return -ENOMEM;
3723
3724 return 0;
3725}
3726
3727static int clk_core_populate_parent_map(struct clk_core *core,
3728 const struct clk_init_data *init)
3729{
3730 u8 num_parents = init->num_parents;
3731 const char * const *parent_names = init->parent_names;
3732 const struct clk_hw **parent_hws = init->parent_hws;
3733 const struct clk_parent_data *parent_data = init->parent_data;
3734 int i, ret = 0;
3735 struct clk_parent_map *parents, *parent;
3736
3737 if (!num_parents)
3738 return 0;
3739
	/*
	 * Avoid unnecessary string look-ups of clk_core's possible parents by
	 * having a cache of names/clk_hw pointers to clk_core pointers.
	 */
3744 parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3745 core->parents = parents;
3746 if (!parents)
3747 return -ENOMEM;
3748
	/* Copy everything over because it might be __initdata */
3750 for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3751 parent->index = -1;
3752 if (parent_names) {
			/* throw a WARN if any entries are NULL */
3754 WARN(!parent_names[i],
3755 "%s: invalid NULL in %s's .parent_names\n",
3756 __func__, core->name);
3757 ret = clk_cpy_name(&parent->name, parent_names[i],
3758 true);
3759 } else if (parent_data) {
3760 parent->hw = parent_data[i].hw;
3761 parent->index = parent_data[i].index;
3762 ret = clk_cpy_name(&parent->fw_name,
3763 parent_data[i].fw_name, false);
3764 if (!ret)
3765 ret = clk_cpy_name(&parent->name,
3766 parent_data[i].name,
3767 false);
3768 } else if (parent_hws) {
3769 parent->hw = parent_hws[i];
3770 } else {
3771 ret = -EINVAL;
3772 WARN(1, "Must specify parents if num_parents > 0\n");
3773 }
3774
3775 if (ret) {
3776 do {
3777 kfree_const(parents[i].name);
3778 kfree_const(parents[i].fw_name);
3779 } while (--i >= 0);
3780 kfree(parents);
3781
3782 return ret;
3783 }
3784 }
3785
3786 return 0;
3787}
3788
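/* Free the parent map along with the names copied by clk_cpy_name(). */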
3789static void clk_core_free_parent_map(struct clk_core *core)
3790{
3791 int i = core->num_parents;
3792
3793 if (!core->num_parents)
3794 return;
3795
3796 while (--i >= 0) {
3797 kfree_const(core->parents[i].name);
3798 kfree_const(core->parents[i].fw_name);
3799 }
3800
3801 kfree(core->parents);
3802}
3803
3804static struct clk *
3805__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3806{
3807 int ret;
3808 struct clk_core *core;
3809 const struct clk_init_data *init = hw->init;
3810
	/*
	 * The init data is not supposed to be used outside of registration
	 * path. Set it to NULL so that provider drivers can't use it either
	 * and so that we catch use of hw->init early on in the core.
	 */
3816 hw->init = NULL;
3817
3818 core = kzalloc(sizeof(*core), GFP_KERNEL);
3819 if (!core) {
3820 ret = -ENOMEM;
3821 goto fail_out;
3822 }
3823
3824 core->name = kstrdup_const(init->name, GFP_KERNEL);
3825 if (!core->name) {
3826 ret = -ENOMEM;
3827 goto fail_name;
3828 }
3829
3830 if (WARN_ON(!init->ops)) {
3831 ret = -EINVAL;
3832 goto fail_ops;
3833 }
3834 core->ops = init->ops;
3835
3836 if (dev && pm_runtime_enabled(dev))
3837 core->rpm_enabled = true;
3838 core->dev = dev;
3839 core->of_node = np;
3840 if (dev && dev->driver)
3841 core->owner = dev->driver->owner;
3842 core->hw = hw;
3843 core->flags = init->flags;
3844 core->num_parents = init->num_parents;
3845 core->min_rate = 0;
3846 core->max_rate = ULONG_MAX;
3847 hw->core = core;
3848
3849 ret = clk_core_populate_parent_map(core, init);
3850 if (ret)
3851 goto fail_parents;
3852
3853 INIT_HLIST_HEAD(&core->clks);
3854
	/*
	 * Don't call clk_hw_create_clk() here because that would pin the
	 * provider module to itself and prevent it from ever being removed.
	 */
3859 hw->clk = alloc_clk(core, NULL, NULL);
3860 if (IS_ERR(hw->clk)) {
3861 ret = PTR_ERR(hw->clk);
3862 goto fail_create_clk;
3863 }
3864
3865 clk_core_link_consumer(hw->core, hw->clk);
3866
3867 ret = __clk_core_init(core);
3868 if (!ret)
3869 return hw->clk;
3870
3871 clk_prepare_lock();
3872 clk_core_unlink_consumer(hw->clk);
3873 clk_prepare_unlock();
3874
3875 free_clk(hw->clk);
3876 hw->clk = NULL;
3877
3878fail_create_clk:
3879 clk_core_free_parent_map(core);
3880fail_parents:
3881fail_ops:
3882 kfree_const(core->name);
3883fail_name:
3884 kfree(core);
3885fail_out:
3886 return ERR_PTR(ret);
3887}
3888
/**
 * dev_or_parent_of_node() - Get device node of @dev or its parent
 * @dev: Device to get device node of
 *
 * Return: device node pointer of @dev, or the device node pointer of
 * @dev->parent if dev doesn't have a device node, or NULL if neither
 * @dev nor @dev->parent have a device node.
 */
3897static struct device_node *dev_or_parent_of_node(struct device *dev)
3898{
3899 struct device_node *np;
3900
3901 if (!dev)
3902 return NULL;
3903
3904 np = dev_of_node(dev);
3905 if (!np)
3906 np = dev_of_node(dev->parent);
3907
3908 return np;
3909}
3910
/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the *deprecated* interface for populating the clock tree
 * with new clock nodes. Use clk_hw_register() instead.
 *
 * Returns: a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with
 * the rest of the clock API.  In the event of an error clk_register will
 * return an error code; drivers must test for an error code after calling
 * clk_register.
 */
3924struct clk *clk_register(struct device *dev, struct clk_hw *hw)
3925{
3926 return __clk_register(dev, dev_or_parent_of_node(dev), hw);
3927}
3928EXPORT_SYMBOL_GPL(clk_register);
3929
/**
 * clk_hw_register - register a clk_hw and return an error code
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_hw_register is the primary interface for populating the clock tree with
 * new clock nodes. It returns an integer equal to zero indicating success or
 * less than zero indicating failure. Drivers must test for an error code
 * after calling clk_hw_register().
 */
3940int clk_hw_register(struct device *dev, struct clk_hw *hw)
3941{
3942 return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
3943 hw));
3944}
3945EXPORT_SYMBOL_GPL(clk_hw_register);
3946
/*
 * of_clk_hw_register - register a clk_hw and return an error code
 * @node: device_node of device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * of_clk_hw_register() is the primary interface for populating the clock
 * tree with new clock nodes when a struct device is not available, but a
 * struct device_node is. It returns an integer equal to zero indicating
 * success or less than zero indicating failure. Drivers must test for an
 * error code after calling of_clk_hw_register().
 */
3958int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
3959{
3960 return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
3961}
3962EXPORT_SYMBOL_GPL(of_clk_hw_register);
3963
/* Free memory allocated for a clock. */
3965static void __clk_release(struct kref *ref)
3966{
3967 struct clk_core *core = container_of(ref, struct clk_core, ref);
3968
3969 lockdep_assert_held(&prepare_lock);
3970
3971 clk_core_free_parent_map(core);
3972 kfree_const(core->name);
3973 kfree(core);
3974}
3975
/*
 * Empty clk_ops for unregistered clocks. These are used temporarily
 * after clk_unregister() was called on a clock and until last clock
 * consumer calls clk_put() and the struct clk object is freed.
 */
3981static int clk_nodrv_prepare_enable(struct clk_hw *hw)
3982{
3983 return -ENXIO;
3984}
3985
3986static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
3987{
3988 WARN_ON_ONCE(1);
3989}
3990
3991static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
3992 unsigned long parent_rate)
3993{
3994 return -ENXIO;
3995}
3996
3997static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
3998{
3999 return -ENXIO;
4000}
4001
4002static const struct clk_ops clk_nodrv_ops = {
4003 .enable = clk_nodrv_prepare_enable,
4004 .disable = clk_nodrv_disable_unprepare,
4005 .prepare = clk_nodrv_prepare_enable,
4006 .unprepare = clk_nodrv_disable_unprepare,
4007 .set_rate = clk_nodrv_set_rate,
4008 .set_parent = clk_nodrv_set_parent,
4009};
4010
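/*
 * Clear any cached references to @target in the parent maps of @root and all
 * of its descendants.
 */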
4011static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
4012 struct clk_core *target)
4013{
4014 int i;
4015 struct clk_core *child;
4016
4017 for (i = 0; i < root->num_parents; i++)
4018 if (root->parents[i].core == target)
4019 root->parents[i].core = NULL;
4020
4021 hlist_for_each_entry(child, &root->children, child_node)
4022 clk_core_evict_parent_cache_subtree(child, target);
4023}
4024
/* Remove this clk from all parent caches */
4026static void clk_core_evict_parent_cache(struct clk_core *core)
4027{
4028 struct hlist_head **lists;
4029 struct clk_core *root;
4030
4031 lockdep_assert_held(&prepare_lock);
4032
4033 for (lists = all_lists; *lists; lists++)
4034 hlist_for_each_entry(root, *lists, child_node)
4035 clk_core_evict_parent_cache_subtree(root, core);
4036
4037}
4038
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 */
4043void clk_unregister(struct clk *clk)
4044{
4045 unsigned long flags;
4046 const struct clk_ops *ops;
4047
4048 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4049 return;
4050
4051 clk_debug_unregister(clk->core);
4052
4053 clk_prepare_lock();
4054
4055 ops = clk->core->ops;
4056 if (ops == &clk_nodrv_ops) {
4057 pr_err("%s: unregistered clock: %s\n", __func__,
4058 clk->core->name);
4059 goto unlock;
4060 }
4061
	/*
	 * Assign empty clock ops for consumers that might still hold
	 * a reference to this clock.
	 */
4065 flags = clk_enable_lock();
4066 clk->core->ops = &clk_nodrv_ops;
4067 clk_enable_unlock(flags);
4068
4069 if (ops->terminate)
4070 ops->terminate(clk->core->hw);
4071
4072 if (!hlist_empty(&clk->core->children)) {
4073 struct clk_core *child;
4074 struct hlist_node *t;
4075
		/* Reparent all children to the orphan list. */
4077 hlist_for_each_entry_safe(child, t, &clk->core->children,
4078 child_node)
4079 clk_core_set_parent_nolock(child, NULL);
4080 }
4081
4082 clk_core_evict_parent_cache(clk->core);
4083
4084 hlist_del_init(&clk->core->child_node);
4085
4086 if (clk->core->prepare_count)
4087 pr_warn("%s: unregistering prepared clock: %s\n",
4088 __func__, clk->core->name);
4089
4090 if (clk->core->protect_count)
4091 pr_warn("%s: unregistering protected clock: %s\n",
4092 __func__, clk->core->name);
4093
4094 kref_put(&clk->core->ref, __clk_release);
4095 free_clk(clk);
4096unlock:
4097 clk_prepare_unlock();
4098}
4099EXPORT_SYMBOL_GPL(clk_unregister);
4100
/**
 * clk_hw_unregister - unregister a currently registered clk_hw
 * @hw: hardware-specific clock data to unregister
 */
4105void clk_hw_unregister(struct clk_hw *hw)
4106{
4107 clk_unregister(hw->clk);
4108}
4109EXPORT_SYMBOL_GPL(clk_hw_unregister);
4110
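/* devres release callbacks: unregister the clk/clk_hw stashed in the devres. */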
4111static void devm_clk_unregister_cb(struct device *dev, void *res)
4112{
4113 clk_unregister(*(struct clk **)res);
4114}
4115
4116static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
4117{
4118 clk_hw_unregister(*(struct clk_hw **)res);
4119}
4120
/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register(). This function is *deprecated*, use
 * devm_clk_hw_register() instead. Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach. See clk_register() for
 * more information.
 */
4131struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4132{
4133 struct clk *clk;
4134 struct clk **clkp;
4135
4136 clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
4137 if (!clkp)
4138 return ERR_PTR(-ENOMEM);
4139
4140 clk = clk_register(dev, hw);
4141 if (!IS_ERR(clk)) {
4142 *clkp = clk;
4143 devres_add(dev, clkp);
4144 } else {
4145 devres_free(clkp);
4146 }
4147
4148 return clk;
4149}
4150EXPORT_SYMBOL_GPL(devm_clk_register);
4151
/**
 * devm_clk_hw_register - resource managed clk_hw_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_hw_register(). Clocks registered by this function are
 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
 * for more information.
 */
4161int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4162{
4163 struct clk_hw **hwp;
4164 int ret;
4165
4166 hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
4167 if (!hwp)
4168 return -ENOMEM;
4169
4170 ret = clk_hw_register(dev, hw);
4171 if (!ret) {
4172 *hwp = hw;
4173 devres_add(dev, hwp);
4174 } else {
4175 devres_free(hwp);
4176 }
4177
4178 return ret;
4179}
4180EXPORT_SYMBOL_GPL(devm_clk_hw_register);
4181
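/* devres match callbacks used to find the devres entry for a given clk/clk_hw. */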
4182static int devm_clk_match(struct device *dev, void *res, void *data)
4183{
4184 struct clk *c = res;
4185 if (WARN_ON(!c))
4186 return 0;
4187 return c == data;
4188}
4189
4190static int devm_clk_hw_match(struct device *dev, void *res, void *data)
4191{
4192 struct clk_hw *hw = res;
4193
4194 if (WARN_ON(!hw))
4195 return 0;
4196 return hw == data;
4197}
4198
/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @dev: device that is unregistering the clock data
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
4208void devm_clk_unregister(struct device *dev, struct clk *clk)
4209{
4210 WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
4211}
4212EXPORT_SYMBOL_GPL(devm_clk_unregister);
4213
/**
 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
 * @dev: device that is unregistering the hardware-specific clock data
 * @hw: link to hardware-specific clock data
 *
 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
4223void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
4224{
4225 WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
4226 hw));
4227}
4228EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
4229
4230static void devm_clk_release(struct device *dev, void *res)
4231{
4232 clk_put(*(struct clk **)res);
4233}
4234
/**
 * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
 * @dev: device that is registering this clock
 * @hw: clk_hw associated with the clk being consumed
 * @con_id: connection ID string on device
 *
 * Managed clk_hw_get_clk(). Clocks got with this function are
 * automatically clk_put() on driver detach. See clk_put()
 * for more information.
 */
4245struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
4246 const char *con_id)
4247{
4248 struct clk *clk;
4249 struct clk **clkp;
4250
	/* This should not happen because it would mean we have drivers
	 * passing around clk_hw pointers instead of having the caller use
	 * proper clk_get() style APIs
	 */
4255 WARN_ON_ONCE(dev != hw->core->dev);
4256
4257 clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4258 if (!clkp)
4259 return ERR_PTR(-ENOMEM);
4260
4261 clk = clk_hw_get_clk(hw, con_id);
4262 if (!IS_ERR(clk)) {
4263 *clkp = clk;
4264 devres_add(dev, clkp);
4265 } else {
4266 devres_free(clkp);
4267 }
4268
4269 return clk;
4270}
4271EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
4272
/*
 * clkdev helpers
 */
4277void __clk_put(struct clk *clk)
4278{
4279 struct module *owner;
4280
4281 if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4282 return;
4283
4284 clk_prepare_lock();
4285
	/*
	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
	 * given user must be balanced with calls to clk_rate_exclusive_put()
	 * and by that same consumer.
	 */
4291 if (WARN_ON(clk->exclusive_count)) {
		/* We voiced our concern, let's sanitize the situation */
4293 clk->core->protect_count -= (clk->exclusive_count - 1);
4294 clk_core_rate_unprotect(clk->core);
4295 clk->exclusive_count = 0;
4296 }
4297
4298 hlist_del(&clk->clks_node);
4299 if (clk->min_rate > clk->core->req_rate ||
4300 clk->max_rate < clk->core->req_rate)
4301 clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4302
4303 owner = clk->core->owner;
4304 kref_put(&clk->core->ref, __clk_release);
4305
4306 clk_prepare_unlock();
4307
4308 module_put(owner);
4309
4310 free_clk(clk);
4311}
4312
/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * this will cause a nested prepare_lock mutex.
 *
 * In all notification cases (pre, post and abort rate change) the original
 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
 * and the new frequency is passed via struct clk_notifier_data.new_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
4335int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4336{
4337 struct clk_notifier *cn;
4338 int ret = -ENOMEM;
4339
4340 if (!clk || !nb)
4341 return -EINVAL;
4342
4343 clk_prepare_lock();
4344
	/* search the list of notifiers for this clk */
4346 list_for_each_entry(cn, &clk_notifier_list, node)
4347 if (cn->clk == clk)
4348 goto found;
4349
	/* if clk wasn't in the notifier list, allocate new clk_notifier */
4351 cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4352 if (!cn)
4353 goto out;
4354
4355 cn->clk = clk;
4356 srcu_init_notifier_head(&cn->notifier_head);
4357
4358 list_add(&cn->node, &clk_notifier_list);
4359
4360found:
4361 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4362
4363 clk->core->notifier_count++;
4364
4365out:
4366 clk_prepare_unlock();
4367
4368 return ret;
4369}
4370EXPORT_SYMBOL_GPL(clk_notifier_register);
4371
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
4383int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4384{
4385 struct clk_notifier *cn;
4386 int ret = -ENOENT;
4387
4388 if (!clk || !nb)
4389 return -EINVAL;
4390
4391 clk_prepare_lock();
4392
4393 list_for_each_entry(cn, &clk_notifier_list, node) {
4394 if (cn->clk == clk) {
4395 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4396
4397 clk->core->notifier_count--;
4398
			/* XXX the notifier code should handle this better */
4400 if (!cn->notifier_head.head) {
4401 srcu_cleanup_notifier_head(&cn->notifier_head);
4402 list_del(&cn->node);
4403 kfree(cn);
4404 }
4405 break;
4406 }
4407 }
4408
4409 clk_prepare_unlock();
4410
4411 return ret;
4412}
4413EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4414
4415struct clk_notifier_devres {
4416 struct clk *clk;
4417 struct notifier_block *nb;
4418};
4419
4420static void devm_clk_notifier_release(struct device *dev, void *res)
4421{
4422 struct clk_notifier_devres *devres = res;
4423
4424 clk_notifier_unregister(devres->clk, devres->nb);
4425}
4426
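/**
 * devm_clk_notifier_register - register a managed rate-change notifier callback
 * @dev: device for clock "consumer"
 * @clk: clock whose rate we are interested in
 * @nb: notifier block with callback function pointer
 *
 * The notifier is automatically unregistered when @dev is detached.
 *
 * Returns 0 on success, a negative errno otherwise.
 */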
4427int devm_clk_notifier_register(struct device *dev, struct clk *clk,
4428 struct notifier_block *nb)
4429{
4430 struct clk_notifier_devres *devres;
4431 int ret;
4432
4433 devres = devres_alloc(devm_clk_notifier_release,
4434 sizeof(*devres), GFP_KERNEL);
4435
4436 if (!devres)
4437 return -ENOMEM;
4438
4439 ret = clk_notifier_register(clk, nb);
4440 if (!ret) {
4441 devres->clk = clk;
4442 devres->nb = nb;
4443 } else {
4444 devres_free(devres);
4445 }
4446
4447 return ret;
4448}
4449EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
4450
4451#ifdef CONFIG_OF
4452static void clk_core_reparent_orphans(void)
4453{
4454 clk_prepare_lock();
4455 clk_core_reparent_orphans_nolock();
4456 clk_prepare_unlock();
4457}
4458
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
 *       struct clk_hw for the given clock specifier
 * @data: context pointer to be passed into @get callback
 */
4469struct of_clk_provider {
4470 struct list_head link;
4471
4472 struct device_node *node;
4473 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4474 struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4475 void *data;
4476};
4477
4478extern struct of_device_id __clk_of_table;
4479static const struct of_device_id __clk_of_table_sentinel
4480 __used __section("__clk_of_table_end");
4481
4482static LIST_HEAD(of_clk_providers);
4483static DEFINE_MUTEX(of_clk_mutex);
4484
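/* Trivial provider callbacks that simply return the registration data. */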
4485struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4486 void *data)
4487{
4488 return data;
4489}
4490EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4491
4492struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4493{
4494 return data;
4495}
4496EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4497
4498struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4499{
4500 struct clk_onecell_data *clk_data = data;
4501 unsigned int idx = clkspec->args[0];
4502
4503 if (idx >= clk_data->clk_num) {
4504 pr_err("%s: invalid clock index %u\n", __func__, idx);
4505 return ERR_PTR(-EINVAL);
4506 }
4507
4508 return clk_data->clks[idx];
4509}
4510EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4511
4512struct clk_hw *
4513of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4514{
4515 struct clk_hw_onecell_data *hw_data = data;
4516 unsigned int idx = clkspec->args[0];
4517
4518 if (idx >= hw_data->num) {
4519 pr_err("%s: invalid index %u\n", __func__, idx);
4520 return ERR_PTR(-EINVAL);
4521 }
4522
4523 return hw_data->hws[idx];
4524}
4525EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4526
/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 *
 * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
 */
4535int of_clk_add_provider(struct device_node *np,
4536 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4537 void *data),
4538 void *data)
4539{
4540 struct of_clk_provider *cp;
4541 int ret;
4542
4543 if (!np)
4544 return 0;
4545
4546 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4547 if (!cp)
4548 return -ENOMEM;
4549
4550 cp->node = of_node_get(np);
4551 cp->data = data;
4552 cp->get = clk_src_get;
4553
4554 mutex_lock(&of_clk_mutex);
4555 list_add(&cp->link, &of_clk_providers);
4556 mutex_unlock(&of_clk_mutex);
4557 pr_debug("Added clock from %pOF\n", np);
4558
4559 clk_core_reparent_orphans();
4560
4561 ret = of_clk_set_defaults(np, true);
4562 if (ret < 0)
4563 of_clk_del_provider(np);
4564
4565 fwnode_dev_initialized(&np->fwnode, true);
4566
4567 return ret;
4568}
4569EXPORT_SYMBOL_GPL(of_clk_add_provider);
4570
/**
 * of_clk_add_hw_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback.
 */
4577int of_clk_add_hw_provider(struct device_node *np,
4578 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4579 void *data),
4580 void *data)
4581{
4582 struct of_clk_provider *cp;
4583 int ret;
4584
4585 if (!np)
4586 return 0;
4587
4588 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4589 if (!cp)
4590 return -ENOMEM;
4591
4592 cp->node = of_node_get(np);
4593 cp->data = data;
4594 cp->get_hw = get;
4595
4596 mutex_lock(&of_clk_mutex);
4597 list_add(&cp->link, &of_clk_providers);
4598 mutex_unlock(&of_clk_mutex);
4599 pr_debug("Added clk_hw provider from %pOF\n", np);
4600
4601 clk_core_reparent_orphans();
4602
4603 ret = of_clk_set_defaults(np, true);
4604 if (ret < 0)
4605 of_clk_del_provider(np);
4606
4607 fwnode_dev_initialized(&np->fwnode, true);
4608
4609 return ret;
4610}
4611EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
4612
4613static void devm_of_clk_release_provider(struct device *dev, void *res)
4614{
4615 of_clk_del_provider(*(struct device_node **)res);
4616}
4617
/*
 * We allow a child device to use its parent device as the clock provider node
 * for cases like MFD sub-devices where the child device driver wants to use
 * the parent's device tree node but not the parent's clock provider node.
 */
4623static struct device_node *get_clk_provider_node(struct device *dev)
4624{
4625 struct device_node *np, *parent_np;
4626
4627 np = dev->of_node;
4628 parent_np = dev->parent ? dev->parent->of_node : NULL;
4629
4630 if (!of_find_property(np, "#clock-cells", NULL))
4631 if (of_find_property(parent_np, "#clock-cells", NULL))
4632 np = parent_np;
4633
4634 return np;
4635}
4636
/**
 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
 * @dev: Device acting as the clock provider (used for DT node and lifetime)
 * @get: callback for decoding clk_hw
 * @data: context pointer for @get callback
 *
 * Registers clock provider for given device's node. If the device has no DT
 * node or if the device node lacks of clock provider information
 * (#clock-cells) then the parent device's node is scanned for this
 * information. If parent node has the #clock-cells then it is used in
 * registration. Provider is automatically released at device exit.
 *
 * Return: 0 on success or an errno on failure.
 */
4651int devm_of_clk_add_hw_provider(struct device *dev,
4652 struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4653 void *data),
4654 void *data)
4655{
4656 struct device_node **ptr, *np;
4657 int ret;
4658
4659 ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4660 GFP_KERNEL);
4661 if (!ptr)
4662 return -ENOMEM;
4663
4664 np = get_clk_provider_node(dev);
4665 ret = of_clk_add_hw_provider(np, get, data);
4666 if (!ret) {
4667 *ptr = np;
4668 devres_add(dev, ptr);
4669 } else {
4670 devres_free(ptr);
4671 }
4672
4673 return ret;
4674}
4675EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
4676
/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
4681void of_clk_del_provider(struct device_node *np)
4682{
4683 struct of_clk_provider *cp;
4684
4685 if (!np)
4686 return;
4687
4688 mutex_lock(&of_clk_mutex);
4689 list_for_each_entry(cp, &of_clk_providers, link) {
4690 if (cp->node == np) {
4691 list_del(&cp->link);
4692 fwnode_dev_initialized(&np->fwnode, false);
4693 of_node_put(cp->node);
4694 kfree(cp);
4695 break;
4696 }
4697 }
4698 mutex_unlock(&of_clk_mutex);
4699}
4700EXPORT_SYMBOL_GPL(of_clk_del_provider);
4701
4702static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4703{
4704 struct device_node **np = res;
4705
4706 if (WARN_ON(!np || !*np))
4707 return 0;
4708
4709 return *np == data;
4710}
4711
/**
 * devm_of_clk_del_provider() - Remove clock provider registered using devm
 * @dev: Device to whose lifetime the clock provider was bound
 */
4716void devm_of_clk_del_provider(struct device *dev)
4717{
4718 int ret;
4719 struct device_node *np = get_clk_provider_node(dev);
4720
4721 ret = devres_release(dev, devm_of_clk_release_provider,
4722 devm_clk_provider_match, np);
4723
4724 WARN_ON(ret);
4725}
4726EXPORT_SYMBOL(devm_of_clk_del_provider);
4727
/**
 * of_parse_clkspec() - Parse a DT clock specifier for a given device node
 * @np: device node to parse clock specifier from
 * @index: index of phandle to parse clock out of. If index < 0, @name is used
 * @name: clock name to find and parse. If name is NULL, the index is used
 * @out_args: Result of parsing the clock specifier
 *
 * Parses a device node's "clocks" and "clock-names" properties to find the
 * phandle and cells for the index or name that is desired. The resulting
 * clock specifier is placed into @out_args, or an errno is returned when
 * there's a parsing error. The @index argument is ignored if @name is
 * non-NULL.
 *
 * Example:
 *
 * phandle1: clock-controller@1 {
 *	#clock-cells = <2>;
 * };
 *
 * phandle2: clock-controller@2 {
 *	#clock-cells = <1>;
 * };
 *
 * clock-consumer@3 {
 *	clocks = <&phandle1 1 2 &phandle2 3>;
 *	clock-names = "name1", "name2";
 * };
 *
 * To get a device_node for the clock-controller@2 node you may call this
 * function a few different ways:
 *
 *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
 *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
 *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
 *
 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
 * if @name is NULL or @index is uninitialized. Otherwise, -EINVAL if @name is
 * set and @index cannot be found.
 */
4766static int of_parse_clkspec(const struct device_node *np, int index,
4767 const char *name, struct of_phandle_args *out_args)
4768{
4769 int ret = -ENOENT;
4770
	/* Walk up the tree of devices looking for a clock property that matches */
4772 while (np) {
		/*
		 * For named clocks, first look up the name in the
		 * "clock-names" property.  If it cannot be found, then index
		 * will be an error code and of_parse_phandle_with_args() will
		 * return -EINVAL.
		 */
4779 if (name)
4780 index = of_property_match_string(np, "clock-names", name);
4781 ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4782 index, out_args);
4783 if (!ret)
4784 break;
4785 if (name && index >= 0)
4786 break;
4787
		/*
		 * No matching clock found on this node.  If the parent node
		 * has a "clock-ranges" property, then we can try one of its
		 * clocks.
		 */
4793 np = np->parent;
4794 if (np && !of_get_property(np, "clock-ranges", NULL))
4795 break;
4796 index = 0;
4797 }
4798
4799 return ret;
4800}
4801
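/*
 * Ask a provider for a clk_hw, preferring its ->get_hw callback and falling
 * back to ->get plus __clk_get_hw() for legacy providers.
 */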
4802static struct clk_hw *
4803__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4804 struct of_phandle_args *clkspec)
4805{
4806 struct clk *clk;
4807
4808 if (provider->get_hw)
4809 return provider->get_hw(clkspec, provider->data);
4810
4811 clk = provider->get(clkspec, provider->data);
4812 if (IS_ERR(clk))
4813 return ERR_CAST(clk);
4814 return __clk_get_hw(clk);
4815}
4816
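/*
 * Search the registered providers for one matching clkspec->np. Defaults to
 * -EPROBE_DEFER so that consumers retry if the provider registers later.
 */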
4817static struct clk_hw *
4818of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
4819{
4820 struct of_clk_provider *provider;
4821 struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4822
4823 if (!clkspec)
4824 return ERR_PTR(-EINVAL);
4825
4826 mutex_lock(&of_clk_mutex);
4827 list_for_each_entry(provider, &of_clk_providers, link) {
4828 if (provider->node == clkspec->np) {
4829 hw = __of_clk_get_hw_from_provider(provider, clkspec);
4830 if (!IS_ERR(hw))
4831 break;
4832 }
4833 }
4834 mutex_unlock(&of_clk_mutex);
4835
4836 return hw;
4837}
4838
/**
 * of_clk_get_from_provider() - Lookup a clock from a clock provider
 * @clkspec: pointer to a clock specifier data structure
 *
 * This function looks up a struct clk from the registered list of clock
 * providers, an input is a clock specifier data structure as returned
 * from the of_parse_phandle_with_args() function call.
 */
4847struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4848{
4849 struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
4850
4851 return clk_hw_create_clk(NULL, hw, NULL, __func__);
4852}
4853EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4854
4855struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4856 const char *con_id)
4857{
4858 int ret;
4859 struct clk_hw *hw;
4860 struct of_phandle_args clkspec;
4861
4862 ret = of_parse_clkspec(np, index, con_id, &clkspec);
4863 if (ret)
4864 return ERR_PTR(ret);
4865
4866 hw = of_clk_get_hw_from_clkspec(&clkspec);
4867 of_node_put(