/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/*** debugfs support ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
	struct dentry *d;
	int ret = -ENOMEM;

	if (!clk || !pdentry) {
		ret = -EINVAL;
		goto out;
	}

	d = debugfs_create_dir(clk->name, pdentry);
	if (!d)
		goto out;

	clk->dentry = d;

	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
			(u32 *)&clk->rate);
	if (!d)
		goto err_out;

	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
			(u32 *)&clk->flags);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->prepare_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->enable_count);
	if (!d)
		goto err_out;

	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
			(u32 *)&clk->notifier_count);
	if (!d)
		goto err_out;

	ret = 0;
	goto out;

err_out:
	debugfs_remove(clk->dentry);
out:
	return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
	struct clk *child;
	struct hlist_node *tmp;
	int ret = -EINVAL;

	if (!clk || !pdentry)
		goto out;

	ret = clk_debug_create_one(clk, pdentry);

	if (ret)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_debug_create_subtree(child, clk->dentry);

	ret = 0;
out:
	return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so this is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
	struct clk *parent;
	struct dentry *pdentry;
	int ret = 0;

	if (!inited)
		goto out;

	parent = clk->parent;

	/*
	 * Check to see if a clk is a root clk.  Also check that it is
	 * safe to add this clk to debugfs
	 */
	if (!parent)
		if (clk->flags & CLK_IS_ROOT)
			pdentry = rootdir;
		else
			pdentry = orphandir;
	else
		if (parent->dentry)
			pdentry = parent->dentry;
		else
			goto out;

	ret = clk_debug_create_subtree(clk, pdentry);

out:
	return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time; all other clks added dynamically will be
 * registered by clk_debug_register.
 */
static int __init clk_debug_init(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	rootdir = debugfs_create_dir("clk", NULL);

	if (!rootdir)
		return -ENOMEM;

	orphandir = debugfs_create_dir("orphans", rootdir);

	if (!orphandir)
		return -ENOMEM;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_debug_create_subtree(clk, rootdir);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_debug_create_subtree(clk, orphandir);

	inited = 1;

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif

#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
	struct clk *child;
	struct hlist_node *tmp;
	unsigned long flags;

	if (!clk)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_disable_unused_subtree(child);

	spin_lock_irqsave(&enable_lock, flags);

	if (clk->enable_count)
		goto unlock_out;

	if (clk->flags & CLK_IGNORE_UNUSED)
		goto unlock_out;

	if (__clk_is_enabled(clk) && clk->ops->disable)
		clk->ops->disable(clk->hw);

unlock_out:
	spin_unlock_irqrestore(&enable_lock, flags);

out:
	return;
}

static int clk_disable_unused(void)
{
	struct clk *clk;
	struct hlist_node *tmp;

	mutex_lock(&prepare_lock);

	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
		clk_disable_unused_subtree(clk);

	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
		clk_disable_unused_subtree(clk);

	mutex_unlock(&prepare_lock);

	return 0;
}
late_initcall(clk_disable_unused);
#endif

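/*
 * Illustrative sketch only (not part of the original file): a clock that
 * must survive the clk_disable_unused sweep above can be registered with
 * the CLK_IGNORE_UNUSED flag.  The ops table and clock names here are
 * hypothetical placeholders.
 */
#if 0
static struct clk *example_register_always_on(struct device *dev,
		struct clk_hw *hw, const struct clk_ops *my_gate_ops)
{
	const char *parents[] = { "osc" };

	/* CLK_IGNORE_UNUSED exempts this clk from the late_initcall sweep */
	return clk_register(dev, "dbg_trace_gate", my_gate_ops, hw,
			parents, 1, CLK_IGNORE_UNUSED);
}
#endif
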
/*** helper functions ***/

inline const char *__clk_get_name(struct clk *clk)
{
	return !clk ? NULL : clk->name;
}

inline struct clk_hw *__clk_get_hw(struct clk *clk)
{
	return !clk ? NULL : clk->hw;
}

inline u8 __clk_get_num_parents(struct clk *clk)
{
	return !clk ? -EINVAL : clk->num_parents;
}

inline struct clk *__clk_get_parent(struct clk *clk)
{
	return !clk ? NULL : clk->parent;
}

inline int __clk_get_enable_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->enable_count;
}

inline int __clk_get_prepare_count(struct clk *clk)
{
	return !clk ? -EINVAL : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
	unsigned long ret;

	if (!clk) {
		ret = 0;
		goto out;
	}

	ret = clk->rate;

	if (clk->flags & CLK_IS_ROOT)
		goto out;

	if (!clk->parent)
		ret = 0;

out:
	return ret;
}

inline unsigned long __clk_get_flags(struct clk *clk)
{
	return !clk ? -EINVAL : clk->flags;
}

int __clk_is_enabled(struct clk *clk)
{
	int ret;

	if (!clk)
		return -EINVAL;

	/*
	 * .is_enabled is only mandatory for clocks that gate;
	 * fall back to the software usage counter if .is_enabled is missing
	 */
	if (!clk->ops->is_enabled) {
		ret = clk->enable_count ? 1 : 0;
		goto out;
	}

	ret = clk->ops->is_enabled(clk->hw);
out:
	return ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
	struct clk *child;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!strcmp(clk->name, name))
		return clk;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_lookup_subtree(name, child);
		if (ret)
			return ret;
	}

	return NULL;
}

struct clk *__clk_lookup(const char *name)
{
	struct clk *root_clk;
	struct clk *ret;
	struct hlist_node *tmp;

	if (!name)
		return NULL;

	/* search the 'proper' clk tree first */
	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	/* if not found, then search the orphan tree */
	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
		ret = __clk_lookup_subtree(name, root_clk);
		if (ret)
			return ret;
	}

	return NULL;
}

/*** clk api ***/

void __clk_unprepare(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->prepare_count == 0))
		return;

	if (--clk->prepare_count > 0)
		return;

	WARN_ON(clk->enable_count > 0);

	if (clk->ops->unprepare)
		clk->ops->unprepare(clk->hw);

	__clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  For this reason clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
	mutex_lock(&prepare_lock);
	__clk_unprepare(clk);
	mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (clk->prepare_count == 0) {
		ret = __clk_prepare(clk->parent);
		if (ret)
			return ret;

		if (clk->ops->prepare) {
			ret = clk->ops->prepare(clk->hw);
			if (ret) {
				__clk_unprepare(clk->parent);
				return ret;
			}
		}
	}

	clk->prepare_count++;

	return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk ungate operation may require a fast and a
 * slow part.  For this reason clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_prepare(struct clk *clk)
{
	int ret;

	mutex_lock(&prepare_lock);
	ret = __clk_prepare(clk);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;

	if (WARN_ON(clk->enable_count == 0))
		return;

	if (--clk->enable_count > 0)
		return;

	if (clk->ops->disable)
		clk->ops->disable(clk->hw);

	__clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  For
 * this reason clk_unprepare and clk_disable are not mutually exclusive.  In
 * fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&enable_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (!clk)
		return 0;

	if (WARN_ON(clk->prepare_count == 0))
		return -ESHUTDOWN;

	if (clk->enable_count == 0) {
		ret = __clk_enable(clk->parent);

		if (ret)
			return ret;

		if (clk->ops->enable) {
			ret = clk->ops->enable(clk->hw);
			if (ret) {
				__clk_disable(clk->parent);
				return ret;
			}
		}
	}

	clk->enable_count++;
	return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  For this reason clk_enable
 * and clk_prepare are not mutually exclusive.  In fact clk_prepare must be
 * called before clk_enable.  Returns 0 on success, -EERROR otherwise.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&enable_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&enable_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
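
/*
 * Illustrative sketch only (not part of the original file): the canonical
 * consumer sequence for the split prepare/enable API documented above.
 * Assumes the caller already holds a valid struct clk *, e.g. from clk_get.
 */
#if 0
static int example_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep; non-atomic context only */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* must not sleep; atomic-safe */
	if (ret)
		clk_unprepare(clk);

	return ret;
}

static void example_clk_off(struct clk *clk)
{
	clk_disable(clk);		/* the fast part first... */
	clk_unprepare(clk);		/* ...then the part that may sleep */
}
#endif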

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk.  Does not query the hardware.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	mutex_lock(&prepare_lock);
	rate = __clk_get_rate(clk);
	mutex_unlock(&prepare_lock);

	return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = 0;

	if (!clk)
		return -EINVAL;

	if (!clk->ops->round_rate)
		return clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the clk's cached rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long ret;

	mutex_lock(&prepare_lock);
	ret = __clk_round_rate(clk, rate);
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
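
/*
 * Illustrative sketch only (not part of the original file): querying the
 * achievable rate before committing to it.  The 48 MHz target and the
 * tolerance window are arbitrary example values.
 */
#if 0
static int example_pick_rate(struct clk *clk)
{
	long rounded = clk_round_rate(clk, 48000000);

	if (rounded < 0)
		return rounded;

	/* only commit if the achievable rate is close enough */
	if (rounded < 47000000 || rounded > 49000000)
		return -ERANGE;

	return clk_set_rate(clk, rounded);
}
#endif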

/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
		unsigned long old_rate, unsigned long new_rate)
{
	struct clk_notifier *cn;
	struct clk_notifier_data cnd;
	int ret = NOTIFY_DONE;

	cnd.clk = clk;
	cnd.old_rate = old_rate;
	cnd.new_rate = new_rate;

	list_for_each_entry(cn, &clk_notifier_list, node) {
		if (cn->clk == clk) {
			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
					&cnd);
			break;
		}
	}

	return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
	unsigned long old_rate;
	unsigned long parent_rate = 0;
	struct hlist_node *tmp;
	struct clk *child;

	old_rate = clk->rate;

	if (clk->parent)
		parent_rate = clk->parent->rate;

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		clk->rate = parent_rate;

	/*
	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
	 * & ABORT_RATE_CHANGE notifiers
	 */
	if (clk->notifier_count && msg)
		__clk_notify(clk, msg, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		__clk_recalc_rates(child, msg);
}

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
	struct hlist_node *tmp;
	struct clk *child;
	unsigned long new_rate;
	int ret = NOTIFY_DONE;

	if (clk->ops->recalc_rate)
		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
	else
		new_rate = parent_rate;

	/* abort the rate change if a driver returns NOTIFY_BAD */
	if (clk->notifier_count)
		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

	if (ret == NOTIFY_BAD)
		goto out;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		ret = __clk_speculate_rates(child, new_rate);
		if (ret == NOTIFY_BAD)
			break;
	}

out:
	return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
	struct clk *child;
	struct hlist_node *tmp;

	clk->new_rate = new_rate;

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		if (child->ops->recalc_rate)
			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
		else
			child->new_rate = new_rate;
		clk_calc_subtree(child, child->new_rate);
	}
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
	struct clk *top = clk;
	unsigned long best_parent_rate = 0;
	unsigned long new_rate;

	/* sanity */
	if (IS_ERR_OR_NULL(clk))
		return NULL;

	/* never propagate up to the parent */
	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
		if (!clk->ops->round_rate) {
			clk->new_rate = clk->rate;
			return NULL;
		}
	}

	/* need clk->parent from here on out */
	if (!clk->parent) {
		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
		return NULL;
	}

	if (!clk->ops->round_rate) {
		top = clk_calc_new_rates(clk->parent, rate);
		new_rate = clk->new_rate = clk->parent->new_rate;

		goto out;
	}

	best_parent_rate = clk->parent->rate;
	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

	if (best_parent_rate != clk->parent->rate) {
		top = clk_calc_new_rates(clk->parent, best_parent_rate);

		goto out;
	}

out:
	clk_calc_subtree(clk, new_rate);

	return top;
}

808
809/*
810 * Notify about rate changes in a subtree. Always walk down the whole tree
811 * so that in case of an error we can walk down the whole tree again and
812 * abort the change.
813 */
814static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
815{
816 struct hlist_node *tmp;
817 struct clk *child, *fail_clk = NULL;
818 int ret = NOTIFY_DONE;
819
820 if (clk->rate == clk->new_rate)
		return NULL;

	if (clk->notifier_count) {
		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
		if (ret == NOTIFY_BAD)
			fail_clk = clk;
	}

	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
		clk = clk_propagate_rate_change(child, event);
		if (clk)
			fail_clk = clk;
	}

	return fail_clk;
}

/*
 * walk down a subtree and set the new rates notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
	struct clk *child;
	unsigned long old_rate;
	struct hlist_node *tmp;

	old_rate = clk->rate;

	if (clk->ops->set_rate)
		clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);

	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				clk->parent->rate);
	else
		clk->rate = clk->parent->rate;

	if (clk->notifier_count && old_rate != clk->rate)
		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

	hlist_for_each_entry(child, tmp, &clk->children, child_node)
		clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, -EERROR otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk *top, *fail_clk;
	int ret = 0;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	/* bail early if nothing to do */
	if (rate == clk->rate)
		goto out;

	/* calculate new rates and get the topmost changed clock */
	top = clk_calc_new_rates(clk, rate);
	if (!top) {
		ret = -EINVAL;
		goto out;
	}

	/* notify that we are about to change rates */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	if (fail_clk) {
		pr_warn("%s: failed to set %s rate\n", __func__,
				fail_clk->name);
		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
		ret = -EBUSY;
		goto out;
	}

	/* change the rates */
	clk_change_rate(top);

	mutex_unlock(&prepare_lock);

	return 0;
out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
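
/*
 * Illustrative sketch only (not part of the original file): a consumer
 * changing a rate and re-reading the result, since .round_rate may have
 * adjusted the request.  The target rate is an arbitrary example value.
 */
#if 0
static int example_set_pixclk(struct clk *clk)
{
	int ret;

	ret = clk_set_rate(clk, 74250000);
	if (ret)
		return ret;

	/* the achieved rate may differ from the request; read it back */
	pr_info("pixclk now runs at %lu Hz\n", clk_get_rate(clk));

	return 0;
}
#endif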

/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	struct clk *parent;

	mutex_lock(&prepare_lock);
	parent = __clk_get_parent(clk);
	mutex_unlock(&prepare_lock);

	return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
	struct clk *ret = NULL;
	u8 index;

	/* handle the trivial cases */

	if (!clk->num_parents)
		goto out;

	if (clk->num_parents == 1) {
		if (IS_ERR_OR_NULL(clk->parent))
			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
		ret = clk->parent;
		goto out;
	}

	if (!clk->ops->get_parent) {
		WARN(!clk->ops->get_parent,
			"%s: multi-parent clocks must implement .get_parent\n",
			__func__);
		goto out;
	}

	/*
	 * Do our best to cache parent clocks in clk->parents.  This prevents
	 * unnecessary and expensive calls to __clk_lookup.  We don't set
	 * clk->parent here; that is done by the calling function
	 */

	index = clk->ops->get_parent(clk->hw);

	if (!clk->parents)
		clk->parents =
			kmalloc((sizeof(struct clk*) * clk->num_parents),
					GFP_KERNEL);

	if (!clk->parents)
		ret = __clk_lookup(clk->parent_names[index]);
	else if (!clk->parents[index])
		ret = clk->parents[index] =
			__clk_lookup(clk->parent_names[index]);
	else
		ret = clk->parents[index];

out:
	return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
	struct dentry *d;
	struct dentry *new_parent_d;
#endif

	if (!clk || !new_parent)
		return;

	hlist_del(&clk->child_node);

	if (new_parent)
		hlist_add_head(&clk->child_node, &new_parent->children);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
	if (!inited)
		goto out;

	if (new_parent)
		new_parent_d = new_parent->dentry;
	else
		new_parent_d = orphandir;

	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
			new_parent_d, clk->name);
	if (d)
		clk->dentry = d;
	else
		pr_debug("%s: failed to rename debugfs entry for %s\n",
				__func__, clk->name);
out:
#endif

	clk->parent = new_parent;

	__clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk *old_parent;
	unsigned long flags;
	int ret = -EINVAL;
	u8 i;

	old_parent = clk->parent;

	/* find index of new parent clock using cached parent ptrs */
	for (i = 0; i < clk->num_parents; i++)
		if (clk->parents[i] == parent)
			break;

	/*
	 * find index of new parent clock using string name comparison
	 * also try to cache the parent to avoid future calls to __clk_lookup
	 */
	if (i == clk->num_parents)
		for (i = 0; i < clk->num_parents; i++)
			if (!strcmp(clk->parent_names[i], parent->name)) {
				clk->parents[i] = __clk_lookup(parent->name);
				break;
			}

	if (i == clk->num_parents) {
		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
				__func__, parent->name, clk->name);
		goto out;
	}

	/* migrate prepare and enable */
	if (clk->prepare_count)
		__clk_prepare(parent);

	/* FIXME replace with clk_is_enabled(clk) someday */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_enable(parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	/* change clock input source */
	ret = clk->ops->set_parent(clk->hw, i);

	/* clean up old prepare and enable */
	spin_lock_irqsave(&enable_lock, flags);
	if (clk->enable_count)
		__clk_disable(old_parent);
	spin_unlock_irqrestore(&enable_lock, flags);

	if (clk->prepare_count)
		__clk_unprepare(old_parent);

out:
	return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, -EERROR otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (!clk || !clk->ops)
		return -EINVAL;

	if (!clk->ops->set_parent)
		return -ENOSYS;

	/* prevent racing with updates to the clock topology */
	mutex_lock(&prepare_lock);

	if (clk->parent == parent)
		goto out;

	/* propagate PRE_RATE_CHANGE notifications */
	if (clk->notifier_count)
		ret = __clk_speculate_rates(clk, parent->rate);

	/* abort if a driver objects */
	if (ret == NOTIFY_STOP)
		goto out;

	/* only re-parent if the clock is not in use */
	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
		ret = -EBUSY;
	else
		ret = __clk_set_parent(clk, parent);

	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
	if (ret) {
		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
		goto out;
	}

	/* propagate rate recalculation downstream */
	__clk_reparent(clk, parent);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
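
/*
 * Illustrative sketch only (not part of the original file): re-parenting a
 * mux clk.  With CLK_SET_PARENT_GATE set the clk must be unprepared first,
 * as described above.  Both clock names are hypothetical.
 */
#if 0
static int example_switch_mux(void)
{
	struct clk *mux = __clk_lookup("uart_mux");
	struct clk *pll = __clk_lookup("pll2");

	if (!mux || !pll)
		return -ENODEV;

	/* fails with -EBUSY if mux is prepared and CLK_SET_PARENT_GATE is set */
	return clk_set_parent(mux, pll);
}
#endif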

/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 *
 * Any struct clk passed into __clk_init must have the following members
 * populated:
 *	.name
 *	.ops
 *	.hw
 *	.parent_names
 *	.num_parents
 *	.flags
 *
 * Essentially, everything that would normally be passed into clk_register is
 * assumed to be initialized already in __clk_init.  The other members may be
 * populated, but are optional.
 *
 * __clk_init is only exposed via clk-private.h and is intended for use with
 * very large numbers of clocks that need to be statically initialized.  It is
 * a layering violation to include clk-private.h from any code which implements
 * a clock's .ops; as such any statically initialized clock data MUST be in a
 * separate C file from the logic that implements its operations.  Returns 0
 * on success, otherwise an error code.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
	int i, ret = 0;
	struct clk *orphan;
	struct hlist_node *tmp, *tmp2;

	if (!clk)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* check to see if a clock with this name is already registered */
	if (__clk_lookup(clk->name)) {
		pr_debug("%s: clk %s already initialized\n",
				__func__, clk->name);
		ret = -EEXIST;
		goto out;
	}

	/* check that clk_ops are sane.  See Documentation/clk.txt */
	if (clk->ops->set_rate &&
			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	if (clk->ops->set_parent && !clk->ops->get_parent) {
		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
				__func__, clk->name);
		ret = -EINVAL;
		goto out;
	}

	/* throw a WARN if any entries in parent_names are NULL */
	for (i = 0; i < clk->num_parents; i++)
		WARN(!clk->parent_names[i],
				"%s: invalid NULL in %s's .parent_names\n",
				__func__, clk->name);

	/*
	 * Allocate an array of struct clk *'s to avoid unnecessary string
	 * look-ups of clk's possible parents.  This can fail for clocks passed
	 * in to clk_init during early boot; thus any access to clk->parents[]
	 * must always check for a NULL pointer and try to populate it if
	 * necessary.
	 *
	 * If clk->parents is not NULL we skip this entire block.  This allows
	 * for clock drivers to statically initialize clk->parents.
	 */
	if (clk->num_parents && !clk->parents) {
		clk->parents = kmalloc((sizeof(struct clk*) * clk->num_parents),
				GFP_KERNEL);
		/*
		 * __clk_lookup returns NULL for parents that have not been
		 * clk_init'd; thus any access to clk->parents[] must check
		 * for a NULL pointer.  We can always perform lazy lookups for
		 * missing parents later on.
		 */
		if (clk->parents)
			for (i = 0; i < clk->num_parents; i++)
				clk->parents[i] =
					__clk_lookup(clk->parent_names[i]);
	}

	clk->parent = __clk_init_parent(clk);

	/*
	 * Populate clk->parent if parent has already been __clk_init'd.  If
	 * parent has not yet been __clk_init'd then place clk in the orphan
	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
	 * clk list.
	 *
	 * Every time a new clk is clk_init'd then we walk the list of orphan
	 * clocks and re-parent any that are children of the clock currently
	 * being clk_init'd.
	 */
	if (clk->parent)
		hlist_add_head(&clk->child_node,
				&clk->parent->children);
	else if (clk->flags & CLK_IS_ROOT)
		hlist_add_head(&clk->child_node, &clk_root_list);
	else
		hlist_add_head(&clk->child_node, &clk_orphan_list);

	/*
	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
	 * simple clocks and lazy developers the default fallback is to use the
	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
	 * then rate is set to zero.
	 */
	if (clk->ops->recalc_rate)
		clk->rate = clk->ops->recalc_rate(clk->hw,
				__clk_get_rate(clk->parent));
	else if (clk->parent)
		clk->rate = clk->parent->rate;
	else
		clk->rate = 0;

	/*
	 * walk the list of orphan clocks and reparent any that are children of
	 * this clock
	 */
	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node)
		for (i = 0; i < orphan->num_parents; i++)
			if (!strcmp(clk->name, orphan->parent_names[i])) {
				__clk_reparent(orphan, clk);
				break;
			}

	/*
	 * optional platform-specific magic
	 *
	 * The .init callback is not used by any of the basic clock types, but
	 * exists for weird hardware that must perform initialization magic.
	 * Please consider other ways of solving initialization problems before
	 * using this callback, as its use is discouraged.
	 */
	if (clk->ops->init)
		clk->ops->init(clk->hw);

	clk_debug_register(clk);

out:
	mutex_unlock(&prepare_lock);

	return ret;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @name: clock name
 * @ops: operations this clock supports
 * @hw: link to hardware-specific clock data
 * @parent_names: array of string names for all possible parents
 * @num_parents: number of possible parents
 * @flags: framework-level hints and quirks
 *
 * clk_register is the primary interface for populating the clock tree with new
 * clock nodes.  It returns a pointer to the newly allocated struct clk which
 * cannot be dereferenced by driver code but may be used in conjunction with
 * the rest of the clock API.  In the event of an error clk_register will
 * return an error code; drivers must test for an error code after calling
 * clk_register.
 */
struct clk *clk_register(struct device *dev, const char *name,
		const struct clk_ops *ops, struct clk_hw *hw,
		const char **parent_names, u8 num_parents, unsigned long flags)
{
	int i, ret;
	struct clk *clk;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		pr_err("%s: could not allocate clk\n", __func__);
		ret = -ENOMEM;
		goto fail_out;
	}

	clk->name = name;
	clk->ops = ops;
	clk->hw = hw;
	clk->flags = flags;
	clk->num_parents = num_parents;
	hw->clk = clk;

	/* allocate local copy in case parent_names is __initdata */
	clk->parent_names = kzalloc((sizeof(char*) * num_parents),
			GFP_KERNEL);

	if (!clk->parent_names) {
		pr_err("%s: could not allocate clk->parent_names\n", __func__);
		ret = -ENOMEM;
		goto fail_parent_names;
	}

	/* copy each string name in case parent_names is __initdata */
	for (i = 0; i < num_parents; i++) {
		clk->parent_names[i] = kstrdup(parent_names[i], GFP_KERNEL);
		if (!clk->parent_names[i]) {
			pr_err("%s: could not copy parent_names\n", __func__);
			ret = -ENOMEM;
			goto fail_parent_names_copy;
		}
	}

	ret = __clk_init(dev, clk);
	if (!ret)
		return clk;

fail_parent_names_copy:
	while (--i >= 0)
		kfree(clk->parent_names[i]);
	kfree(clk->parent_names);
fail_parent_names:
	kfree(clk);
fail_out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);
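
/*
 * Illustrative sketch only (not part of the original file): registering a
 * single-parent gate with clk_register.  The ops table and names are
 * hypothetical; real gate ops would poke hardware in .enable/.disable.
 */
#if 0
static int example_register_gate(struct device *dev, struct clk_hw *hw,
		const struct clk_ops *my_gate_ops)
{
	const char *parents[] = { "pll1_div4" };
	struct clk *clk;

	clk = clk_register(dev, "spi_gate", my_gate_ops, hw, parents, 1, 0);

	/* clk_register returns an error code on failure; always test for it */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}
#endif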

/*** clk rate change notifiers ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would recursively take the prepare_lock mutex and deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn;
	int ret = -ENOMEM;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	/* search the list of notifiers for this clk */
	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	/* if clk wasn't in the notifier list, allocate new clk_notifier */
	if (cn->clk != clk) {
		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
		if (!cn)
			goto out;

		cn->clk = clk;
		srcu_init_notifier_head(&cn->notifier_head);

		list_add(&cn->node, &clk_notifier_list);
	}

	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

	clk->notifier_count++;

out:
	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);
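
/*
 * Illustrative sketch only (not part of the original file): a rate-change
 * notifier callback wired up via clk_notifier_register.  Per the comment
 * above, the callback must not call back into top-level clk APIs.  The
 * 100 MHz ceiling is an arbitrary example value.
 */
#if 0
static int example_rate_cb(struct notifier_block *nb, unsigned long event,
		void *data)
{
	struct clk_notifier_data *cnd = data;

	/* veto rate changes this consumer cannot tolerate */
	if (event == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_rate_cb,
};

/* elsewhere, from non-atomic context: clk_notifier_register(clk, &example_nb); */
#endif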

/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Requests no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
	struct clk_notifier *cn = NULL;
	int ret = -EINVAL;

	if (!clk || !nb)
		return -EINVAL;

	mutex_lock(&prepare_lock);

	list_for_each_entry(cn, &clk_notifier_list, node)
		if (cn->clk == clk)
			break;

	if (cn->clk == clk) {
		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

		clk->notifier_count--;

		/* XXX the notifier code should handle this better */
		if (!cn->notifier_head.head) {
			srcu_cleanup_notifier_head(&cn->notifier_head);
			kfree(cn);
		}

	} else {
		ret = -ENOENT;
	}

	mutex_unlock(&prepare_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);