// SPDX-License-Identifier: GPL-2.0
/*
 * RTC subsystem, interface functions
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtc.c
 */

#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/log2.h>
#include <linux/workqueue.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rtc.h>

static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);

static void rtc_add_offset(struct rtc_device *rtc, struct rtc_time *tm)
{
	time64_t secs;

	if (!rtc->offset_secs)
		return;

	secs = rtc_tm_to_time64(tm);

	/*
	 * Values read from the RTC device are always within the hardware's
	 * original valid range. Skip the region where the expanded range
	 * overlaps the original one, since no offset needs to be added there.
	 */
	if ((rtc->start_secs > rtc->range_min && secs >= rtc->start_secs) ||
	    (rtc->start_secs < rtc->range_min &&
	     secs <= (rtc->start_secs + rtc->range_max - rtc->range_min)))
		return;

	rtc_time64_to_tm(secs + rtc->offset_secs, tm);
}

static void rtc_subtract_offset(struct rtc_device *rtc, struct rtc_time *tm)
{
	time64_t secs;

	if (!rtc->offset_secs)
		return;

	secs = rtc_tm_to_time64(tm);

	/*
	 * If the time being set already falls within the hardware's valid
	 * range, there is no need to subtract the offset. Otherwise subtract
	 * the offset so that the value written is valid for the RTC device.
	 */
	if (secs >= rtc->range_min && secs <= rtc->range_max)
		return;

	rtc_time64_to_tm(secs - rtc->offset_secs, tm);
}

static int rtc_valid_range(struct rtc_device *rtc, struct rtc_time *tm)
{
	if (rtc->range_min != rtc->range_max) {
		time64_t time = rtc_tm_to_time64(tm);
		time64_t range_min = rtc->set_start_time ? rtc->start_secs :
			rtc->range_min;
		timeu64_t range_max = rtc->set_start_time ?
			(rtc->start_secs + rtc->range_max - rtc->range_min) :
			rtc->range_max;

		if (time < range_min || time > range_max)
			return -ERANGE;
	}

	return 0;
}

static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	if (!rtc->ops) {
		err = -ENODEV;
	} else if (!rtc->ops->read_time) {
		err = -EINVAL;
	} else {
		memset(tm, 0, sizeof(struct rtc_time));
		err = rtc->ops->read_time(rtc->dev.parent, tm);
		if (err < 0) {
			dev_dbg(&rtc->dev, "read_time: fail to read: %d\n",
				err);
			return err;
		}

		rtc_add_offset(rtc, tm);

		err = rtc_valid_tm(tm);
		if (err < 0)
			dev_dbg(&rtc->dev, "read_time: rtc_time isn't valid\n");
	}
	return err;
}

int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	err = __rtc_read_time(rtc, tm);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_time(rtc_tm_to_time64(tm), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

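/*
 * Illustrative sketch (not part of the original file): a caller that holds a
 * struct rtc_device reference, e.g. obtained via rtc_class_open(), can read
 * the current time like this; rtc_read_time() serializes against other
 * accessors with ops_lock and validates the result before returning it.
 *
 *	struct rtc_time tm;
 *	int err = rtc_read_time(rtc, &tm);
 *
 *	if (!err)
 *		pr_info("RTC time: %ptR\n", &tm);
 */
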
int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
	int err, uie;

	err = rtc_valid_tm(tm);
	if (err != 0)
		return err;

	err = rtc_valid_range(rtc, tm);
	if (err)
		return err;

	rtc_subtract_offset(rtc, tm);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	uie = rtc->uie_rtctimer.enabled || rtc->uie_irq_active;
#else
	uie = rtc->uie_rtctimer.enabled;
#endif
	if (uie) {
		err = rtc_update_irq_enable(rtc, 0);
		if (err)
			return err;
	}

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops)
		err = -ENODEV;
	else if (rtc->ops->set_time)
		err = rtc->ops->set_time(rtc->dev.parent, tm);
	else
		err = -EINVAL;

	pm_stay_awake(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
	/* A timer might have just expired */
	schedule_work(&rtc->irqwork);

	if (uie) {
		err = rtc_update_irq_enable(rtc, 1);
		if (err)
			return err;
	}

	trace_rtc_set_time(rtc_tm_to_time64(tm), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);

static int rtc_read_alarm_internal(struct rtc_device *rtc,
				   struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (!rtc->ops) {
		err = -ENODEV;
	} else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) {
		err = -EINVAL;
	} else {
		alarm->enabled = 0;
		alarm->pending = 0;
		alarm->time.tm_sec = -1;
		alarm->time.tm_min = -1;
		alarm->time.tm_hour = -1;
		alarm->time.tm_mday = -1;
		alarm->time.tm_mon = -1;
		alarm->time.tm_year = -1;
		alarm->time.tm_wday = -1;
		alarm->time.tm_yday = -1;
		alarm->time.tm_isdst = -1;
		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
	}

	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}

int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time before, now;
	int first_time = 1;
	time64_t t_now, t_alm;
	enum { none, day, month, year } missing = none;
	unsigned int days;

	/*
	 * The lower level RTC driver may return -1 in some fields,
	 * creating invalid alarm->time values, for reasons like:
	 *
	 *   - The hardware may not be capable of filling them in;
	 *     many alarms match only on time-of-day fields, not
	 *     day/month/year calendar data.
	 *
	 *   - Some hardware uses illegal values as "wildcard" match
	 *     values, which non-Linux firmware (like a BIOS) may try
	 *     to set up as e.g. "alarm 15 minutes after each hour".
	 *     Linux uses only oneshot alarms.
	 *
	 * When we see that here, we deal with it by using values from
	 * a current RTC timestamp for any missing (-1) values.
	 *
	 * But this can be racy, because some fields of the RTC timestamp
	 * may have wrapped in the interval since we read the RTC alarm,
	 * which would lead to us inserting inconsistent values in place
	 * of the -1 fields. Reading the alarm and timestamp in the
	 * reverse order would have the same race.
	 *
	 * So we first read an RTC timestamp, then read the RTC alarm
	 * value, and then read a second RTC timestamp. If any field of
	 * the second timestamp differs from the first, our snapshot may
	 * be inconsistent with the one used by rtc_read_alarm_internal(),
	 * and we loop and redo the whole sequence until the two
	 * timestamps agree.
	 *
	 * This could instead be done in the lower level drivers, but
	 * since more than one of them needs it, it is done here.
	 */

	/* read the RTC time */
	err = rtc_read_time(rtc, &before);
	if (err < 0)
		return err;
	do {
		if (!first_time)
			memcpy(&before, &now, sizeof(struct rtc_time));
		first_time = 0;

		/* read the RTC alarm value, which may be incomplete */
		err = rtc_read_alarm_internal(rtc, alarm);
		if (err)
			return err;

		/* full-function RTCs won't have such missing fields */
		if (rtc_valid_tm(&alarm->time) == 0) {
			rtc_add_offset(rtc, &alarm->time);
			return 0;
		}

		/* get the "after" timestamp, to detect wrapped fields */
		err = rtc_read_time(rtc, &now);
		if (err < 0)
			return err;

		/* note that tm_sec is a "don't care" value here: */
	} while (before.tm_min != now.tm_min ||
		 before.tm_hour != now.tm_hour ||
		 before.tm_mon != now.tm_mon ||
		 before.tm_year != now.tm_year);

	/*
	 * Fill in the missing alarm fields using the timestamp; we
	 * know there's at least one since alarm->time is invalid.
	 */
	if (alarm->time.tm_sec == -1)
		alarm->time.tm_sec = now.tm_sec;
	if (alarm->time.tm_min == -1)
		alarm->time.tm_min = now.tm_min;
	if (alarm->time.tm_hour == -1)
		alarm->time.tm_hour = now.tm_hour;

	/* For simplicity, only support date rollover for now */
	if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
		alarm->time.tm_mday = now.tm_mday;
		missing = day;
	}
	if ((unsigned int)alarm->time.tm_mon >= 12) {
		alarm->time.tm_mon = now.tm_mon;
		if (missing == none)
			missing = month;
	}
	if (alarm->time.tm_year == -1) {
		alarm->time.tm_year = now.tm_year;
		if (missing == none)
			missing = year;
	}

	/*
	 * Can't proceed if the alarm is still invalid after replacing
	 * missing fields.
	 */
	err = rtc_valid_tm(&alarm->time);
	if (err)
		goto done;

	/* with luck, no rollover is needed */
	t_now = rtc_tm_to_time64(&now);
	t_alm = rtc_tm_to_time64(&alarm->time);
	if (t_now < t_alm)
		goto done;

	switch (missing) {
	/*
	 * 24 hour rollover ... if it's now 10am Monday, an alarm that
	 * will trigger at 5am will do so at 5am Tuesday, which
	 * could also be in the next month or year. This is a common
	 * case, especially for PCs.
	 */
	case day:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
		t_alm += 24 * 60 * 60;
		rtc_time64_to_tm(t_alm, &alarm->time);
		break;

	/*
	 * Month rollover ... if it's the 31st, an alarm on the 3rd will
	 * be next month. An alarm matching on the 30th, 29th, or 28th
	 * may end up in the month after that! Many newer PCs support
	 * this type of alarm.
	 */
	case month:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
		do {
			if (alarm->time.tm_mon < 11) {
				alarm->time.tm_mon++;
			} else {
				alarm->time.tm_mon = 0;
				alarm->time.tm_year++;
			}
			days = rtc_month_days(alarm->time.tm_mon,
					      alarm->time.tm_year);
		} while (days < alarm->time.tm_mday);
		break;

	/* Year rollover ... easy except for leap years! */
	case year:
		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
		do {
			alarm->time.tm_year++;
		} while (!is_leap_year(alarm->time.tm_year + 1900) &&
			 rtc_valid_tm(&alarm->time) != 0);
		break;

	default:
		dev_warn(&rtc->dev, "alarm rollover not handled\n");
	}

	err = rtc_valid_tm(&alarm->time);

done:
	if (err)
		dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
			 &alarm->time);

	return err;
}

int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (!rtc->ops) {
		err = -ENODEV;
	} else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) {
		err = -EINVAL;
	} else {
		memset(alarm, 0, sizeof(struct rtc_wkalrm));
		alarm->enabled = rtc->aie_timer.enabled;
		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
	}
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	struct rtc_time tm;
	time64_t now, scheduled;
	int err;

	err = rtc_valid_tm(&alarm->time);
	if (err)
		return err;

	scheduled = rtc_tm_to_time64(&alarm->time);

	/* Make sure we're not setting alarms in the past */
	err = __rtc_read_time(rtc, &tm);
	if (err)
		return err;
	now = rtc_tm_to_time64(&tm);
	if (scheduled <= now)
		return -ETIME;
	/*
	 * XXX - We just checked that the alarm time is not in the past,
	 * but there is still a race window: if the alarm is set for the
	 * next second and the second ticks over right here, we program
	 * an alarm that has already passed.
	 */

	rtc_subtract_offset(rtc, &alarm->time);

	if (!rtc->ops)
		err = -ENODEV;
	else if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
		err = -EINVAL;
	else
		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);

	trace_rtc_set_alarm(rtc_tm_to_time64(&alarm->time), err);
	return err;
}

int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;

	if (!rtc->ops)
		return -ENODEV;
	else if (!test_bit(RTC_FEATURE_ALARM, rtc->features))
		return -EINVAL;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = rtc_valid_range(rtc, &alarm->time);
	if (err)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;
	if (rtc->aie_timer.enabled)
		rtc_timer_remove(rtc, &rtc->aie_timer);

	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = 0;
	if (alarm->enabled)
		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);

	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);

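/*
 * Illustrative sketch (not part of the original file): arm a one-shot alarm
 * 60 seconds from the current RTC time. The caller owns a valid rtc pointer;
 * rtc_set_alarm() range-checks the time and queues the alarm on the
 * timerqueue, which in turn programs the hardware alarm.
 *
 *	struct rtc_wkalrm alrm = { .enabled = 1 };
 *	struct rtc_time tm;
 *	int err;
 *
 *	err = rtc_read_time(rtc, &tm);
 *	if (!err) {
 *		rtc_time64_to_tm(rtc_tm_to_time64(&tm) + 60, &alrm.time);
 *		err = rtc_set_alarm(rtc, &alrm);
 *	}
 */
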
/* Called once per device from rtc_device_register */
int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
	int err;
	struct rtc_time now;

	err = rtc_valid_tm(&alarm->time);
	if (err != 0)
		return err;

	err = rtc_read_time(rtc, &now);
	if (err)
		return err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
	rtc->aie_timer.period = 0;

	/* Alarm has to be enabled & in the future for us to enqueue it */
	if (alarm->enabled && (rtc_tm_to_ktime(now) <
			       rtc->aie_timer.node.expires)) {
		rtc->aie_timer.enabled = 1;
		timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
		trace_rtc_timer_enqueue(&rtc->aie_timer);
	}
	mutex_unlock(&rtc->ops_lock);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_initialize_alarm);

int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

	if (rtc->aie_timer.enabled != enabled) {
		if (enabled)
			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
		else
			rtc_timer_remove(rtc, &rtc->aie_timer);
	}

	if (err)
		/* nothing */;
	else if (!rtc->ops)
		err = -ENODEV;
	else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->alarm_irq_enable)
		err = -EINVAL;
	else
		err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);

	mutex_unlock(&rtc->ops_lock);

	trace_rtc_alarm_irq_enable(enabled, err);
	return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);

int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
	int err;

	err = mutex_lock_interruptible(&rtc->ops_lock);
	if (err)
		return err;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	if (enabled == 0 && rtc->uie_irq_active) {
		mutex_unlock(&rtc->ops_lock);
		return rtc_dev_update_irq_enable_emul(rtc, 0);
	}
#endif

	if (rtc->uie_rtctimer.enabled == enabled)
		goto out;

	if (rtc->uie_unsupported || !test_bit(RTC_FEATURE_ALARM, rtc->features)) {
		mutex_unlock(&rtc->ops_lock);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
		return rtc_dev_update_irq_enable_emul(rtc, enabled);
#else
		return -EINVAL;
#endif
	}

	if (enabled) {
		struct rtc_time tm;
		ktime_t now, onesec;

		err = __rtc_read_time(rtc, &tm);
		if (err)
			goto out;
		onesec = ktime_set(1, 0);
		now = rtc_tm_to_ktime(tm);
		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
		rtc->uie_rtctimer.period = ktime_set(1, 0);
		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
	} else {
		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
	}

out:
	mutex_unlock(&rtc->ops_lock);

	return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);

/**
 * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
 * @rtc: pointer to the rtc device
 * @num: number of occurrences of the event
 * @mode: type of the event, RTC_AF, RTC_UF or RTC_PF
 *
 * This function is called when an AIE, UIE or PIE mode interrupt
 * has occurred (or been emulated).
 */
void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
{
	unsigned long flags;

	/* mark one irq of the appropriate mode */
	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF | mode);
	spin_unlock_irqrestore(&rtc->irq_lock, flags);

	wake_up_interruptible(&rtc->irq_queue);
	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}

/**
 * rtc_aie_update_irq - AIE mode rtctimer hook
 * @rtc: pointer to the rtc_device
 *
 * This function is called when the aie_timer expires.
 */
void rtc_aie_update_irq(struct rtc_device *rtc)
{
	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
}

/**
 * rtc_uie_update_irq - UIE mode rtctimer hook
 * @rtc: pointer to the rtc_device
 *
 * This function is called when the uie_rtctimer expires.
 */
void rtc_uie_update_irq(struct rtc_device *rtc)
{
	rtc_handle_legacy_irq(rtc, 1, RTC_UF);
}

/**
 * rtc_pie_update_irq - PIE mode hrtimer hook
 * @timer: pointer to the pie mode hrtimer
 *
 * This function is used to emulate PIE mode interrupts
 * using an hrtimer. It is called when the periodic hrtimer
 * expires.
 */
enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
{
	struct rtc_device *rtc;
	ktime_t period;
	u64 count;

	rtc = container_of(timer, struct rtc_device, pie_timer);

	period = NSEC_PER_SEC / rtc->irq_freq;
	count = hrtimer_forward_now(timer, period);

	rtc_handle_legacy_irq(rtc, count, RTC_PF);

	return HRTIMER_RESTART;
}

/**
 * rtc_update_irq - Triggered when an RTC interrupt occurs.
 * @rtc: the rtc device
 * @num: how many irqs are being reported (usually one)
 * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
 */
void rtc_update_irq(struct rtc_device *rtc,
		    unsigned long num, unsigned long events)
{
	if (IS_ERR_OR_NULL(rtc))
		return;

	pm_stay_awake(rtc->dev.parent);
	schedule_work(&rtc->irqwork);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);

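/*
 * Illustrative sketch (not part of the original file): a typical RTC driver
 * calls rtc_update_irq() from its interrupt handler once it has confirmed
 * that the alarm fired; the driver-side names below are hypothetical.
 *
 *	static irqreturn_t foo_rtc_irq(int irq, void *dev_id)
 *	{
 *		struct foo_rtc *priv = dev_id;
 *
 *		// acknowledge/clear the alarm in hardware here, then:
 *		rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
 *		return IRQ_HANDLED;
 *	}
 */
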
struct rtc_device *rtc_class_open(const char *name)
{
	struct device *dev;
	struct rtc_device *rtc = NULL;

	dev = class_find_device_by_name(rtc_class, name);
	if (dev)
		rtc = to_rtc_device(dev);

	if (rtc) {
		if (!try_module_get(rtc->owner)) {
			put_device(dev);
			rtc = NULL;
		}
	}

	return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct rtc_device *rtc)
{
	module_put(rtc->owner);
	put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);

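/*
 * Illustrative sketch (not part of the original file): kernel code that wants
 * to use an RTC by name (e.g. the RTC_HCTOSYS boot-time code) pairs
 * rtc_class_open() with rtc_class_close() to hold the device and module
 * references for the duration of its access:
 *
 *	struct rtc_device *rtc = rtc_class_open("rtc0");
 *	struct rtc_time tm;
 *
 *	if (rtc) {
 *		if (!rtc_read_time(rtc, &tm))
 *			pr_info("rtc0 reads %ptR\n", &tm);
 *		rtc_class_close(rtc);
 *	}
 */
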
static int rtc_update_hrtimer(struct rtc_device *rtc, int enabled)
{
	/*
	 * Always try to cancel the timer first: otherwise the hrtimer could
	 * be restarted while its callback is still running, before the
	 * callback has returned HRTIMER_RESTART.
	 *
	 * hrtimer_cancel() cannot be used here because it would busy-wait
	 * for a running callback; instead, report failure and let the
	 * caller retry.
	 */
	if (hrtimer_try_to_cancel(&rtc->pie_timer) < 0)
		return -1;

	if (enabled) {
		ktime_t period = NSEC_PER_SEC / rtc->irq_freq;

		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
	}
	return 0;
}

/**
 * rtc_irq_set_state - enable/disable 2^N Hz periodic IRQs
 * @rtc: the rtc device
 * @enabled: true to enable periodic IRQs
 * Context: any
 *
 * Note that rtc_irq_set_freq() should previously have been used to
 * specify the desired frequency of the periodic IRQ.
 */
int rtc_irq_set_state(struct rtc_device *rtc, int enabled)
{
	int err = 0;

	while (rtc_update_hrtimer(rtc, enabled) < 0)
		cpu_relax();

	rtc->pie_enabled = enabled;

	trace_rtc_irq_set_state(enabled, err);
	return err;
}

/**
 * rtc_irq_set_freq - set 2^N Hz periodic IRQ frequency
 * @rtc: the rtc device
 * @freq: positive frequency
 * Context: any
 *
 * Note that rtc_irq_set_state() is used to enable or disable the
 * periodic IRQs.
 */
int rtc_irq_set_freq(struct rtc_device *rtc, int freq)
{
	int err = 0;

	if (freq <= 0 || freq > RTC_MAX_FREQ)
		return -EINVAL;

	rtc->irq_freq = freq;
	while (rtc->pie_enabled && rtc_update_hrtimer(rtc, 1) < 0)
		cpu_relax();

	trace_rtc_irq_set_freq(freq, err);
	return err;
}

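/*
 * Illustrative sketch (not part of the original file): the legacy periodic
 * interrupt is emulated by first selecting a frequency and then enabling the
 * PIE hrtimer, along the lines of:
 *
 *	err = rtc_irq_set_freq(rtc, 64);		// 64 Hz
 *	if (!err)
 *		err = rtc_irq_set_state(rtc, 1);	// start emitting RTC_PF
 */
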
/**
 * rtc_timer_enqueue - Adds a rtc_timer to the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being added.
 *
 * Enqueues a timer onto the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Sets the enabled bit on the added timer.
 *
 * Must hold ops_lock for proper serialization of the timerqueue.
 */
static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
	struct rtc_time tm;
	ktime_t now;

	timer->enabled = 1;
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);

	/* Skip over expired timers */
	while (next) {
		if (next->expires >= now)
			break;
		next = timerqueue_iterate_next(next);
	}

	timerqueue_add(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_enqueue(timer);
	if (!next || ktime_before(timer->node.expires, next->expires)) {
		struct rtc_wkalrm alarm;
		int err;

		alarm.time = rtc_ktime_to_tm(timer->node.expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		} else if (err) {
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			return err;
		}
	}
	return 0;
}

static void rtc_alarm_disable(struct rtc_device *rtc)
{
	if (!rtc->ops || !test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->alarm_irq_enable)
		return;

	rtc->ops->alarm_irq_enable(rtc->dev.parent, false);
	trace_rtc_alarm_irq_enable(0, 0);
}

/**
 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
 * @rtc: rtc device
 * @timer: timer being removed.
 *
 * Removes a timer from the rtc device's timerqueue and sets
 * the next alarm event appropriately.
 *
 * Clears the enabled bit on the removed timer.
 *
 * Must hold ops_lock for proper serialization of the timerqueue.
 */
static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
{
	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);

	timerqueue_del(&rtc->timerqueue, &timer->node);
	trace_rtc_timer_dequeue(timer);
	timer->enabled = 0;
	if (next == &timer->node) {
		struct rtc_wkalrm alarm;
		int err;

		next = timerqueue_getnext(&rtc->timerqueue);
		if (!next) {
			rtc_alarm_disable(rtc);
			return;
		}
		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			pm_stay_awake(rtc->dev.parent);
			schedule_work(&rtc->irqwork);
		}
	}
}

/**
 * rtc_timer_do_work - Expires rtc timers
 * @work: work item
 *
 * Expires rtc timers and reprograms the next alarm event if needed.
 * Called via the irqwork workqueue item.
 *
 * Serializes access to the timerqueue via the ops_lock mutex.
 */
void rtc_timer_do_work(struct work_struct *work)
{
	struct rtc_timer *timer;
	struct timerqueue_node *next;
	ktime_t now;
	struct rtc_time tm;

	struct rtc_device *rtc =
		container_of(work, struct rtc_device, irqwork);

	mutex_lock(&rtc->ops_lock);
again:
	__rtc_read_time(rtc, &tm);
	now = rtc_tm_to_ktime(tm);
	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
		if (next->expires > now)
			break;

		/* expire timer */
		timer = container_of(next, struct rtc_timer, node);
		timerqueue_del(&rtc->timerqueue, &timer->node);
		trace_rtc_timer_dequeue(timer);
		timer->enabled = 0;
		if (timer->func)
			timer->func(timer->rtc);

		trace_rtc_timer_fired(timer);
		/* Re-add/forward periodic timers to the timerqueue */
		if (ktime_to_ns(timer->period)) {
			timer->node.expires = ktime_add(timer->node.expires,
							timer->period);
			timer->enabled = 1;
			timerqueue_add(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_enqueue(timer);
		}
	}

	/* Set next alarm */
	if (next) {
		struct rtc_wkalrm alarm;
		int err;
		int retry = 3;

		alarm.time = rtc_ktime_to_tm(next->expires);
		alarm.enabled = 1;
reprogram:
		err = __rtc_set_alarm(rtc, &alarm);
		if (err == -ETIME) {
			goto again;
		} else if (err) {
			if (retry-- > 0)
				goto reprogram;

			timer = container_of(next, struct rtc_timer, node);
			timerqueue_del(&rtc->timerqueue, &timer->node);
			trace_rtc_timer_dequeue(timer);
			timer->enabled = 0;
			dev_err(&rtc->dev, "__rtc_set_alarm: err=%d\n", err);
			goto again;
		}
	} else {
		rtc_alarm_disable(rtc);
	}

	pm_relax(rtc->dev.parent);
	mutex_unlock(&rtc->ops_lock);
}

/**
 * rtc_timer_init - Initializes an rtc_timer
 * @timer: timer to be initialized
 * @f: function pointer to be called when the timer fires
 * @rtc: pointer to the rtc_device
 *
 * Kernel interface to initialize an rtc_timer.
 */
void rtc_timer_init(struct rtc_timer *timer, void (*f)(struct rtc_device *r),
		    struct rtc_device *rtc)
{
	timerqueue_init(&timer->node);
	timer->enabled = 0;
	timer->func = f;
	timer->rtc = rtc;
}

/**
 * rtc_timer_start - Sets an rtc_timer to fire in the future
 * @rtc: rtc device to be used
 * @timer: timer being set
 * @expires: time at which to expire the timer
 * @period: period with which the timer recurs, or 0 for one-shot
 *
 * Kernel interface to set an rtc_timer.
 */
int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer *timer,
		    ktime_t expires, ktime_t period)
{
	int ret = 0;

	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);

	timer->node.expires = expires;
	timer->period = period;

	ret = rtc_timer_enqueue(rtc, timer);

	mutex_unlock(&rtc->ops_lock);
	return ret;
}

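/*
 * Illustrative sketch (not part of the original file): arm a periodic
 * rtc_timer that calls back once per RTC second, mirroring how the
 * uie_rtctimer is used above; the callback and timer names are hypothetical.
 * rtc_timer_cancel() stops it again.
 *
 *	static void my_second_tick(struct rtc_device *rtc)
 *	{
 *		dev_info(&rtc->dev, "tick\n");
 *	}
 *
 *	struct rtc_timer my_timer;
 *	struct rtc_time tm;
 *
 *	rtc_timer_init(&my_timer, my_second_tick, rtc);
 *	if (!rtc_read_time(rtc, &tm))
 *		rtc_timer_start(rtc, &my_timer,
 *				ktime_add(rtc_tm_to_ktime(tm), ktime_set(1, 0)),
 *				ktime_set(1, 0));
 */
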
/**
 * rtc_timer_cancel - Stops an rtc_timer
 * @rtc: rtc device to be used
 * @timer: timer being stopped
 *
 * Kernel interface to cancel an rtc_timer.
 */
void rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer *timer)
{
	mutex_lock(&rtc->ops_lock);
	if (timer->enabled)
		rtc_timer_remove(rtc, timer);
	mutex_unlock(&rtc->ops_lock);
}

/**
 * rtc_read_offset - Read the amount of rtc offset in parts per billion
 * @rtc: rtc device to be used
 * @offset: the offset in parts per billion
 *
 * See rtc_set_offset() below for details of the offset semantics.
 *
 * Kernel interface to read the rtc clock offset.
 * Returns 0 on success, or a negative number on error.
 * If read_offset() is not implemented for the rtc, returns -EINVAL.
 */
int rtc_read_offset(struct rtc_device *rtc, long *offset)
{
	int ret;

	if (!rtc->ops)
		return -ENODEV;

	if (!rtc->ops->read_offset)
		return -EINVAL;

	mutex_lock(&rtc->ops_lock);
	ret = rtc->ops->read_offset(rtc->dev.parent, offset);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_read_offset(*offset, ret);
	return ret;
}

/**
 * rtc_set_offset - Adjusts the duration of the average second
 * @rtc: rtc device to be used
 * @offset: the offset in parts per billion
 *
 * Some RTCs allow an adjustment to the average duration of a second
 * to compensate for differences in the actual clock rate due to
 * temperature, the crystal, capacitor, etc.
 *
 * The adjustment applied is as follows:
 *	t = t0 * (1 + offset * 1e-9)
 * where t0 is the measured length of 1 RTC second with offset = 0.
 *
 * Kernel interface to adjust an rtc clock offset.
 * Returns 0 on success, or a negative number on error.
 * If the rtc offset is not settable (or not implemented), returns -EINVAL.
 */
int rtc_set_offset(struct rtc_device *rtc, long offset)
{
	int ret;

	if (!rtc->ops)
		return -ENODEV;

	if (!rtc->ops->set_offset)
		return -EINVAL;

	mutex_lock(&rtc->ops_lock);
	ret = rtc->ops->set_offset(rtc->dev.parent, offset);
	mutex_unlock(&rtc->ops_lock);

	trace_rtc_set_offset(offset, ret);
	return ret;
}

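/*
 * Illustrative sketch (not part of the original file): the offset is in parts
 * per billion and, per the formula above, a positive offset lengthens each
 * RTC second. So an RTC measured to run 10 ppm fast could be trimmed by
 * adding 10000 ppb to its current offset:
 *
 *	long cur;
 *
 *	if (!rtc_read_offset(rtc, &cur))
 *		rtc_set_offset(rtc, cur + 10000);
 */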