#include <linux/rtc.h>
#include <linux/log2.h>

int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->read_time)
                err = -EINVAL;
        else {
                memset(tm, 0, sizeof(struct rtc_time));
                err = rtc->ops->read_time(rtc->dev.parent, tm);
        }

        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_read_time);

int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
{
        int err;

        err = rtc_valid_tm(tm);
        if (err != 0)
                return err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops)
                err = -ENODEV;
        else if (rtc->ops->set_time)
                err = rtc->ops->set_time(rtc->dev.parent, tm);
        else if (rtc->ops->set_mmss) {
                unsigned long secs;
                err = rtc_tm_to_time(tm, &secs);
                if (err == 0)
                        err = rtc->ops->set_mmss(rtc->dev.parent, secs);
        } else
                err = -EINVAL;

        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_set_time);
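
/*
 * Example (illustrative sketch, not part of the original file): given a
 * struct rtc_device pointer "rtc" (for instance obtained with
 * rtc_class_open()) and a wall-clock time in seconds, a caller could
 * program the RTC like this.  "rtc" and "new_time_secs" are placeholder
 * names; error handling is trimmed for brevity.
 *
 *      struct rtc_time tm;
 *      int err;
 *
 *      rtc_time_to_tm(new_time_secs, &tm);
 *      err = rtc_set_time(rtc, &tm);
 *      if (err)
 *              pr_err("failed to set RTC: %d\n", err);
 */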

int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops)
                err = -ENODEV;
        else if (rtc->ops->set_mmss)
                err = rtc->ops->set_mmss(rtc->dev.parent, secs);
        else if (rtc->ops->read_time && rtc->ops->set_time) {
                struct rtc_time new, old;

                err = rtc->ops->read_time(rtc->dev.parent, &old);
                if (err == 0) {
                        rtc_time_to_tm(secs, &new);

                        /*
                         * Avoid writing across a date rollover: if either
                         * the current time or the requested time falls in
                         * the last minute of the day, skip the update
                         * rather than risk the day changing between the
                         * read and the write.
                         */
                        if (!((old.tm_hour == 23 && old.tm_min == 59) ||
                              (new.tm_hour == 23 && new.tm_min == 59)))
                                err = rtc->ops->set_time(rtc->dev.parent,
                                                         &new);
                }
        } else
                err = -EINVAL;

        mutex_unlock(&rtc->ops_lock);

        return err;
}
EXPORT_SYMBOL_GPL(rtc_set_mmss);

static int rtc_read_alarm_internal(struct rtc_device *rtc,
                                   struct rtc_wkalrm *alarm)
{
        int err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (rtc->ops == NULL)
                err = -ENODEV;
        else if (!rtc->ops->read_alarm)
                err = -EINVAL;
        else {
                memset(alarm, 0, sizeof(struct rtc_wkalrm));
                err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
        }

        mutex_unlock(&rtc->ops_lock);
        return err;
}

int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err;
        struct rtc_time before, now;
        int first_time = 1;
        unsigned long t_now, t_alm;
        enum { none, day, month, year } missing = none;
        unsigned days;

        /*
         * The lower-level driver may leave some alarm fields set to -1,
         * either because the hardware cannot store them (many alarms
         * match only on time-of-day fields) or because it treats them
         * as wildcards.  Any such missing fields are filled in below
         * from a current RTC timestamp.
         *
         * Because timestamp fields can wrap between reading the alarm
         * and reading the time, the sequence is: read a "before"
         * timestamp, read the alarm, then read an "after" timestamp.
         * If the two timestamps disagree, loop and try again so that
         * the values used for the fill-in are mutually consistent.
         */
        /* get the "before" timestamp */
        err = rtc_read_time(rtc, &before);
        if (err < 0)
                return err;
        do {
                if (!first_time)
                        memcpy(&before, &now, sizeof(struct rtc_time));
                first_time = 0;

                /* read the current alarm value */
                err = rtc_read_alarm_internal(rtc, alarm);
                if (err)
                        return err;
                if (!alarm->enabled)
                        return 0;

                /* full-function RTCs won't report any missing fields */
                if (rtc_valid_tm(&alarm->time) == 0)
                        return 0;

                /* get the "after" timestamp, to detect wrapped fields */
                err = rtc_read_time(rtc, &now);
                if (err < 0)
                        return err;

                /* tm_sec is deliberately left out of the comparison */
        } while (before.tm_min != now.tm_min
                 || before.tm_hour != now.tm_hour
                 || before.tm_mon != now.tm_mon
                 || before.tm_year != now.tm_year);

        /* fill in any time-of-day fields the driver reported as -1 */
        if (alarm->time.tm_sec == -1)
                alarm->time.tm_sec = now.tm_sec;
        if (alarm->time.tm_min == -1)
                alarm->time.tm_min = now.tm_min;
        if (alarm->time.tm_hour == -1)
                alarm->time.tm_hour = now.tm_hour;

        /* note the finest-grained date field that was missing */
        if (alarm->time.tm_mday == -1) {
                alarm->time.tm_mday = now.tm_mday;
                missing = day;
        }
        if (alarm->time.tm_mon == -1) {
                alarm->time.tm_mon = now.tm_mon;
                if (missing == none)
                        missing = month;
        }
        if (alarm->time.tm_year == -1) {
                alarm->time.tm_year = now.tm_year;
                if (missing == none)
                        missing = year;
        }

        /* if the filled-in alarm is still in the future, we are done */
        rtc_tm_to_time(&now, &t_now);
        rtc_tm_to_time(&alarm->time, &t_alm);
        if (t_now < t_alm)
                goto done;

        switch (missing) {

        /*
         * Day rollover: the alarm stores only a time of day, and that
         * time has already passed today, so the alarm means the same
         * time tomorrow; add 24 hours.
         */
        case day:
                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
                t_alm += 24 * 60 * 60;
                rtc_time_to_tm(t_alm, &alarm->time);
                break;

        /*
         * Month rollover: advance to the next month that actually
         * contains the alarm's day of the month.
         */
        case month:
                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
                do {
                        if (alarm->time.tm_mon < 11)
                                alarm->time.tm_mon++;
                        else {
                                alarm->time.tm_mon = 0;
                                alarm->time.tm_year++;
                        }
                        days = rtc_month_days(alarm->time.tm_mon,
                                              alarm->time.tm_year);
                } while (days < alarm->time.tm_mday);
                break;

        /* year rollover: simple, except for leap days (Feb 29th) */
        case year:
                dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
                do {
                        alarm->time.tm_year++;
                } while (rtc_valid_tm(&alarm->time) != 0);
                break;

        default:
                dev_warn(&rtc->dev, "alarm rollover not handled\n");
        }

done:
        return 0;
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);

int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
        int err;

        err = rtc_valid_tm(&alarm->time);
        if (err != 0)
                return err;

        err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->set_alarm)
                err = -EINVAL;
        else
                err = rtc->ops->set_alarm(rtc->dev.parent, alarm);

        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);

int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
        int err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->alarm_irq_enable)
                err = -EINVAL;
        else
                err = rtc->ops->alarm_irq_enable(rtc->dev.parent, enabled);

        mutex_unlock(&rtc->ops_lock);
        return err;
}
EXPORT_SYMBOL_GPL(rtc_alarm_irq_enable);
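
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * holding a struct rtc_device pointer "rtc" might arm an alarm one
 * minute from now roughly like this.  "rtc" is a placeholder and error
 * handling is trimmed for brevity.
 *
 *      struct rtc_wkalrm alrm;
 *      struct rtc_time tm;
 *      unsigned long now;
 *
 *      rtc_read_time(rtc, &tm);
 *      rtc_tm_to_time(&tm, &now);
 *      rtc_time_to_tm(now + 60, &alrm.time);
 *      alrm.enabled = 1;
 *      if (rtc_set_alarm(rtc, &alrm) == 0)
 *              rtc_alarm_irq_enable(rtc, 1);
 */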

int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
{
        int err = mutex_lock_interruptible(&rtc->ops_lock);
        if (err)
                return err;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        if (enabled == 0 && rtc->uie_irq_active) {
                mutex_unlock(&rtc->ops_lock);
                return rtc_dev_update_irq_enable_emul(rtc, enabled);
        }
#endif

        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->update_irq_enable)
                err = -EINVAL;
        else
                err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);

        mutex_unlock(&rtc->ops_lock);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
        /*
         * Fall back to update-IRQ emulation when the driver either does
         * not provide update_irq_enable() or returned -EINVAL to signal
         * that update interrupts are not available.
         */
        if (err == -EINVAL)
                err = rtc_dev_update_irq_enable_emul(rtc, enabled);
#endif
        return err;
}
EXPORT_SYMBOL_GPL(rtc_update_irq_enable);

/**
 * rtc_update_irq - report RTC interrupt events to the RTC core
 * @rtc: the rtc device
 * @num: number of IRQs being reported (usually one)
 * @events: mask of RTC_IRQF together with one or more of RTC_PF,
 *          RTC_AF, RTC_UF
 *
 * Typically called from an RTC driver's interrupt handler (or from
 * emulation code) to account for the events, run any registered IRQ
 * task, and wake up readers of the character device.
 */
void rtc_update_irq(struct rtc_device *rtc,
                    unsigned long num, unsigned long events)
{
        spin_lock(&rtc->irq_lock);
        rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
        spin_unlock(&rtc->irq_lock);

        spin_lock(&rtc->irq_task_lock);
        if (rtc->irq_task)
                rtc->irq_task->func(rtc->irq_task->private_data);
        spin_unlock(&rtc->irq_task_lock);

        wake_up_interruptible(&rtc->irq_queue);
        kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(rtc_update_irq);
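
/*
 * Example (illustrative sketch, not part of the original file): a chip
 * driver's interrupt handler would typically acknowledge the interrupt
 * in the hardware and then report the event to the core.  "foo_rtc",
 * "priv" and "priv->rtc" are hypothetical driver-private names.
 *
 *      static irqreturn_t foo_rtc_irq(int irq, void *dev_id)
 *      {
 *              struct foo_rtc *priv = dev_id;
 *
 *              rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
 *              return IRQ_HANDLED;
 *      }
 */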

static int __rtc_match(struct device *dev, void *data)
{
        char *name = (char *)data;

        if (strcmp(dev_name(dev), name) == 0)
                return 1;
        return 0;
}

struct rtc_device *rtc_class_open(char *name)
{
        struct device *dev;
        struct rtc_device *rtc = NULL;

        dev = class_find_device(rtc_class, NULL, name, __rtc_match);
        if (dev)
                rtc = to_rtc_device(dev);

        if (rtc) {
                if (!try_module_get(rtc->owner)) {
                        put_device(dev);
                        rtc = NULL;
                }
        }

        return rtc;
}
EXPORT_SYMBOL_GPL(rtc_class_open);

void rtc_class_close(struct rtc_device *rtc)
{
        module_put(rtc->owner);
        put_device(&rtc->dev);
}
EXPORT_SYMBOL_GPL(rtc_class_close);
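
/*
 * Example (illustrative only, not part of the original file): kernel code
 * that needs an RTC by name might use the class_open/class_close pair
 * like this.  The device name "rtc0" is an assumption; a real caller
 * must handle the NULL return when no such RTC is registered.
 *
 *      struct rtc_device *rtc = rtc_class_open("rtc0");
 *      struct rtc_time tm;
 *
 *      if (rtc) {
 *              if (rtc_read_time(rtc, &tm) == 0)
 *                      pr_info("RTC time: %02d:%02d:%02d\n",
 *                              tm.tm_hour, tm.tm_min, tm.tm_sec);
 *              rtc_class_close(rtc);
 *      }
 */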

int rtc_irq_register(struct rtc_device *rtc, struct rtc_task *task)
{
        int retval = -EBUSY;

        if (task == NULL || task->func == NULL)
                return -EINVAL;

        /* cannot register a task while the character device is in use */
        if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
                return -EBUSY;

        spin_lock_irq(&rtc->irq_task_lock);
        if (rtc->irq_task == NULL) {
                rtc->irq_task = task;
                retval = 0;
        }
        spin_unlock_irq(&rtc->irq_task_lock);

        clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags);

        return retval;
}
EXPORT_SYMBOL_GPL(rtc_irq_register);

void rtc_irq_unregister(struct rtc_device *rtc, struct rtc_task *task)
{
        spin_lock_irq(&rtc->irq_task_lock);
        if (rtc->irq_task == task)
                rtc->irq_task = NULL;
        spin_unlock_irq(&rtc->irq_task_lock);
}
EXPORT_SYMBOL_GPL(rtc_irq_unregister);

/**
 * rtc_irq_set_state - enable/disable periodic IRQs for a registered task
 * @rtc: the rtc device
 * @task: the task previously registered with rtc_irq_register()
 * @enabled: nonzero to enable, zero to disable
 *
 * The frequency of the periodic IRQ should be set beforehand with
 * rtc_irq_set_freq().
 */
int rtc_irq_set_state(struct rtc_device *rtc, struct rtc_task *task,
                      int enabled)
{
        int err = 0;
        unsigned long flags;

        if (rtc->ops->irq_set_state == NULL)
                return -ENXIO;

        spin_lock_irqsave(&rtc->irq_task_lock, flags);
        if (rtc->irq_task != NULL && task == NULL)
                err = -EBUSY;
        if (rtc->irq_task != task)
                err = -EACCES;
        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

        if (err == 0)
                err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);

        return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_state);

/**
 * rtc_irq_set_freq - set the frequency of periodic IRQ callbacks
 * @rtc: the rtc device
 * @task: the task previously registered with rtc_irq_register()
 * @freq: frequency, in Hz, at which task->func() should be called
 *
 * Use rtc_irq_set_state() to start and stop the periodic IRQs.
 */
int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
{
        int err = 0;
        unsigned long flags;

        if (rtc->ops->irq_set_freq == NULL)
                return -ENXIO;

        spin_lock_irqsave(&rtc->irq_task_lock, flags);
        if (rtc->irq_task != NULL && task == NULL)
                err = -EBUSY;
        if (rtc->irq_task != task)
                err = -EACCES;
        spin_unlock_irqrestore(&rtc->irq_task_lock, flags);

        if (err == 0) {
                err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
                if (err == 0)
                        rtc->irq_freq = freq;
        }
        return err;
}
EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
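
/*
 * Example (illustrative sketch, not part of the original file): a user of
 * the periodic IRQ interface registers a callback task, sets a frequency
 * and then enables the interrupts.  "rtc", "my_task" and
 * "my_periodic_func" are hypothetical names, and 8 Hz is an arbitrary
 * choice.
 *
 *      static void my_periodic_func(void *private_data)
 *      {
 *              pr_debug("rtc periodic tick\n");
 *      }
 *
 *      static struct rtc_task my_task = {
 *              .func = my_periodic_func,
 *      };
 *
 *      if (rtc_irq_register(rtc, &my_task) == 0) {
 *              rtc_irq_set_freq(rtc, &my_task, 8);
 *              rtc_irq_set_state(rtc, &my_task, 1);
 *      }
 */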
527