#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rfkill.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include "rfkill.h"

#define POLL_INTERVAL		(5 * HZ)

#define RFKILL_BLOCK_HW		BIT(0)
#define RFKILL_BLOCK_SW		BIT(1)
#define RFKILL_BLOCK_SW_PREV	BIT(2)
#define RFKILL_BLOCK_ANY	(RFKILL_BLOCK_HW |\
				 RFKILL_BLOCK_SW |\
				 RFKILL_BLOCK_SW_PREV)
#define RFKILL_BLOCK_SW_SETCALL	BIT(31)
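
/*
 * Software-block bookkeeping: while ops->set_block() is running,
 * RFKILL_BLOCK_SW_SETCALL is set and RFKILL_BLOCK_SW_PREV holds the
 * previous software state.  A concurrent rfkill_set_sw_state() call
 * from the driver then updates the _PREV bit instead of _SW, so that
 * if set_block() fails, rfkill_set_block() can fall back to the state
 * the driver last reported.
 */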

struct rfkill {
	spinlock_t		lock;

	const char		*name;
	enum rfkill_type	type;

	unsigned long		state;

	u32			idx;

	bool			registered;
	bool			persistent;

	const struct rfkill_ops	*ops;
	void			*data;

#ifdef CONFIG_RFKILL_LEDS
	struct led_trigger	led_trigger;
	const char		*ledtrigname;
#endif

	struct device		dev;
	struct list_head	node;

	struct delayed_work	poll_work;
	struct work_struct	uevent_work;
	struct work_struct	sync_work;
};
#define to_rfkill(d)	container_of(d, struct rfkill, dev)

/* An event queued for delivery to one /dev/rfkill reader. */
struct rfkill_int_event {
	struct list_head	list;
	struct rfkill_event	ev;
};

/* Per-file-descriptor state for an open /dev/rfkill. */
struct rfkill_data {
	struct list_head	list;
	struct list_head	events;
	struct mutex		mtx;
	wait_queue_head_t	read_wait;
	bool			input_handler;
};

MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
MODULE_DESCRIPTION("RF switch support");
MODULE_LICENSE("GPL");

/*
 * The locking here should be made much smarter, we currently have
 * a bit of a stupid situation because drivers might want to register
 * the rfkill struct under their own lock, and take this lock during
 * rfkill method calls -- which will cause an AB-BA deadlock situation.
 *
 * To fix that, we need to rework this code here to be mostly lock-free
 * and only use the mutex for list manipulations, not to protect the
 * various other global variables. Then we can avoid holding the mutex
 * around driver operations, and all is happy.
 */

static LIST_HEAD(rfkill_list);	/* list of registered rf switches */
static DEFINE_MUTEX(rfkill_global_mutex);
static LIST_HEAD(rfkill_fds);	/* list of open fds of /dev/rfkill */

static unsigned int rfkill_default_state = 1;
module_param_named(default_state, rfkill_default_state, uint, 0444);
MODULE_PARM_DESC(default_state,
		 "Default initial state for all radio types, 0 = radio off");

static struct {
	bool cur, sav;	/* current state, and state saved for EPO restore */
} rfkill_global_states[NUM_RFKILL_TYPES];

static bool rfkill_epo_lock_active;

#ifdef CONFIG_RFKILL_LEDS
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
	struct led_trigger *trigger;

	if (!rfkill->registered)
		return;

	trigger = &rfkill->led_trigger;

	if (rfkill->state & RFKILL_BLOCK_ANY)
		led_trigger_event(trigger, LED_OFF);
	else
		led_trigger_event(trigger, LED_FULL);
}

static void rfkill_led_trigger_activate(struct led_classdev *led)
{
	struct rfkill *rfkill;

	rfkill = container_of(led->trigger, struct rfkill, led_trigger);

	rfkill_led_trigger_event(rfkill);
}

const char *rfkill_get_led_trigger_name(struct rfkill *rfkill)
{
	return rfkill->led_trigger.name;
}
EXPORT_SYMBOL(rfkill_get_led_trigger_name);

void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name)
{
	BUG_ON(!rfkill);

	rfkill->ledtrigname = name;
}
EXPORT_SYMBOL(rfkill_set_led_trigger_name);

static int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	rfkill->led_trigger.name = rfkill->ledtrigname
					? : dev_name(&rfkill->dev);
	rfkill->led_trigger.activate = rfkill_led_trigger_activate;
	return led_trigger_register(&rfkill->led_trigger);
}

static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
	led_trigger_unregister(&rfkill->led_trigger);
}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
}

static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
{
	return 0;
}

static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
#endif /* CONFIG_RFKILL_LEDS */

static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
			      enum rfkill_operation op)
{
	unsigned long flags;

	ev->idx = rfkill->idx;
	ev->type = rfkill->type;
	ev->op = op;

	spin_lock_irqsave(&rfkill->lock, flags);
	ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW);
	ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW |
					RFKILL_BLOCK_SW_PREV));
	spin_unlock_irqrestore(&rfkill->lock, flags);
}

static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
{
	struct rfkill_data *data;
	struct rfkill_int_event *ev;

	list_for_each_entry(data, &rfkill_fds, list) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			continue;
		rfkill_fill_event(&ev->ev, rfkill, op);
		mutex_lock(&data->mtx);
		list_add_tail(&ev->list, &data->events);
		mutex_unlock(&data->mtx);
		wake_up_interruptible(&data->read_wait);
	}
}

static void rfkill_event(struct rfkill *rfkill)
{
	if (!rfkill->registered)
		return;

	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

	/* also send event to /dev/rfkill */
	rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
}

static bool __rfkill_set_hw_state(struct rfkill *rfkill,
				  bool blocked, bool *change)
{
	unsigned long flags;
	bool prev, any;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_HW);
	if (blocked)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;
	*change = prev != blocked;
	any = rfkill->state & RFKILL_BLOCK_ANY;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);

	return any;
}

/**
 * rfkill_set_block - wrapper for set_block method
 *
 * @rfkill: the rfkill struct to use
 * @blocked: the new software state
 *
 * Calls the set_block method (when applicable) and handles notifications
 * etc. as well.
 */
static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	int err;

	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
		return;

	/*
	 * If the driver can report its current state, query it first so
	 * that the software-block bookkeeping below starts from fresh
	 * hardware state.
	 */
	if (rfkill->ops->query)
		rfkill->ops->query(rfkill, rfkill->data);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (rfkill->state & RFKILL_BLOCK_SW)
		rfkill->state |= RFKILL_BLOCK_SW_PREV;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= RFKILL_BLOCK_SW;
	else
		rfkill->state &= ~RFKILL_BLOCK_SW;

	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	err = rfkill->ops->set_block(rfkill->data, blocked);

	spin_lock_irqsave(&rfkill->lock, flags);
	if (err) {
		/*
		 * Failed -- reset status to _prev, which may be different
		 * from what we have set _prev to earlier in this function
		 * if rfkill_set_sw_state was invoked.
		 */
		if (rfkill->state & RFKILL_BLOCK_SW_PREV)
			rfkill->state |= RFKILL_BLOCK_SW;
		else
			rfkill->state &= ~RFKILL_BLOCK_SW;
	}
	rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL;
	rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	rfkill_led_trigger_event(rfkill);
	rfkill_event(rfkill);
}

#ifdef CONFIG_RFKILL_INPUT
static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);

/**
 * __rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * This function sets the state of all switches of given type,
 * unless a specific switch is claimed by userspace (in which case,
 * that switch is left alone) or suspended.
 *
 * Caller must have acquired rfkill_global_mutex.
 */
static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
{
	struct rfkill *rfkill;

	rfkill_global_states[type].cur = blocked;
	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->type != type)
			continue;

		rfkill_set_block(rfkill, blocked);
	}
}

/**
 * rfkill_switch_all - Toggle state of all switches of given type
 * @type: type of interfaces to be affected
 * @blocked: the new state
 *
 * Acquires rfkill_global_mutex and calls __rfkill_switch_all(@type, @blocked).
 * Please refer to __rfkill_switch_all() for details.
 *
 * Does nothing if the EPO lock is active.
 */
void rfkill_switch_all(enum rfkill_type type, bool blocked)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	if (!rfkill_epo_lock_active)
		__rfkill_switch_all(type, blocked);

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_epo - emergency power off all transmitters
 *
 * This kicks all rfkill devices to the soft-blocked state, ignoring
 * everything in its path but rfkill_global_mutex.
 *
 * The global state before the EPO is saved and can be restored later
 * using rfkill_restore_states().
 */
void rfkill_epo(void)
{
	struct rfkill *rfkill;
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = true;
	list_for_each_entry(rfkill, &rfkill_list, node)
		rfkill_set_block(rfkill, true);

	for (i = 0; i < NUM_RFKILL_TYPES; i++) {
		rfkill_global_states[i].sav = rfkill_global_states[i].cur;
		rfkill_global_states[i].cur = true;
	}

	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_restore_states - restore global states
 *
 * Restore (and sync switches to) the global states saved in
 * rfkill_global_states; this can undo the effects of rfkill_epo().
 */
void rfkill_restore_states(void)
{
	int i;

	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);

	rfkill_epo_lock_active = false;
	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		__rfkill_switch_all(i, rfkill_global_states[i].sav);
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_remove_epo_lock - unlock state changes
 *
 * Used by rfkill-input to manually unlock state changes, when
 * the EPO switch is deactivated.
 */
void rfkill_remove_epo_lock(void)
{
	if (atomic_read(&rfkill_input_disabled))
		return;

	mutex_lock(&rfkill_global_mutex);
	rfkill_epo_lock_active = false;
	mutex_unlock(&rfkill_global_mutex);
}

/**
 * rfkill_is_epo_lock_active - returns true if EPO is active
 *
 * Returns false if there is NOT an active EPO condition, and
 * true if there is an active EPO condition, which affects all
 * state-change requests in this driver code.
 */
bool rfkill_is_epo_lock_active(void)
{
	return rfkill_epo_lock_active;
}

/**
 * rfkill_get_global_sw_state - returns global state for a type
 * @type: the type to get the global state of
 *
 * Returns the current global software-block state for a given wireless
 * switch type.
 */
bool rfkill_get_global_sw_state(const enum rfkill_type type)
{
	return rfkill_global_states[type].cur;
}
#endif /* CONFIG_RFKILL_INPUT */

bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
{
	bool ret, change;

	ret = __rfkill_set_hw_state(rfkill, blocked, &change);

	if (!rfkill->registered)
		return ret;

	if (change)
		schedule_work(&rfkill->uevent_work);

	return ret;
}
EXPORT_SYMBOL(rfkill_set_hw_state);
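
/*
 * Usage sketch (illustrative, not part of this file): a driver that
 * notices its hardware kill switch changed, e.g. in an interrupt
 * handler or in its poll() callback, would report it with something
 * like
 *
 *	blocked = gpio_get_value(priv->kill_gpio);	// hypothetical
 *	rfkill_set_hw_state(priv->rfkill, blocked);
 *
 * where "priv" and its fields are made-up driver state.  The return
 * value tells the driver whether the transmitter should be blocked
 * for any reason, hardware or software.
 */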

static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	u32 bit = RFKILL_BLOCK_SW;

	/* if in a ops->set_block right now, use other bit */
	if (rfkill->state & RFKILL_BLOCK_SW_SETCALL)
		bit = RFKILL_BLOCK_SW_PREV;

	if (blocked)
		rfkill->state |= bit;
	else
		rfkill->state &= ~bit;
}

bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;
	bool prev, hwblock;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);
	prev = !!(rfkill->state & RFKILL_BLOCK_SW);
	__rfkill_set_sw_state(rfkill, blocked);
	hwblock = !!(rfkill->state & RFKILL_BLOCK_HW);
	blocked = blocked || hwblock;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered)
		return blocked;

	if (prev != blocked && !hwblock)
		schedule_work(&rfkill->uevent_work);

	rfkill_led_trigger_event(rfkill);

	return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);

void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
{
	unsigned long flags;

	BUG_ON(!rfkill);
	BUG_ON(rfkill->registered);

	spin_lock_irqsave(&rfkill->lock, flags);
	__rfkill_set_sw_state(rfkill, blocked);
	rfkill->persistent = true;
	spin_unlock_irqrestore(&rfkill->lock, flags);
}
EXPORT_SYMBOL(rfkill_init_sw_state);

void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
	unsigned long flags;
	bool swprev, hwprev;

	BUG_ON(!rfkill);

	spin_lock_irqsave(&rfkill->lock, flags);

	/*
	 * No need to care about prev/setblock ... this is for uevent only
	 * and that will get triggered by rfkill_set_block anyway.
	 */
	swprev = !!(rfkill->state & RFKILL_BLOCK_SW);
	hwprev = !!(rfkill->state & RFKILL_BLOCK_HW);
	__rfkill_set_sw_state(rfkill, sw);
	if (hw)
		rfkill->state |= RFKILL_BLOCK_HW;
	else
		rfkill->state &= ~RFKILL_BLOCK_HW;

	spin_unlock_irqrestore(&rfkill->lock, flags);

	if (!rfkill->registered) {
		rfkill->persistent = true;
	} else {
		if (swprev != sw || hwprev != hw)
			schedule_work(&rfkill->uevent_work);

		rfkill_led_trigger_event(rfkill);
	}
}
EXPORT_SYMBOL(rfkill_set_states);

static ssize_t rfkill_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill->name);
}

static const char *rfkill_get_type_str(enum rfkill_type type)
{
	BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_FM + 1);

	switch (type) {
	case RFKILL_TYPE_WLAN:
		return "wlan";
	case RFKILL_TYPE_BLUETOOTH:
		return "bluetooth";
	case RFKILL_TYPE_UWB:
		return "ultrawideband";
	case RFKILL_TYPE_WIMAX:
		return "wimax";
	case RFKILL_TYPE_WWAN:
		return "wwan";
	case RFKILL_TYPE_GPS:
		return "gps";
	case RFKILL_TYPE_FM:
		return "fm";
	default:
		BUG();
	}
}

static ssize_t rfkill_type_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}

static ssize_t rfkill_idx_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->idx);
}

static ssize_t rfkill_persistent_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", rfkill->persistent);
}

static ssize_t rfkill_hard_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0);
}

static ssize_t rfkill_soft_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}

static ssize_t rfkill_soft_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = strict_strtoul(buf, 0, &state);
	if (err)
		return err;

	if (state > 1)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state);
	mutex_unlock(&rfkill_global_mutex);

	return err ?: count;
}

static u8 user_state_from_blocked(unsigned long state)
{
	if (state & RFKILL_BLOCK_HW)
		return RFKILL_USER_STATE_HARD_BLOCKED;
	if (state & RFKILL_BLOCK_SW)
		return RFKILL_USER_STATE_SOFT_BLOCKED;

	return RFKILL_USER_STATE_UNBLOCKED;
}

static ssize_t rfkill_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rfkill *rfkill = to_rfkill(dev);

	return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}

static ssize_t rfkill_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long state;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	err = strict_strtoul(buf, 0, &state);
	if (err)
		return err;

	if (state != RFKILL_USER_STATE_SOFT_BLOCKED &&
	    state != RFKILL_USER_STATE_UNBLOCKED)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);
	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
	mutex_unlock(&rfkill_global_mutex);

	return err ?: count;
}

static ssize_t rfkill_claim_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sprintf(buf, "%d\n", 0);
}

static ssize_t rfkill_claim_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	return -EOPNOTSUPP;
}

static struct device_attribute rfkill_dev_attrs[] = {
	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
	__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
	__ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
	__ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
	__ATTR_NULL
};

static void rfkill_release(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);

	kfree(rfkill);
}

static int rfkill_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rfkill *rfkill = to_rfkill(dev);
	unsigned long flags;
	u32 state;
	int error;

	error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name);
	if (error)
		return error;
	error = add_uevent_var(env, "RFKILL_TYPE=%s",
			       rfkill_get_type_str(rfkill->type));
	if (error)
		return error;
	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);
	error = add_uevent_var(env, "RFKILL_STATE=%d",
			       user_state_from_blocked(state));
	return error;
}

void rfkill_pause_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	cancel_delayed_work_sync(&rfkill->poll_work);
}
EXPORT_SYMBOL(rfkill_pause_polling);

void rfkill_resume_polling(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (!rfkill->ops->poll)
		return;

	schedule_work(&rfkill->poll_work.work);
}
EXPORT_SYMBOL(rfkill_resume_polling);
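
/*
 * Illustrative note (not from this file): a driver performing a long
 * synchronous operation, say a firmware reload, might bracket it with
 * rfkill_pause_polling()/rfkill_resume_polling() so that its poll()
 * callback cannot run while the hardware is being reset.  The
 * suspend/resume hooks below use the same pair.
 */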

static int rfkill_suspend(struct device *dev, pm_message_t state)
{
	struct rfkill *rfkill = to_rfkill(dev);

	rfkill_pause_polling(rfkill);

	return 0;
}

static int rfkill_resume(struct device *dev)
{
	struct rfkill *rfkill = to_rfkill(dev);
	bool cur;

	if (!rfkill->persistent) {
		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
		rfkill_set_block(rfkill, cur);
	}

	rfkill_resume_polling(rfkill);

	return 0;
}

static struct class rfkill_class = {
	.name		= "rfkill",
	.dev_release	= rfkill_release,
	.dev_attrs	= rfkill_dev_attrs,
	.dev_uevent	= rfkill_dev_uevent,
	.suspend	= rfkill_suspend,
	.resume		= rfkill_resume,
};

bool rfkill_blocked(struct rfkill *rfkill)
{
	unsigned long flags;
	u32 state;

	spin_lock_irqsave(&rfkill->lock, flags);
	state = rfkill->state;
	spin_unlock_irqrestore(&rfkill->lock, flags);

	return !!(state & RFKILL_BLOCK_ANY);
}
EXPORT_SYMBOL(rfkill_blocked);

struct rfkill * __must_check rfkill_alloc(const char *name,
					  struct device *parent,
					  const enum rfkill_type type,
					  const struct rfkill_ops *ops,
					  void *ops_data)
{
	struct rfkill *rfkill;
	struct device *dev;

	if (WARN_ON(!ops))
		return NULL;

	if (WARN_ON(!ops->set_block))
		return NULL;

	if (WARN_ON(!name))
		return NULL;

	if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES))
		return NULL;

	rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
	if (!rfkill)
		return NULL;

	spin_lock_init(&rfkill->lock);
	INIT_LIST_HEAD(&rfkill->node);
	rfkill->type = type;
	rfkill->name = name;
	rfkill->ops = ops;
	rfkill->data = ops_data;

	dev = &rfkill->dev;
	dev->class = &rfkill_class;
	dev->parent = parent;
	device_initialize(dev);

	return rfkill;
}
EXPORT_SYMBOL(rfkill_alloc);
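
/*
 * Lifecycle sketch (illustrative only; all names are made up): a
 * typical driver allocates, registers and eventually tears down its
 * switch roughly as
 *
 *	static const struct rfkill_ops my_ops = {
 *		.set_block = my_set_block,	// hypothetical callback
 *	};
 *
 *	priv->rfkill = rfkill_alloc("mydev-wlan", dev, RFKILL_TYPE_WLAN,
 *				    &my_ops, priv);
 *	if (!priv->rfkill)
 *		return -ENOMEM;
 *	err = rfkill_register(priv->rfkill);
 *	if (err) {
 *		rfkill_destroy(priv->rfkill);
 *		return err;
 *	}
 *	...
 *	rfkill_unregister(priv->rfkill);
 *	rfkill_destroy(priv->rfkill);
 */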

static void rfkill_poll(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, poll_work.work);

	/*
	 * Poll hardware state -- driver will use one of the
	 * rfkill_set{,_hw,_sw}_state functions and use its
	 * return value to update the current status.
	 */
	rfkill->ops->poll(rfkill, rfkill->data);

	schedule_delayed_work(&rfkill->poll_work,
		round_jiffies_relative(POLL_INTERVAL));
}

static void rfkill_uevent_work(struct work_struct *work)
{
	struct rfkill *rfkill;

	rfkill = container_of(work, struct rfkill, uevent_work);

	mutex_lock(&rfkill_global_mutex);
	rfkill_event(rfkill);
	mutex_unlock(&rfkill_global_mutex);
}

static void rfkill_sync_work(struct work_struct *work)
{
	struct rfkill *rfkill;
	bool cur;

	rfkill = container_of(work, struct rfkill, sync_work);

	mutex_lock(&rfkill_global_mutex);
	cur = rfkill_global_states[rfkill->type].cur;
	rfkill_set_block(rfkill, cur);
	mutex_unlock(&rfkill_global_mutex);
}

int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	int error;

	BUG_ON(!rfkill);

	mutex_lock(&rfkill_global_mutex);

	if (rfkill->registered) {
		error = -EALREADY;
		goto unlock;
	}

	rfkill->idx = rfkill_no;
	dev_set_name(dev, "rfkill%lu", rfkill_no);
	rfkill_no++;

	list_add_tail(&rfkill->node, &rfkill_list);

	error = device_add(dev);
	if (error)
		goto remove;

	error = rfkill_led_trigger_register(rfkill);
	if (error)
		goto devdel;

	rfkill->registered = true;

	INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll);
	INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work);
	INIT_WORK(&rfkill->sync_work, rfkill_sync_work);

	if (rfkill->ops->poll)
		schedule_delayed_work(&rfkill->poll_work,
			round_jiffies_relative(POLL_INTERVAL));

	if (!rfkill->persistent || rfkill_epo_lock_active) {
		schedule_work(&rfkill->sync_work);
	} else {
#ifdef CONFIG_RFKILL_INPUT
		bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW);

		if (!atomic_read(&rfkill_input_disabled))
			__rfkill_switch_all(rfkill->type, soft_blocked);
#endif
	}

	rfkill_send_events(rfkill, RFKILL_OP_ADD);

	mutex_unlock(&rfkill_global_mutex);
	return 0;

 devdel:
	device_del(&rfkill->dev);
 remove:
	list_del_init(&rfkill->node);
 unlock:
	mutex_unlock(&rfkill_global_mutex);
	return error;
}
EXPORT_SYMBOL(rfkill_register);

void rfkill_unregister(struct rfkill *rfkill)
{
	BUG_ON(!rfkill);

	if (rfkill->ops->poll)
		cancel_delayed_work_sync(&rfkill->poll_work);

	cancel_work_sync(&rfkill->uevent_work);
	cancel_work_sync(&rfkill->sync_work);

	rfkill->registered = false;

	device_del(&rfkill->dev);

	mutex_lock(&rfkill_global_mutex);
	rfkill_send_events(rfkill, RFKILL_OP_DEL);
	list_del_init(&rfkill->node);
	mutex_unlock(&rfkill_global_mutex);

	rfkill_led_trigger_unregister(rfkill);
}
EXPORT_SYMBOL(rfkill_unregister);

void rfkill_destroy(struct rfkill *rfkill)
{
	if (rfkill)
		put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_destroy);

static int rfkill_fop_open(struct inode *inode, struct file *file)
{
	struct rfkill_data *data;
	struct rfkill *rfkill;
	struct rfkill_int_event *ev, *tmp;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->events);
	mutex_init(&data->mtx);
	init_waitqueue_head(&data->read_wait);

	mutex_lock(&rfkill_global_mutex);
	mutex_lock(&data->mtx);
	/*
	 * start getting events from elsewhere but hold mtx to get
	 * startup events added first
	 */
	list_add(&data->list, &rfkill_fds);

	list_for_each_entry(rfkill, &rfkill_list, node) {
		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
		if (!ev)
			goto free;
		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
		list_add_tail(&ev->list, &data->events);
	}
	mutex_unlock(&data->mtx);
	mutex_unlock(&rfkill_global_mutex);

	file->private_data = data;

	return nonseekable_open(inode, file);

 free:
	mutex_unlock(&data->mtx);
	/* undo the list_add() above before freeing "data" */
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);
	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);
	kfree(data);
	return -ENOMEM;
}

static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
{
	struct rfkill_data *data = file->private_data;
	unsigned int res = POLLOUT | POLLWRNORM;

	poll_wait(file, &data->read_wait, wait);

	mutex_lock(&data->mtx);
	if (!list_empty(&data->events))
		res = POLLIN | POLLRDNORM;
	mutex_unlock(&data->mtx);

	return res;
}

static bool rfkill_readable(struct rfkill_data *data)
{
	bool r;

	mutex_lock(&data->mtx);
	r = !list_empty(&data->events);
	mutex_unlock(&data->mtx);

	return r;
}

static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
			       size_t count, loff_t *pos)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev;
	unsigned long sz;
	int ret;

	mutex_lock(&data->mtx);

	while (list_empty(&data->events)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&data->mtx);
		ret = wait_event_interruptible(data->read_wait,
					       rfkill_readable(data));
		mutex_lock(&data->mtx);

		if (ret)
			goto out;
	}

	ev = list_first_entry(&data->events, struct rfkill_int_event,
			      list);

	sz = min_t(unsigned long, sizeof(ev->ev), count);
	ret = sz;
	if (copy_to_user(buf, &ev->ev, sz))
		ret = -EFAULT;

	list_del(&ev->list);
	kfree(ev);
 out:
	mutex_unlock(&data->mtx);
	return ret;
}
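
/*
 * Userspace sketch (illustrative, not part of the kernel): with
 * struct rfkill_event from <linux/rfkill.h>, monitoring events
 * boils down to
 *
 *	struct rfkill_event ev;
 *	int fd = open("/dev/rfkill", O_RDONLY);
 *	while (read(fd, &ev, sizeof(ev)) > 0)
 *		printf("idx %u type %u op %u soft %u hard %u\n",
 *		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
 *
 * Each read() returns at most one event; a short read happens when an
 * older userspace asks for fewer bytes than sizeof(ev).
 */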

static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
				size_t count, loff_t *pos)
{
	struct rfkill *rfkill;
	struct rfkill_event ev;

	/* we don't need the 'hard' variable but accept it */
	if (count < RFKILL_EVENT_SIZE_V1 - 1)
		return -EINVAL;

	/*
	 * Copy as much data as we can accept into our 'ev' buffer,
	 * but tell userspace how much we've copied so it can determine
	 * our API version even in a write() call, if it cares.
	 */
	count = min(count, sizeof(ev));
	if (copy_from_user(&ev, buf, count))
		return -EFAULT;

	if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
		return -EINVAL;

	if (ev.type >= NUM_RFKILL_TYPES)
		return -EINVAL;

	mutex_lock(&rfkill_global_mutex);

	if (ev.op == RFKILL_OP_CHANGE_ALL) {
		if (ev.type == RFKILL_TYPE_ALL) {
			enum rfkill_type i;

			for (i = 0; i < NUM_RFKILL_TYPES; i++)
				rfkill_global_states[i].cur = ev.soft;
		} else {
			rfkill_global_states[ev.type].cur = ev.soft;
		}
	}

	list_for_each_entry(rfkill, &rfkill_list, node) {
		if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL)
			continue;

		if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL)
			continue;

		rfkill_set_block(rfkill, ev.soft);
	}
	mutex_unlock(&rfkill_global_mutex);

	return count;
}
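
/*
 * Userspace sketch (illustrative, not part of the kernel): soft-blocking
 * every WLAN device amounts to
 *
 *	struct rfkill_event ev = {
 *		.op = RFKILL_OP_CHANGE_ALL,
 *		.type = RFKILL_TYPE_WLAN,
 *		.soft = 1,
 *	};
 *	int fd = open("/dev/rfkill", O_WRONLY);
 *	write(fd, &ev, sizeof(ev));
 */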

static int rfkill_fop_release(struct inode *inode, struct file *file)
{
	struct rfkill_data *data = file->private_data;
	struct rfkill_int_event *ev, *tmp;

	mutex_lock(&rfkill_global_mutex);
	list_del(&data->list);
	mutex_unlock(&rfkill_global_mutex);

	mutex_destroy(&data->mtx);
	list_for_each_entry_safe(ev, tmp, &data->events, list)
		kfree(ev);

#ifdef CONFIG_RFKILL_INPUT
	if (data->input_handler)
		if (atomic_dec_return(&rfkill_input_disabled) == 0)
			printk(KERN_DEBUG "rfkill: input handler enabled\n");
#endif

	kfree(data);

	return 0;
}

#ifdef CONFIG_RFKILL_INPUT
static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct rfkill_data *data = file->private_data;

	if (_IOC_TYPE(cmd) != RFKILL_IOC_MAGIC)
		return -ENOSYS;

	if (_IOC_NR(cmd) != RFKILL_IOC_NOINPUT)
		return -ENOSYS;

	mutex_lock(&data->mtx);

	if (!data->input_handler) {
		if (atomic_inc_return(&rfkill_input_disabled) == 1)
			printk(KERN_DEBUG "rfkill: input handler disabled\n");
		data->input_handler = true;
	}

	mutex_unlock(&data->mtx);

	return 0;
}
#endif

static const struct file_operations rfkill_fops = {
	.owner		= THIS_MODULE,
	.open		= rfkill_fop_open,
	.read		= rfkill_fop_read,
	.write		= rfkill_fop_write,
	.poll		= rfkill_fop_poll,
	.release	= rfkill_fop_release,
#ifdef CONFIG_RFKILL_INPUT
	.unlocked_ioctl	= rfkill_fop_ioctl,
	.compat_ioctl	= rfkill_fop_ioctl,
#endif
};

static struct miscdevice rfkill_miscdev = {
	.name	= "rfkill",
	.fops	= &rfkill_fops,
	.minor	= MISC_DYNAMIC_MINOR,
};

static int __init rfkill_init(void)
{
	int error;
	int i;

	for (i = 0; i < NUM_RFKILL_TYPES; i++)
		rfkill_global_states[i].cur = !rfkill_default_state;

	error = class_register(&rfkill_class);
	if (error)
		goto out;

	error = misc_register(&rfkill_miscdev);
	if (error) {
		class_unregister(&rfkill_class);
		goto out;
	}

#ifdef CONFIG_RFKILL_INPUT
	error = rfkill_handler_init();
	if (error) {
		misc_deregister(&rfkill_miscdev);
		class_unregister(&rfkill_class);
		goto out;
	}
#endif

 out:
	return error;
}
subsys_initcall(rfkill_init);

static void __exit rfkill_exit(void)
{
#ifdef CONFIG_RFKILL_INPUT
	rfkill_handler_exit();
#endif
	misc_deregister(&rfkill_miscdev);
	class_unregister(&rfkill_class);
}
module_exit(rfkill_exit);