1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#include "core.h"
38
/*
 * One deferred signal: a pending call of handler(data), queued on
 * signal_queue_head until the tasklet runs it.
 */
struct queue_item {
	struct list_head next_signal;	/* link on signal_queue_head */
	void (*handler) (unsigned long);	/* routine to invoke from tasklet */
	unsigned long data;		/* opaque argument passed to handler */
};
44
/* Slab cache for struct queue_item allocations */
static struct kmem_cache *tipc_queue_item_cache;
/* List of pending signals, drained by the tasklet in FIFO order */
static struct list_head signal_queue_head;
/* Protects signal_queue_head and (together with it) handler lifetime */
static DEFINE_SPINLOCK(qitem_lock);
/* Nonzero between tipc_handler_start() and tipc_handler_stop() */
static int handler_enabled = 0;

static void process_signal_queue(unsigned long dummy);

/* Created disabled; tipc_handler_start() enables it */
static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
53
54
55unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{
57 struct queue_item *item;
58
59 if (!handler_enabled) {
60 err("Signal request ignored by handler\n");
61 return -ENOPROTOOPT;
62 }
63
64 spin_lock_bh(&qitem_lock);
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) {
67 err("Signal queue out of memory\n");
68 spin_unlock_bh(&qitem_lock);
69 return -ENOMEM;
70 }
71 item->handler = routine;
72 item->data = argument;
73 list_add_tail(&item->next_signal, &signal_queue_head);
74 spin_unlock_bh(&qitem_lock);
75 tasklet_schedule(&tipc_tasklet);
76 return 0;
77}
78
79static void process_signal_queue(unsigned long dummy)
80{
81 struct queue_item *__volatile__ item;
82 struct list_head *l, *n;
83
84 spin_lock_bh(&qitem_lock);
85 list_for_each_safe(l, n, &signal_queue_head) {
86 item = list_entry(l, struct queue_item, next_signal);
87 list_del(&item->next_signal);
88 spin_unlock_bh(&qitem_lock);
89 item->handler(item->data);
90 spin_lock_bh(&qitem_lock);
91 kmem_cache_free(tipc_queue_item_cache, item);
92 }
93 spin_unlock_bh(&qitem_lock);
94}
95
96int tipc_handler_start(void)
97{
98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
100 0, SLAB_HWCACHE_ALIGN, NULL);
101 if (!tipc_queue_item_cache)
102 return -ENOMEM;
103
104 INIT_LIST_HEAD(&signal_queue_head);
105 tasklet_enable(&tipc_tasklet);
106 handler_enabled = 1;
107 return 0;
108}
109
/*
 * tipc_handler_stop - tear down the deferred-signal subsystem
 *
 * Teardown order matters:
 *  1. clear handler_enabled so tipc_k_signal() rejects new requests;
 *  2. tasklet_disable() waits for a currently running tasklet to finish,
 *     so process_signal_queue() cannot still be touching the queue;
 *  3. tasklet_kill() flushes any pending (re)schedule;
 *  4. only then purge unprocessed items and destroy the slab cache.
 *
 * No-op if the subsystem was never started (or already stopped).
 */
void tipc_handler_stop(void)
{
	struct list_head *l, *n;
	struct queue_item *item;

	if (!handler_enabled)
		return;

	handler_enabled = 0;
	tasklet_disable(&tipc_tasklet);
	tasklet_kill(&tipc_tasklet);

	/* Discard signals that were queued but never processed */
	spin_lock_bh(&qitem_lock);
	list_for_each_safe(l, n, &signal_queue_head) {
		item = list_entry(l, struct queue_item, next_signal);
		list_del(&item->next_signal);
		kmem_cache_free(tipc_queue_item_cache, item);
	}
	spin_unlock_bh(&qitem_lock);

	kmem_cache_destroy(tipc_queue_item_cache);
}
132
133