/*
 * net/tipc/user_reg.c: TIPC user registry code
 */

#include "core.h"
#include "user_reg.h"
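
/**
 * struct tipc_user - registered TIPC user info
 * @next: index of next free registry entry (or -1 for an allocated entry)
 * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
 * @usr_handle: user-defined value passed to callback routine
 * @ports: list of ports owned by the user
 */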
struct tipc_user {
	int next;
	tipc_mode_event callback;
	void *usr_handle;
	struct list_head ports;
};

#define MAX_USERID 64
#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))

static struct tipc_user *users = NULL;
static u32 next_free_user = MAX_USERID + 1;
static DEFINE_SPINLOCK(reg_lock);
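
/*
 * Free registry entries are chained through their 'next' fields, with
 * next_free_user indexing the head of the free list; an allocated entry
 * has next set to -1. Entry 0 is never handed out, so a user ID of 0
 * always means "no registered user".
 */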
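
/**
 * reg_init - create TIPC user registry
 *
 * Allocates the registry on first use and threads all entries onto the
 * free list. Returns 0 on success, otherwise -ENOMEM.
 */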
static int reg_init(void)
{
	u32 i;

	spin_lock_bh(&reg_lock);
	if (!users) {
		users = kzalloc(USER_LIST_SIZE, GFP_ATOMIC);
		if (users) {
			for (i = 1; i <= MAX_USERID; i++) {
				users[i].next = i - 1;
			}
			next_free_user = MAX_USERID;
		}
	}
	spin_unlock_bh(&reg_lock);
	return users ? 0 : -ENOMEM;
}
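
/**
 * reg_callback - inform a registered user of TIPC's current operating mode
 */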
static void reg_callback(struct tipc_user *user_ptr)
{
	tipc_mode_event cb;
	void *arg;

	spin_lock_bh(&reg_lock);
	cb = user_ptr->callback;
	arg = user_ptr->usr_handle;
	spin_unlock_bh(&reg_lock);

	if (cb)
		cb(arg, tipc_mode, tipc_own_addr);
}
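
/**
 * tipc_reg_start - activate TIPC user registry
 *
 * Initializes the registry (if needed) and informs all users that have
 * registered a callback of TIPC's current operating mode.
 */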
int tipc_reg_start(void)
{
	u32 u;
	int res;

	if ((res = reg_init()))
		return res;

	for (u = 1; u <= MAX_USERID; u++) {
		if (users[u].callback)
			tipc_k_signal((Handler)reg_callback,
				      (unsigned long)&users[u]);
	}
	return 0;
}
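
/**
 * tipc_reg_stop - shut down & delete TIPC user registry
 *
 * Informs all registered users with callbacks of the mode change, then
 * frees the registry.
 */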
void tipc_reg_stop(void)
{
	int id;

	if (!users)
		return;

	for (id = 1; id <= MAX_USERID; id++) {
		if (users[id].callback)
			reg_callback(&users[id]);
	}
	kfree(users);
	users = NULL;
}
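
/**
 * tipc_attach - register a TIPC user
 *
 * May be called before TIPC is running, provided a mode event callback is
 * supplied. Returns 0 on success, with the assigned user ID in *userid.
 */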
int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
{
	struct tipc_user *user_ptr;

	if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
		return -ENOPROTOOPT;
	if (!users)
		reg_init();

	spin_lock_bh(&reg_lock);
	if (!next_free_user) {
		spin_unlock_bh(&reg_lock);
		return -EBUSY;
	}
	user_ptr = &users[next_free_user];
	*userid = next_free_user;
	next_free_user = user_ptr->next;
	user_ptr->next = -1;
	spin_unlock_bh(&reg_lock);

	user_ptr->callback = cb;
	user_ptr->usr_handle = usr_handle;
	INIT_LIST_HEAD(&user_ptr->ports);
	atomic_inc(&tipc_user_count);

	if (cb && (tipc_mode != TIPC_NOT_RUNNING))
		tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
	return 0;
}
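
/**
 * tipc_detach - deregister a TIPC user
 *
 * Returns the user's registry entry to the free list and deletes any ports
 * still owned by the user.
 */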
void tipc_detach(u32 userid)
{
	struct tipc_user *user_ptr;
	struct list_head ports_temp;
	struct user_port *up_ptr, *temp_up_ptr;

	if ((userid == 0) || (userid > MAX_USERID))
		return;

	spin_lock_bh(&reg_lock);
	if ((!users) || (users[userid].next >= 0)) {
		spin_unlock_bh(&reg_lock);
		return;
	}

	user_ptr = &users[userid];
	user_ptr->callback = NULL;
	INIT_LIST_HEAD(&ports_temp);
	list_splice(&user_ptr->ports, &ports_temp);
	user_ptr->next = next_free_user;
	next_free_user = userid;
	spin_unlock_bh(&reg_lock);

	atomic_dec(&tipc_user_count);

	list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
		tipc_deleteport(up_ptr->ref);
	}
}
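
/**
 * tipc_reg_add_port - add a port to its owning user's port list
 *
 * Ports with no owning user (user_ref == 0) are ignored.
 */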
int tipc_reg_add_port(struct user_port *up_ptr)
{
	struct tipc_user *user_ptr;

	if (up_ptr->user_ref == 0)
		return 0;
	if (up_ptr->user_ref > MAX_USERID)
		return -EINVAL;
	if ((tipc_mode == TIPC_NOT_RUNNING) || !users)
		return -ENOPROTOOPT;

	spin_lock_bh(&reg_lock);
	user_ptr = &users[up_ptr->user_ref];
	list_add(&up_ptr->uport_list, &user_ptr->ports);
	spin_unlock_bh(&reg_lock);
	return 0;
}
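
/**
 * tipc_reg_remove_port - remove a port from its owning user's port list
 */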
int tipc_reg_remove_port(struct user_port *up_ptr)
{
	if (up_ptr->user_ref == 0)
		return 0;
	if (up_ptr->user_ref > MAX_USERID)
		return -EINVAL;
	if (!users)
		return -ENOPROTOOPT;

	spin_lock_bh(&reg_lock);
	list_del_init(&up_ptr->uport_list);
	spin_unlock_bh(&reg_lock);
	return 0;
}
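
/*
 * Usage sketch (illustrative only, not part of the registry itself): a
 * kernel-space TIPC user would typically attach once, keep the returned
 * user ID, and detach when done. The callback signature shown here is
 * assumed from the reg_callback() invocation above, i.e.
 * cb(usr_handle, mode, own_addr); the names below are hypothetical.
 *
 *	static void my_mode_event(void *usr_handle, int mode, u32 addr)
 *	{
 *		// react to TIPC changing its operating mode
 *	}
 *
 *	u32 my_user_id;
 *
 *	if (!tipc_attach(&my_user_id, my_mode_event, my_handle))
 *		... create ports owned by my_user_id ...
 *	tipc_detach(my_user_id);	// also deletes any ports still owned
 */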