1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38#include "core.h"
39#include "link.h"
40#include "name_distr.h"
41
/* Timeout (ms) for pending name-table distribution work; the "sysctl_" prefix
 * indicates this is exposed as a sysctl tunable (default 2000 ms).
 */
int sysctl_tipc_named_timeout __read_mostly = 2000;
43
/* distr_queue_item - a deferred name-table update queued for later handling
 *
 * NOTE(review): nothing in this file references this struct any more; it may
 * be a leftover from a removed deferred-queue mechanism - confirm against the
 * rest of the file / history before relying on it.
 */
struct distr_queue_item {
	struct distr_item i;	/* wire-format publication descriptor */
	u32 dtype;		/* message type (e.g. PUBLICATION/WITHDRAWAL) */
	u32 node;		/* originating node address */
	unsigned long expires;	/* expiry time for this entry */
	struct list_head next;	/* linkage into the distribution queue */
};
51
52
53
54
55
56
57static void publ_to_item(struct distr_item *i, struct publication *p)
58{
59 i->type = htonl(p->sr.type);
60 i->lower = htonl(p->sr.lower);
61 i->upper = htonl(p->sr.upper);
62 i->port = htonl(p->sk.ref);
63 i->key = htonl(p->key);
64}
65
66
67
68
69
70
71
72
73
74
75static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
76 u32 dest)
77{
78 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
79 u32 self = tipc_own_addr(net);
80 struct tipc_msg *msg;
81
82 if (buf != NULL) {
83 msg = buf_msg(buf);
84 tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
85 type, INT_H_SIZE, dest);
86 msg_set_size(msg, INT_H_SIZE + size);
87 }
88 return buf;
89}
90
91
92
93
94
95
96struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
97{
98 struct name_table *nt = tipc_name_table(net);
99 struct distr_item *item;
100 struct sk_buff *skb;
101
102 if (p->scope == TIPC_NODE_SCOPE) {
103 list_add_tail_rcu(&p->binding_node, &nt->node_scope);
104 return NULL;
105 }
106 write_lock_bh(&nt->cluster_scope_lock);
107 list_add_tail(&p->binding_node, &nt->cluster_scope);
108 write_unlock_bh(&nt->cluster_scope_lock);
109 skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
110 if (!skb) {
111 pr_warn("Publication distribution failure\n");
112 return NULL;
113 }
114 msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
115 msg_set_non_legacy(buf_msg(skb));
116 item = (struct distr_item *)msg_data(buf_msg(skb));
117 publ_to_item(item, p);
118 return skb;
119}
120
121
122
123
124
125
126struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
127{
128 struct name_table *nt = tipc_name_table(net);
129 struct distr_item *item;
130 struct sk_buff *skb;
131
132 write_lock_bh(&nt->cluster_scope_lock);
133 list_del(&p->binding_node);
134 write_unlock_bh(&nt->cluster_scope_lock);
135 if (p->scope == TIPC_NODE_SCOPE)
136 return NULL;
137
138 skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
139 if (!skb) {
140 pr_warn("Withdrawal distribution failure\n");
141 return NULL;
142 }
143 msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
144 msg_set_non_legacy(buf_msg(skb));
145 item = (struct distr_item *)msg_data(buf_msg(skb));
146 publ_to_item(item, p);
147 return skb;
148}
149
150
151
152
153
154
155
156
157
158static void named_distribute(struct net *net, struct sk_buff_head *list,
159 u32 dnode, struct list_head *pls, u16 seqno)
160{
161 struct publication *publ;
162 struct sk_buff *skb = NULL;
163 struct distr_item *item = NULL;
164 u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
165 ITEM_SIZE) * ITEM_SIZE;
166 u32 msg_rem = msg_dsz;
167 struct tipc_msg *hdr;
168
169 list_for_each_entry(publ, pls, binding_node) {
170
171 if (!skb) {
172 skb = named_prepare_buf(net, PUBLICATION, msg_rem,
173 dnode);
174 if (!skb) {
175 pr_warn("Bulk publication failure\n");
176 return;
177 }
178 hdr = buf_msg(skb);
179 msg_set_bc_ack_invalid(hdr, true);
180 msg_set_bulk(hdr);
181 msg_set_non_legacy(hdr);
182 item = (struct distr_item *)msg_data(hdr);
183 }
184
185
186 publ_to_item(item, publ);
187 item++;
188 msg_rem -= ITEM_SIZE;
189
190
191 if (!msg_rem) {
192 __skb_queue_tail(list, skb);
193 skb = NULL;
194 msg_rem = msg_dsz;
195 }
196 }
197 if (skb) {
198 hdr = buf_msg(skb);
199 msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
200 skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
201 __skb_queue_tail(list, skb);
202 }
203 hdr = buf_msg(skb_peek_tail(list));
204 msg_set_last_bulk(hdr);
205 msg_set_named_seqno(hdr, seqno);
206}
207
208
209
210
211
212
213
214void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
215{
216 struct name_table *nt = tipc_name_table(net);
217 struct tipc_net *tn = tipc_net(net);
218 struct sk_buff_head head;
219 u16 seqno;
220
221 __skb_queue_head_init(&head);
222 spin_lock_bh(&tn->nametbl_lock);
223 if (!(capabilities & TIPC_NAMED_BCAST))
224 nt->rc_dests++;
225 seqno = nt->snd_nxt;
226 spin_unlock_bh(&tn->nametbl_lock);
227
228 read_lock_bh(&nt->cluster_scope_lock);
229 named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
230 tipc_node_xmit(net, &head, dnode, 0);
231 read_unlock_bh(&nt->cluster_scope_lock);
232}
233
234
235
236
237
238
239
240
241
242
243static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
244{
245 struct tipc_net *tn = tipc_net(net);
246 struct publication *_p;
247 struct tipc_uaddr ua;
248
249 tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
250 p->sr.lower, p->sr.upper);
251 spin_lock_bh(&tn->nametbl_lock);
252 _p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
253 if (_p)
254 tipc_node_unsubscribe(net, &_p->binding_node, addr);
255 spin_unlock_bh(&tn->nametbl_lock);
256 if (_p)
257 kfree_rcu(_p, rcu);
258}
259
260void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
261 u32 addr, u16 capabilities)
262{
263 struct name_table *nt = tipc_name_table(net);
264 struct tipc_net *tn = tipc_net(net);
265
266 struct publication *publ, *tmp;
267
268 list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
269 tipc_publ_purge(net, publ, addr);
270 spin_lock_bh(&tn->nametbl_lock);
271 if (!(capabilities & TIPC_NAMED_BCAST))
272 nt->rc_dests--;
273 spin_unlock_bh(&tn->nametbl_lock);
274}
275
276
277
278
279
280
281
282
283
284
285
286
287static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
288 u32 node, u32 dtype)
289{
290 struct publication *p = NULL;
291 struct tipc_socket_addr sk;
292 struct tipc_uaddr ua;
293 u32 key = ntohl(i->key);
294
295 tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
296 ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
297 sk.ref = ntohl(i->port);
298 sk.node = node;
299
300 if (dtype == PUBLICATION) {
301 p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
302 if (p) {
303 tipc_node_subscribe(net, &p->binding_node, node);
304 return true;
305 }
306 } else if (dtype == WITHDRAWAL) {
307 p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
308 if (p) {
309 tipc_node_unsubscribe(net, &p->binding_node, node);
310 kfree_rcu(p, rcu);
311 return true;
312 }
313 pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
314 ua.sr.type, ua.sr.lower, node);
315 } else {
316 pr_warn("Unrecognized name table message received\n");
317 }
318 return false;
319}
320
/* tipc_named_dequeue - pick the next deliverable name-distribution message
 * @namedq: receive queue of NAME_DISTRIBUTOR messages from one peer
 * @rcv_nxt: next expected sequence number for sequenced (non-bulk) updates
 * @open: set once the end-of-bulk marker has been seen; until then only
 *        bulk/legacy messages are delivered
 *
 * Returns the unlinked buffer (ownership passes to the caller), or NULL if
 * nothing is deliverable yet. Out-of-date buffers are dropped in place.
 */
static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
					  u16 *rcv_nxt, bool *open)
{
	struct sk_buff *skb, *tmp;
	struct tipc_msg *hdr;
	u16 seqno;

	spin_lock_bh(&namedq->lock);
	skb_queue_walk_safe(namedq, skb, tmp) {
		/* Drop buffers whose header cannot be made linear */
		if (unlikely(skb_linearize(skb))) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
		hdr = buf_msg(skb);
		seqno = msg_named_seqno(hdr);
		/* End of bulk: sync expected seqno, open sequenced delivery */
		if (msg_is_last_bulk(hdr)) {
			*rcv_nxt = seqno;
			*open = true;
		}

		/* Bulk and legacy messages are delivered unconditionally */
		if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		/* In-order sequenced update: advance and deliver */
		if (*open && (*rcv_nxt == seqno)) {
			(*rcv_nxt)++;
			__skb_unlink(skb, namedq);
			spin_unlock_bh(&namedq->lock);
			return skb;
		}

		/* Stale (already superseded) sequence number: discard */
		if (less(seqno, *rcv_nxt)) {
			__skb_unlink(skb, namedq);
			kfree_skb(skb);
			continue;
		}
	}
	spin_unlock_bh(&namedq->lock);
	return NULL;
}
364
365
366
367
368
369
370
371
372void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
373 u16 *rcv_nxt, bool *open)
374{
375 struct tipc_net *tn = tipc_net(net);
376 struct distr_item *item;
377 struct tipc_msg *hdr;
378 struct sk_buff *skb;
379 u32 count, node;
380
381 spin_lock_bh(&tn->nametbl_lock);
382 while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
383 hdr = buf_msg(skb);
384 node = msg_orignode(hdr);
385 item = (struct distr_item *)msg_data(hdr);
386 count = msg_data_sz(hdr) / ITEM_SIZE;
387 while (count--) {
388 tipc_update_nametbl(net, item, node, msg_type(hdr));
389 item++;
390 }
391 kfree_skb(skb);
392 }
393 spin_unlock_bh(&tn->nametbl_lock);
394}
395
396
397
398
399
400
401
402
403
404void tipc_named_reinit(struct net *net)
405{
406 struct name_table *nt = tipc_name_table(net);
407 struct tipc_net *tn = tipc_net(net);
408 struct publication *p;
409 u32 self = tipc_own_addr(net);
410
411 spin_lock_bh(&tn->nametbl_lock);
412
413 list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
414 p->sk.node = self;
415 list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
416 p->sk.node = self;
417 nt->rc_dests = 0;
418 spin_unlock_bh(&tn->nametbl_lock);
419}
420