#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>		/* kzalloc()/kfree() */

#include <asm-generic/bitops/le.h>

#include "rds.h"

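/*
 * Congestion in RDS is tracked per port, not per message.  Each bound
 * address has an rds_cong_map: a bitmap with one bit per 16-bit port,
 * spread over RDS_CONG_MAP_PAGES pages.  When a bound socket's receive
 * queue fills up, the bit for its port is set in the local map and a
 * copy of the map is queued to every connection that shares it; senders
 * test the peer's map before transmitting and either sleep or fail with
 * -ENOBUFS while the destination port is marked congested.
 *
 * A global generation counter lets pollers notice that some map has
 * changed since they last looked.
 */
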
static atomic_t rds_cong_generation = ATOMIC_INIT(0);

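/*
 * Sockets that asked for RDS_CONG_MONITOR notifications.  Protected by
 * rds_cong_monitor_lock; entries are added in rds_cong_add_socket() and
 * removed in rds_cong_remove_socket().
 */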
static LIST_HEAD(rds_cong_monitor);
static DEFINE_RWLOCK(rds_cong_monitor_lock);

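/*
 * rds_cong_lock protects the rb-tree of congestion maps and each map's
 * list of connections.  It is only taken on the relatively rare paths
 * that create maps, attach or detach connections, and queue map
 * updates.  It masks interrupts because ports can be marked congested
 * from interrupt context.
 */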
static DEFINE_SPINLOCK(rds_cong_lock);
static struct rb_root rds_cong_tree = RB_ROOT;

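/*
 * Look up the map for @addr in the tree.  If no map exists and @insert
 * is non-NULL, @insert is linked into the tree instead and NULL is
 * returned.  The caller must hold rds_cong_lock.
 */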
static struct rds_cong_map *rds_cong_tree_walk(__be32 addr,
                                               struct rds_cong_map *insert)
{
        struct rb_node **p = &rds_cong_tree.rb_node;
        struct rb_node *parent = NULL;
        struct rds_cong_map *map;

        while (*p) {
                parent = *p;
                map = rb_entry(parent, struct rds_cong_map, m_rb_node);

                if (addr < map->m_addr)
                        p = &(*p)->rb_left;
                else if (addr > map->m_addr)
                        p = &(*p)->rb_right;
                else
                        return map;
        }

        if (insert) {
                rb_link_node(&insert->m_rb_node, parent, p);
                rb_insert_color(&insert->m_rb_node, &rds_cong_tree);
        }
        return NULL;
}

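/*
 * Return the congestion map for @addr, creating it if it doesn't exist
 * yet.  A new map is allocated optimistically and then either inserted
 * into the tree or freed again if a map for @addr already exists.
 * Returns NULL on allocation failure.
 */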
static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
{
        struct rds_cong_map *map;
        struct rds_cong_map *ret = NULL;
        unsigned long zp;
        unsigned long i;
        unsigned long flags;

        map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
        if (map == NULL)
                return NULL;

        map->m_addr = addr;
        init_waitqueue_head(&map->m_waitq);
        INIT_LIST_HEAD(&map->m_conn_list);

        for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
                zp = get_zeroed_page(GFP_KERNEL);
                if (zp == 0)
                        goto out;
                map->m_page_addrs[i] = zp;
        }

        spin_lock_irqsave(&rds_cong_lock, flags);
        ret = rds_cong_tree_walk(addr, map);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (ret == NULL) {
                ret = map;
                map = NULL;
        }

out:
        if (map) {
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }

        rdsdebug("map %p for addr %x\n", ret, be32_to_cpu(addr));

        return ret;
}

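/*
 * Put a connection on its local map's list, or take it off again, so
 * that map updates can be queued to it.  Called as the connection is
 * added to or removed from the connection hash.
 */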
void rds_cong_add_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("conn %p now on map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_add_tail(&conn->c_map_item, &conn->c_lcong->m_conn_list);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

void rds_cong_remove_conn(struct rds_connection *conn)
{
        unsigned long flags;

        rdsdebug("removing conn %p from map %p\n", conn, conn->c_lcong);
        spin_lock_irqsave(&rds_cong_lock, flags);
        list_del_init(&conn->c_map_item);
        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

int rds_cong_get_maps(struct rds_connection *conn)
{
        conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
        conn->c_fcong = rds_cong_from_addr(conn->c_faddr);

        if (conn->c_lcong == NULL || conn->c_fcong == NULL)
                return -ENOMEM;

        return 0;
}

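/*
 * Ask every connection that shares this map to transmit a fresh copy of
 * it.  The c_map_queued bit ensures each connection has at most one
 * update queued at a time.
 */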
void rds_cong_queue_updates(struct rds_cong_map *map)
{
        struct rds_connection *conn;
        unsigned long flags;

        spin_lock_irqsave(&rds_cong_lock, flags);

        list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
                if (!test_and_set_bit(0, &conn->c_map_queued)) {
                        rds_stats_inc(s_cong_update_queued);
                        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
                }
        }

        spin_unlock_irqrestore(&rds_cong_lock, flags);
}

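/*
 * Called when an updated congestion map has been received from a peer.
 * Bump the generation counter, wake anyone sleeping on the map or in
 * poll, and notify monitor sockets that were waiting on any of the
 * ports in @portmask.
 */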
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
{
        rdsdebug("waking map %p for %pI4\n",
                 map, &map->m_addr);
        rds_stats_inc(s_cong_update_received);
        atomic_inc(&rds_cong_generation);
        if (waitqueue_active(&map->m_waitq))
                wake_up(&map->m_waitq);
        if (waitqueue_active(&rds_poll_waitq))
                wake_up_all(&rds_poll_waitq);

        if (portmask && !list_empty(&rds_cong_monitor)) {
                unsigned long flags;
                struct rds_sock *rs;

                read_lock_irqsave(&rds_cong_monitor_lock, flags);
                list_for_each_entry(rs, &rds_cong_monitor, rs_cong_list) {
                        spin_lock(&rs->rs_lock);
                        rs->rs_cong_notify |= (rs->rs_cong_mask & portmask);
                        rs->rs_cong_mask &= ~portmask;
                        spin_unlock(&rs->rs_lock);
                        if (rs->rs_cong_notify)
                                rds_wake_sk_sleep(rs);
                }
                read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
        }
}

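/*
 * Tell the caller (typically poll) whether any congestion map has been
 * updated since the generation cached in *recent, and remember the
 * current generation for next time.
 */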
int rds_cong_updated_since(unsigned long *recent)
{
        unsigned long gen = atomic_read(&rds_cong_generation);

        if (likely(*recent == gen))
                return 0;
        *recent = gen;
        return 1;
}

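/*
 * Set or clear the congestion bit for a bound port in the given map.
 * The port number indexes the bit; it is split into a page index and an
 * offset within that page.  Callers serialize updates for a given port
 * via the locking around the socket receive path, which is why the
 * non-atomic bit helpers are used here.
 */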
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("setting congestion for %pI4:%u in map %p\n",
                 &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        generic___set_le_bit(off, (void *)map->m_page_addrs[i]);
}

void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        rdsdebug("clearing congestion for %pI4:%u in map %p\n",
                 &map->m_addr, ntohs(port), map);

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        generic___clear_le_bit(off, (void *)map->m_page_addrs[i]);
}

static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
{
        unsigned long i;
        unsigned long off;

        i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
        off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;

        return generic_test_le_bit(off, (void *)map->m_page_addrs[i]);
}

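/*
 * rds_cong_add_socket() puts a socket on the congestion-monitor list;
 * rds_cong_remove_socket() takes it off again and, because the socket
 * is going away, clears the congestion bit for its bound port and
 * queues a map update so peers learn that the port can no longer back
 * up.
 */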
void rds_cong_add_socket(struct rds_sock *rs)
{
        unsigned long flags;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        if (list_empty(&rs->rs_cong_list))
                list_add(&rs->rs_cong_list, &rds_cong_monitor);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}

void rds_cong_remove_socket(struct rds_sock *rs)
{
        unsigned long flags;
        struct rds_cong_map *map;

        write_lock_irqsave(&rds_cong_monitor_lock, flags);
        list_del_init(&rs->rs_cong_list);
        write_unlock_irqrestore(&rds_cong_monitor_lock, flags);

        /* update the congestion map for the now-closed port */
        spin_lock_irqsave(&rds_cong_lock, flags);
        map = rds_cong_tree_walk(rs->rs_bound_addr, NULL);
        spin_unlock_irqrestore(&rds_cong_lock, flags);

        if (map && rds_cong_test_bit(map, rs->rs_bound_port)) {
                rds_cong_clear_bit(map, rs->rs_bound_port);
                rds_cong_queue_updates(map);
        }
}

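/*
 * Check whether sends to @port are currently congested.  Non-blocking
 * sockets that enabled RDS_CONG_MONITOR arm their monitor mask before
 * failing, so they are notified when the port frees up; everyone else
 * either fails with -ENOBUFS or sleeps until the bit clears.
 */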
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
                  struct rds_sock *rs)
{
        if (!rds_cong_test_bit(map, port))
                return 0;
        if (nonblock) {
                if (rs && rs->rs_cong_monitor) {
                        unsigned long flags;

                        /* It would have been nice to have an atomic set_bit on
                         * a uint64_t. */
                        spin_lock_irqsave(&rs->rs_lock, flags);
                        rs->rs_cong_mask |= RDS_CONG_MONITOR_MASK(ntohs(port));
                        spin_unlock_irqrestore(&rs->rs_lock, flags);

                        /* Test again - a congestion update may have arrived in
                         * the meantime. */
                        if (!rds_cong_test_bit(map, port))
                                return 0;
                }
                rds_stats_inc(s_cong_send_error);
                return -ENOBUFS;
        }

        rds_stats_inc(s_cong_send_blocked);
        rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port));

        return wait_event_interruptible(map->m_waitq,
                                        !rds_cong_test_bit(map, port));
}

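/*
 * Module teardown: free every congestion map still in the tree along
 * with its bitmap pages.
 */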
void rds_cong_exit(void)
{
        struct rb_node *node;
        struct rds_cong_map *map;
        unsigned long i;

        while ((node = rb_first(&rds_cong_tree))) {
                map = rb_entry(node, struct rds_cong_map, m_rb_node);
                rdsdebug("freeing map %p\n", map);
                rb_erase(&map->m_rb_node, &rds_cong_tree);
                for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++)
                        free_page(map->m_page_addrs[i]);
                kfree(map);
        }
}

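/*
 * Allocate an RDS message whose payload maps the pages of the
 * connection's local congestion map, so the map can be sent to the
 * peer.
 */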
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn)
{
        struct rds_cong_map *map = conn->c_lcong;
        struct rds_message *rm;

        rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
        if (!IS_ERR(rm))
                rm->m_inc.i_hdr.h_flags = RDS_FLAG_CONG_BITMAP;

        return rm;
}