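/*
 * IPVS:	Locality-Based Least-Connection scheduling module
 *
 * The LBLC scheduler keeps a per-service cache that maps a destination
 * IP address to the real server last chosen for it, and keeps sending
 * traffic for that address to the same server as long as the server is
 * available and not overloaded; otherwise it falls back to weighted
 * least-connection selection and updates the cache.
 */
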
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

#include <linux/fs.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>
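
/*
 * The garbage-collection timer fires every CHECK_EXPIRE_INTERVAL.
 * ENTRY_TIMEOUT is the idle time after which an entry may be reclaimed
 * when the table is over its soft limit; DEFAULT_EXPIRATION is the
 * default value of the lblc_expiration sysctl used by the full
 * expiration pass.
 */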
#define CHECK_EXPIRE_INTERVAL (60*HZ)
#define ENTRY_TIMEOUT (6*60*HZ)

#define DEFAULT_EXPIRATION (24*60*60*HZ)
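
/*
 * A full expiration pass (using the configurable lblc_expiration
 * timeout) runs only once every COUNT_FOR_FULL_EXPIRATION timer ticks;
 * the ticks in between only trim the table when it is over-full.
 */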
#define COUNT_FOR_FULL_EXPIRATION 30
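
/*
 * The cache is a hash table with 2^CONFIG_IP_VS_LBLC_TAB_BITS buckets
 * (1024 by default).
 */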
#ifndef CONFIG_IP_VS_LBLC_TAB_BITS
#define CONFIG_IP_VS_LBLC_TAB_BITS 10
#endif
#define IP_VS_LBLC_TAB_BITS CONFIG_IP_VS_LBLC_TAB_BITS
#define IP_VS_LBLC_TAB_SIZE (1 << IP_VS_LBLC_TAB_BITS)
#define IP_VS_LBLC_TAB_MASK (IP_VS_LBLC_TAB_SIZE - 1)
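
/*
 * An LBLC cache entry: the association between a destination IP address
 * and the real server currently handling it.
 */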
struct ip_vs_lblc_entry {
        struct list_head list;
        int af;
        union nf_inet_addr addr;
        struct ip_vs_dest *dest;
        unsigned long lastuse;
};
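
/*
 * Per-service hash table of LBLC entries, plus the garbage-collection
 * timer and the bookkeeping used by the partial expiration scans.
 */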
struct ip_vs_lblc_table {
        struct list_head bucket[IP_VS_LBLC_TAB_SIZE];
        atomic_t entries;
        int max_size;
        struct timer_list periodic_timer;
        int rover;
        int counter;
};
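
/*
 * The lblc_expiration sysctl knob (in jiffies).  The .data pointer is
 * left NULL here and wired up to the per-netns value at netns init.
 */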
#ifdef CONFIG_SYSCTL
static ctl_table vs_vars_table[] = {
        {
                .procname = "lblc_expiration",
                .data = NULL,
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_jiffies,
        },
        { }
};
#endif
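
/*
 * Unhash the entry and free it.  Only the reference on the destination
 * is dropped here; the destination itself remains owned by its service
 * (or the trash list) and is freed elsewhere.
 */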
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
        list_del(&en->list);

        atomic_dec(&en->dest->refcnt);
        kfree(en);
}
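
/*
 * Return the hash bucket index for the given destination address; an
 * IPv6 address is first folded down to 32 bits.
 */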
static inline unsigned
ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
{
        __be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6)
                addr_fold = addr->ip6[0]^addr->ip6[1]^
                            addr->ip6[2]^addr->ip6[3];
#endif
        return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLC_TAB_MASK;
}
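
/*
 * Hash an entry into the per-service table (no duplicate check; the
 * caller looks the address up first).
 */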
static void
ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
{
        unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr);

        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}
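
/*
 * Look up the cache entry for the given destination address, or return
 * NULL.  Called with svc->sched_lock held.
 */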
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
               const union nf_inet_addr *addr)
{
        unsigned hash = ip_vs_lblc_hashkey(af, addr);
        struct ip_vs_lblc_entry *en;

        list_for_each_entry(en, &tbl->bucket[hash], list)
                if (ip_vs_addr_equal(af, &en->addr, addr))
                        return en;

        return NULL;
}
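
/*
 * Create a new entry for the given destination address, or retarget an
 * existing one to a different real server.  Called with svc->sched_lock
 * held for writing.
 */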
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
               struct ip_vs_dest *dest)
{
        struct ip_vs_lblc_entry *en;

        en = ip_vs_lblc_get(dest->af, tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en)
                        return NULL;

                en->af = dest->af;
                ip_vs_addr_copy(dest->af, &en->addr, daddr);
                en->lastuse = jiffies;

                atomic_inc(&dest->refcnt);
                en->dest = dest;

                ip_vs_lblc_hash(tbl, en);
        } else if (en->dest != dest) {
                atomic_dec(&en->dest->refcnt);
                atomic_inc(&dest->refcnt);
                en->dest = dest;
        }

        return en;
}
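
/*
 * Flush all cached entries of the given table.
 */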
static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
{
        struct ip_vs_lblc_entry *en, *nxt;
        int i;

        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
        }
}

static int sysctl_lblc_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
        struct netns_ipvs *ipvs = net_ipvs(svc->net);
        return ipvs->sysctl_lblc_expiration;
#else
        return DEFAULT_EXPIRATION;
#endif
}
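
/*
 * Full expiration pass: drop every entry that has been idle longer than
 * the (sysctl-configurable) lblc_expiration timeout.
 */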
static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_lblc_entry *en, *nxt;
        unsigned long now = jiffies;
        int i, j;

        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now,
                                        en->lastuse +
                                        sysctl_lblc_expiration(svc)))
                                continue;

                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}
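
/*
 * Periodic timer handler for the LBLC table.
 *
 * On most ticks it only trims the table when it holds more than
 * max_size entries, freeing entries idle for longer than ENTRY_TIMEOUT
 * until roughly (entries - max_size) * 4/3 of them, capped at half of
 * max_size, have been reclaimed.  Every COUNT_FOR_FULL_EXPIRATION ticks
 * a full expiration pass runs instead.  Each bucket is scanned under
 * the service's write lock.
 */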
static void ip_vs_lblc_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblc_entry *en, *nxt;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                ip_vs_lblc_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

        for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLC_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblc_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
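
/*
 * Scheduler init_service hook: allocate the per-service LBLC table and
 * start the garbage-collection timer.
 */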
static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblc_table *tbl;
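
        /* Allocate the ip_vs_lblc_table for this service */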
        tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
        if (tbl == NULL)
                return -ENOMEM;

        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));
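
        /* Initialize the hash buckets and the expiration bookkeeping */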
        for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;
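
        /* Hook the periodic garbage-collection timer */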
        setup_timer(&tbl->periodic_timer, ip_vs_lblc_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}
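
/*
 * Scheduler done_service hook: stop the garbage-collection timer, flush
 * all cached entries and free the table.
 */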
static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;

        del_timer_sync(&tbl->periodic_timer);

        ip_vs_lblc_flush(tbl);

        kfree(tbl);
        IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));

        return 0;
}
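
/*
 * Weighted least-connection selection over the service's destinations,
 * used when there is no usable cache entry for the destination IP.
 *
 * The destination minimising overhead/weight is wanted.  As floating
 * point cannot be used in the kernel, the two ratios are compared by
 * cross-multiplying:
 *
 *	loh/least->weight > doh/dest->weight
 *  <=>	loh * dest->weight > doh * least->weight
 *
 * where loh and doh are the connection overheads reported by
 * ip_vs_dest_conn_overhead().  Overloaded destinations are skipped.
 */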
static inline struct ip_vs_dest *
__ip_vs_lblc_schedule(struct ip_vs_service *svc)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;
                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = ip_vs_dest_conn_overhead(least);
                        goto nextstage;
                }
        }
        return NULL;

  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = ip_vs_dest_conn_overhead(dest);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG_BUF(6, "LBLC: server %s:%d "
                      "activeconns %d refcnt %d weight %d overhead %d\n",
                      IP_VS_DBG_ADDR(least->af, &least->addr),
                      ntohs(least->port),
                      atomic_read(&least->activeconns),
                      atomic_read(&least->refcnt),
                      atomic_read(&least->weight), loh);

        return least;
}
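
/*
 * A destination counts as overloaded when it already has more active
 * connections than its weight while some other destination of the
 * service still has fewer than half its weight in active connections.
 */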
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}
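
/*
 * Locality-Based (weighted) Least-Connection scheduling.
 *
 * Reuse the cached server for the packet's destination IP while that
 * server is still available, has a non-zero weight and is not
 * overloaded; otherwise pick a new server with __ip_vs_lblc_schedule()
 * and record it in the cache.
 */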
static struct ip_vs_dest *
ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblc_table *tbl = svc->sched_data;
        struct ip_vs_iphdr iph;
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblc_entry *en;

        ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

        IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

        read_lock(&svc->sched_lock);
        en = ip_vs_lblc_get(svc->af, tbl, &iph.daddr);
        if (en) {
                en->lastuse = jiffies;
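
                /*
                 * Only reuse the cached destination while it is still
                 * flagged IP_VS_DEST_F_AVAILABLE; a destination that has
                 * been removed from the service may go away once its
                 * remaining references are dropped, so it cannot be
                 * trusted here.
                 */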
                if (en->dest->flags & IP_VS_DEST_F_AVAILABLE)
                        dest = en->dest;
        }
        read_unlock(&svc->sched_lock);

        if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
                goto out;
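
        /* No usable cache entry: pick a server and record the mapping */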
        dest = __ip_vs_lblc_schedule(svc);
        if (!dest) {
                ip_vs_scheduler_err(svc, "no destination available");
                return NULL;
        }

        write_lock(&svc->sched_lock);
        ip_vs_lblc_new(tbl, &iph.daddr, dest);
        write_unlock(&svc->sched_lock);

out:
        IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
                      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
                      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

        return dest;
}
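
/*
 * IPVS LBLC scheduler structure
 */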
static struct ip_vs_scheduler ip_vs_lblc_scheduler =
{
        .name = "lblc",
        .refcnt = ATOMIC_INIT(0),
        .module = THIS_MODULE,
        .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
        .init_service = ip_vs_lblc_init_svc,
        .done_service = ip_vs_lblc_done_svc,
        .schedule = ip_vs_lblc_schedule,
};
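
/*
 * Per-netns sysctl setup: init_net registers the static vs_vars_table
 * directly, every other namespace gets its own copy, and the table's
 * .data pointer is wired to that namespace's sysctl_lblc_expiration.
 */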
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblc_init(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        if (!net_eq(net, &init_net)) {
                ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
                                               sizeof(vs_vars_table),
                                               GFP_KERNEL);
                if (ipvs->lblc_ctl_table == NULL)
                        return -ENOMEM;
        } else
                ipvs->lblc_ctl_table = vs_vars_table;
        ipvs->sysctl_lblc_expiration = DEFAULT_EXPIRATION;
        ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;

        ipvs->lblc_ctl_header =
                register_net_sysctl_table(net, net_vs_ctl_path,
                                          ipvs->lblc_ctl_table);
        if (!ipvs->lblc_ctl_header) {
                if (!net_eq(net, &init_net))
                        kfree(ipvs->lblc_ctl_table);
                return -ENOMEM;
        }

        return 0;
}

static void __net_exit __ip_vs_lblc_exit(struct net *net)
{
        struct netns_ipvs *ipvs = net_ipvs(net);

        unregister_net_sysctl_table(ipvs->lblc_ctl_header);

        if (!net_eq(net, &init_net))
                kfree(ipvs->lblc_ctl_table);
}

#else

static int __net_init __ip_vs_lblc_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblc_exit(struct net *net) { }

#endif
static struct pernet_operations ip_vs_lblc_ops = {
        .init = __ip_vs_lblc_init,
        .exit = __ip_vs_lblc_exit,
};

static int __init ip_vs_lblc_init(void)
{
        int ret;

        ret = register_pernet_subsys(&ip_vs_lblc_ops);
        if (ret)
                return ret;

        ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        if (ret)
                unregister_pernet_subsys(&ip_vs_lblc_ops);
        return ret;
}

static void __exit ip_vs_lblc_cleanup(void)
{
        unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
        unregister_pernet_subsys(&ip_vs_lblc_ops);
}

module_init(ip_vs_lblc_init);
module_exit(ip_vs_lblc_cleanup);
MODULE_LICENSE("GPL");