linux/include/net/inet_hashtables.h
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 * Authors:     Lotsa people, from code originally in tcp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * One chain is dedicated to TIME_WAIT sockets.
 * I'll experiment with dynamic table growth later.
 */
struct inet_ehash_bucket {
        struct hlist_head chain;
        struct hlist_head twchain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *      1) Sockets bound to different interfaces may share a local port.
 *         Failing that, goto test 2.
 *      2) If all sockets have sk->sk_reuse set, and none of them are in
 *         TCP_LISTEN state, the port may be shared.
 *         Failing that, goto test 3.
 *      3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *         address, and none of them are the same, the port may be
 *         shared.
 *         Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all;
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)   -DaveM
 */
struct inet_bind_bucket {
        struct net              *ib_net;
        unsigned short          port;
        signed short            fastreuse;
        struct hlist_node       node;
        struct hlist_head       owners;
};
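
/*
 * For illustration only (a sketch, not copied from any caller; variable
 * names are hypothetical): the check described above that keeps
 * tb->fastreuse meaningful as sockets join a bind bucket.
 *
 *      if (tb->fastreuse > 0 &&
 *          (!newsk->sk_reuse || newsk->sk_state == TCP_LISTEN))
 *              tb->fastreuse = 0;
 *
 * As long as tb->fastreuse stays positive, a binding socket with
 * sk->sk_reuse set may share the port without walking tb->owners.
 */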

#define inet_bind_bucket_for_each(tb, node, head) \
        hlist_for_each_entry(tb, node, head, node)

struct inet_bind_hashbucket {
        spinlock_t              lock;
        struct hlist_head       chain;
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE       32      /* Yes, really, this is all you need. */

struct inet_hashinfo {
        /* This is for sockets with full identity only.  Sockets here will
         * always be without wildcards and will have the following invariant:
         *
         *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
         *
         * TIME_WAIT sockets use a separate chain (twchain).
         */
        struct inet_ehash_bucket        *ehash;
        rwlock_t                        *ehash_locks;
        unsigned int                    ehash_size;
        unsigned int                    ehash_locks_mask;

        /* Ok, let's try this, I give up, we do need a local binding
         * TCP hash as well as the others for fast bind/connect.
         */
        struct inet_bind_hashbucket     *bhash;

        unsigned int                    bhash_size;
        /* Note : 4 bytes padding on 64 bit arches */

        /* All sockets in TCP_LISTEN state will be in here.  This is the only
         * table where wildcard'd TCP sockets can exist.  Hash function here
         * is just local port number.
         */
        struct hlist_head               listening_hash[INET_LHTABLE_SIZE];

        /* All the above members are written once at bootup and
         * never written again _or_ are predominantly read-access.
         *
         * Now align to a new cache line as all the following members
         * are often dirty.
         */
        rwlock_t                        lhash_lock ____cacheline_aligned;
        atomic_t                        lhash_users;
        wait_queue_head_t               lhash_wait;
        struct kmem_cache               *bind_bucket_cachep;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}

static inline rwlock_t *inet_ehash_lockp(
        struct inet_hashinfo *hashinfo,
        unsigned int hash)
{
        return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}
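
/*
 * The two helpers above are meant to be used as a pair: the same hash value
 * picks both the bucket and the rwlock covering it.  A minimal sketch of the
 * read side (names are illustrative):
 *
 *      struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
 *      rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
 *
 *      read_lock(lock);
 *      ... walk head->chain, then head->twchain ...
 *      read_unlock(lock);
 */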

static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
        unsigned int i, size = 256;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif
        if (nr_pcpus >= 4)
                size = 512;
        if (nr_pcpus >= 8)
                size = 1024;
        if (nr_pcpus >= 16)
                size = 2048;
        if (nr_pcpus >= 32)
                size = 4096;
        if (sizeof(rwlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(rwlock_t) > PAGE_SIZE)
                        hashinfo->ehash_locks = vmalloc(size * sizeof(rwlock_t));
                else
#endif
                hashinfo->ehash_locks = kmalloc(size * sizeof(rwlock_t),
                                                GFP_KERNEL);
                if (!hashinfo->ehash_locks)
                        return ENOMEM;
                for (i = 0; i < size; i++)
                        rwlock_init(&hashinfo->ehash_locks[i]);
        }
        hashinfo->ehash_locks_mask = size - 1;
        return 0;
}

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
        if (hashinfo->ehash_locks) {
#ifdef CONFIG_NUMA
                unsigned int size = (hashinfo->ehash_locks_mask + 1) *
                                                        sizeof(rwlock_t);
                if (size > PAGE_SIZE)
                        vfree(hashinfo->ehash_locks);
                else
#endif
                kfree(hashinfo->ehash_locks);
                hashinfo->ehash_locks = NULL;
        }
}
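
/*
 * Expected pairing for the two functions above (a sketch; "my_hashinfo" and
 * the error label are hypothetical): allocate the lock array once when the
 * table is set up, free it when the table is torn down.
 *
 *      if (inet_ehash_locks_alloc(&my_hashinfo))
 *              goto out_free_ehash;
 *      ...
 *      inet_ehash_locks_free(&my_hashinfo);
 */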

extern struct inet_bind_bucket *
                    inet_bind_bucket_create(struct kmem_cache *cachep,
                                            struct net *net,
                                            struct inet_bind_hashbucket *head,
                                            const unsigned short snum);
extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
                                     struct inet_bind_bucket *tb);

static inline int inet_bhashfn(struct net *net,
                const __u16 lport, const int bhash_size)
{
        return (lport + net_hash_mix(net)) & (bhash_size - 1);
}
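
/*
 * Usage sketch (illustrative only): selecting and locking the bind-hash
 * chain for a local port, as the bind/connect paths do.
 *
 *      struct inet_bind_hashbucket *head =
 *              &hashinfo->bhash[inet_bhashfn(net, snum, hashinfo->bhash_size)];
 *
 *      spin_lock(&head->lock);
 *      ... search head->chain with inet_bind_bucket_for_each() ...
 *      spin_unlock(&head->lock);
 */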

extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
                           const unsigned short snum);

/* These can have wildcards, don't try too hard. */
static inline int inet_lhashfn(struct net *net, const unsigned short num)
{
        return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
        return inet_lhashfn(sock_net(sk), inet_sk(sk)->num);
}

/* Caller must disable local BH processing. */
extern void __inet_inherit_port(struct sock *sk, struct sock *child);

extern void inet_put_port(struct sock *sk);

extern void inet_listen_wlock(struct inet_hashinfo *hashinfo);

/*
 * - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&inet_hashinfo.lhash_lock).
 */
static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
{
        /* read_lock synchronizes with candidate writers (inet_listen_wlock) */
        read_lock(&hashinfo->lhash_lock);
        atomic_inc(&hashinfo->lhash_users);
        read_unlock(&hashinfo->lhash_lock);
}

static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
{
        if (atomic_dec_and_test(&hashinfo->lhash_users))
                wake_up(&hashinfo->lhash_wait);
}
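
/*
 * Read-side sketch for the listening hash (illustrative only): take a
 * "user" reference, walk the chains, then drop it so that a pending
 * inet_listen_wlock() caller can be woken up.
 *
 *      inet_listen_lock(hashinfo);
 *      sk_for_each(sk, node, &hashinfo->listening_hash[slot]) {
 *              ...
 *      }
 *      inet_listen_unlock(hashinfo);
 */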

extern void __inet_hash_nolisten(struct sock *sk);
extern void inet_hash(struct sock *sk);
extern void inet_unhash(struct sock *sk);

extern struct sock *__inet_lookup_listener(struct net *net,
                                           struct inet_hashinfo *hashinfo,
                                           const __be32 daddr,
                                           const unsigned short hnum,
                                           const int dif);

static inline struct sock *inet_lookup_listener(struct net *net,
                struct inet_hashinfo *hashinfo,
                __be32 daddr, __be16 dport, int dif)
{
        return __inet_lookup_listener(net, hashinfo, daddr, ntohs(dport), dif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
typedef __u32 __bitwise __portpair;
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
        ((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
        ((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif
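
/*
 * A sketch of how the combined value gets used (this mirrors INET_MATCH
 * below; names are illustrative): build the key once per lookup, then
 * compare it against the 32 bits at &inet_sk(sk)->dport, which hold both
 * the remote port (dport, network order) and the bound local port (num,
 * host order).
 *
 *      const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *      ...
 *      if (*((__portpair *)&(inet_sk(sk)->dport)) == ports)
 *              ... both ports match ...
 */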

#if (BITS_PER_LONG == 64)
typedef __u64 __bitwise __addrpair;
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__saddr)) << 32) | \
                                   ((__force __u64)(__be32)(__daddr)));
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
        const __addrpair __name = (__force __addrpair) ( \
                                   (((__force __u64)(__be32)(__daddr)) << 32) | \
                                   ((__force __u64)(__be32)(__saddr)));
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
        (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)   &&      \
         ((*((__addrpair *)&(inet_sk(__sk)->daddr))) == (__cookie))     &&      \
         ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))      &&      \
         (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
        (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)   &&      \
         ((*((__addrpair *)&(inet_twsk(__sk)->tw_daddr))) == (__cookie)) &&     \
         ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&      \
         (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr)
#define INET_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)     \
        (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)   &&      \
         (inet_sk(__sk)->daddr          == (__saddr))           &&      \
         (inet_sk(__sk)->rcv_saddr      == (__daddr))           &&      \
         ((*((__portpair *)&(inet_sk(__sk)->dport))) == (__ports))      &&      \
         (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#define INET_TW_MATCH(__sk, __net, __hash, __cookie, __saddr, __daddr, __ports, __dif)  \
        (((__sk)->sk_hash == (__hash)) && sock_net((__sk)) == (__net)   &&      \
         (inet_twsk(__sk)->tw_daddr     == (__saddr))           &&      \
         (inet_twsk(__sk)->tw_rcv_saddr == (__daddr))           &&      \
         ((*((__portpair *)&(inet_twsk(__sk)->tw_dport))) == (__ports)) &&      \
         (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
#endif /* 64-bit arch */
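
/*
 * Putting the pieces together: a condensed sketch of the established-hash
 * walk (close in spirit to __inet_lookup_established(), but not a verbatim
 * copy of it; names are illustrative).
 *
 *      const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
 *      INET_ADDR_COOKIE(acookie, saddr, daddr)
 *      struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
 *      rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
 *
 *      read_lock(lock);
 *      sk_for_each(sk, node, &head->chain) {
 *              if (INET_MATCH(sk, net, hash, acookie,
 *                             saddr, daddr, ports, dif))
 *                      goto hit;
 *      }
 *      ... then head->twchain with INET_TW_MATCH() ...
 *      read_unlock(lock);
 */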

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 *
 * Local BH must be disabled here.
 */
extern struct sock * __inet_lookup_established(struct net *net,
                struct inet_hashinfo *hashinfo,
                const __be32 saddr, const __be16 sport,
                const __be32 daddr, const u16 hnum, const int dif);

static inline struct sock *
        inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
                                const __be32 saddr, const __be16 sport,
                                const __be32 daddr, const __be16 dport,
                                const int dif)
{
        return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
                                         ntohs(dport), dif);
}

static inline struct sock *__inet_lookup(struct net *net,
                                         struct inet_hashinfo *hashinfo,
                                         const __be32 saddr, const __be16 sport,
                                         const __be32 daddr, const __be16 dport,
                                         const int dif)
{
        u16 hnum = ntohs(dport);
        struct sock *sk = __inet_lookup_established(net, hashinfo,
                                saddr, sport, daddr, hnum, dif);

        return sk ? : __inet_lookup_listener(net, hashinfo, daddr, hnum, dif);
}

static inline struct sock *inet_lookup(struct net *net,
                                       struct inet_hashinfo *hashinfo,
                                       const __be32 saddr, const __be16 sport,
                                       const __be32 daddr, const __be16 dport,
                                       const int dif)
{
        struct sock *sk;

        local_bh_disable();
        sk = __inet_lookup(net, hashinfo, saddr, sport, daddr, dport, dif);
        local_bh_enable();

        return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
                                             struct sk_buff *skb,
                                             const __be16 sport,
                                             const __be16 dport)
{
        struct sock *sk;
        const struct iphdr *iph = ip_hdr(skb);

        if (unlikely(sk = skb_steal_sock(skb)))
                return sk;
        else
                return __inet_lookup(dev_net(skb->dst->dev), hashinfo,
                                     iph->saddr, sport,
                                     iph->daddr, dport, inet_iif(skb));
}
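
/*
 * Typical call-site shape (a sketch along the lines of a protocol receive
 * handler, not lifted from one): the ports come straight from the transport
 * header, the addresses and input interface from the skb.
 *
 *      const struct tcphdr *th = tcp_hdr(skb);
 *      struct sock *sk;
 *
 *      sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
 *      if (!sk)
 *              goto no_socket;
 */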

extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                struct sock *sk, u32 port_offset,
                int (*check_established)(struct inet_timewait_death_row *,
                        struct sock *, __u16, struct inet_timewait_sock **),
                               void (*hash)(struct sock *sk));
extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
                             struct sock *sk);
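
/*
 * On the connect() path, inet_hash_connect() is the convenience wrapper
 * around __inet_hash_connect(); a sketch of where it sits (error handling
 * trimmed, names illustrative):
 *
 *      err = inet_hash_connect(&tcp_death_row, sk);
 *      if (err)
 *              goto failure;
 */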
#endif /* _INET_HASHTABLES_H */