linux/net/ipv6/udp.c
   1/*
   2 *      UDP over IPv6
   3 *      Linux INET6 implementation
   4 *
   5 *      Authors:
   6 *      Pedro Roque             <roque@di.fc.ul.pt>
   7 *
   8 *      Based on linux/ipv4/udp.c
   9 *
  10 *      Fixes:
  11 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
  12 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
   13 *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
  14 *                                      a single port at the same time.
  15 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
  16 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/udp6 to seq_file.
  17 *
  18 *      This program is free software; you can redistribute it and/or
  19 *      modify it under the terms of the GNU General Public License
  20 *      as published by the Free Software Foundation; either version
  21 *      2 of the License, or (at your option) any later version.
  22 */
  23
  24#include <linux/errno.h>
  25#include <linux/types.h>
  26#include <linux/socket.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/in6.h>
  30#include <linux/netdevice.h>
  31#include <linux/if_arp.h>
  32#include <linux/ipv6.h>
  33#include <linux/icmpv6.h>
  34#include <linux/init.h>
  35#include <linux/module.h>
  36#include <linux/skbuff.h>
  37#include <linux/slab.h>
  38#include <asm/uaccess.h>
  39
  40#include <net/ndisc.h>
  41#include <net/protocol.h>
  42#include <net/transp_v6.h>
  43#include <net/ip6_route.h>
  44#include <net/raw.h>
  45#include <net/tcp_states.h>
  46#include <net/ip6_checksum.h>
  47#include <net/xfrm.h>
  48
  49#include <linux/proc_fs.h>
  50#include <linux/seq_file.h>
  51#include <trace/events/skb.h>
  52#include "udp_impl.h"
  53
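/*
 * Bind-conflict helper for udp_lib_get_port(): return 1 when the two
 * sockets could receive the same traffic, i.e. both are bound to the
 * same v4-mapped address, one of them is bound to the wildcard (and
 * IPV6_V6ONLY does not keep them apart), or both are bound to the
 * same IPv6 address.
 */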
  54int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
  55{
  56        const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
  57        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
  58        __be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
  59        __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
  60        int sk_ipv6only = ipv6_only_sock(sk);
  61        int sk2_ipv6only = inet_v6_ipv6only(sk2);
  62        int addr_type = ipv6_addr_type(sk_rcv_saddr6);
  63        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
  64
  65        /* if both are mapped, treat as IPv4 */
  66        if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
  67                return (!sk2_ipv6only &&
  68                        (!sk1_rcv_saddr || !sk2_rcv_saddr ||
  69                          sk1_rcv_saddr == sk2_rcv_saddr));
  70
  71        if (addr_type2 == IPV6_ADDR_ANY &&
  72            !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
  73                return 1;
  74
  75        if (addr_type == IPV6_ADDR_ANY &&
  76            !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
  77                return 1;
  78
  79        if (sk2_rcv_saddr6 &&
  80            ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
  81                return 1;
  82
  83        return 0;
  84}
  85
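/*
 * Secondary hash over (net, local address, port) used by the hash2
 * lookup table; the wildcard and v4-mapped addresses only mix the
 * 32 bits that matter.
 */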
  86static unsigned int udp6_portaddr_hash(struct net *net,
  87                                       const struct in6_addr *addr6,
  88                                       unsigned int port)
  89{
  90        unsigned int hash, mix = net_hash_mix(net);
  91
  92        if (ipv6_addr_any(addr6))
  93                hash = jhash_1word(0, mix);
  94        else if (ipv6_addr_v4mapped(addr6))
  95                hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
  96        else
  97                hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
  98
  99        return hash ^ port;
 100}
 101
 102
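/*
 * Bind to a local port.  The partial portaddr hash is precomputed so
 * that udp_lib_get_port() can also insert the socket into the
 * secondary (address, port) hash table.
 */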
 103int udp_v6_get_port(struct sock *sk, unsigned short snum)
 104{
 105        unsigned int hash2_nulladdr =
 106                udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 107        unsigned int hash2_partial =
 108                udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
 109
 110        /* precompute partial secondary hash */
 111        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 112        return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
 113}
 114
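/* Recompute the portaddr hash when the bound local address changes. */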
 115static void udp_v6_rehash(struct sock *sk)
 116{
 117        u16 new_hash = udp6_portaddr_hash(sock_net(sk),
 118                                          &inet6_sk(sk)->rcv_saddr,
 119                                          inet_sk(sk)->inet_num);
 120
 121        udp_lib_rehash(sk, new_hash);
 122}
 123
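/*
 * Score a socket against an incoming packet for the port-hash lookup:
 * -1 means the socket cannot match, otherwise every specified and
 * matching attribute (remote port, local address, remote address,
 * bound device) adds one point, so the most specific socket wins.
 */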
 124static inline int compute_score(struct sock *sk, struct net *net,
 125                                unsigned short hnum,
 126                                const struct in6_addr *saddr, __be16 sport,
 127                                const struct in6_addr *daddr, __be16 dport,
 128                                int dif)
 129{
 130        int score = -1;
 131
 132        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
 133                        sk->sk_family == PF_INET6) {
 134                struct ipv6_pinfo *np = inet6_sk(sk);
 135                struct inet_sock *inet = inet_sk(sk);
 136
 137                score = 0;
 138                if (inet->inet_dport) {
 139                        if (inet->inet_dport != sport)
 140                                return -1;
 141                        score++;
 142                }
 143                if (!ipv6_addr_any(&np->rcv_saddr)) {
 144                        if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
 145                                return -1;
 146                        score++;
 147                }
 148                if (!ipv6_addr_any(&np->daddr)) {
 149                        if (!ipv6_addr_equal(&np->daddr, saddr))
 150                                return -1;
 151                        score++;
 152                }
 153                if (sk->sk_bound_dev_if) {
 154                        if (sk->sk_bound_dev_if != dif)
 155                                return -1;
 156                        score++;
 157                }
 158        }
 159        return score;
 160}
 161
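/*
 * Like compute_score(), but for the portaddr (hash2) lookup where the
 * local address must match exactly.  A socket reaching SCORE2_MAX has
 * remote port, remote address and bound device all set and matching,
 * so the scan can stop early.
 */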
 162#define SCORE2_MAX (1 + 1 + 1)
 163static inline int compute_score2(struct sock *sk, struct net *net,
 164                                const struct in6_addr *saddr, __be16 sport,
 165                                const struct in6_addr *daddr, unsigned short hnum,
 166                                int dif)
 167{
 168        int score = -1;
 169
 170        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
 171                        sk->sk_family == PF_INET6) {
 172                struct ipv6_pinfo *np = inet6_sk(sk);
 173                struct inet_sock *inet = inet_sk(sk);
 174
 175                if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
 176                        return -1;
 177                score = 0;
 178                if (inet->inet_dport) {
 179                        if (inet->inet_dport != sport)
 180                                return -1;
 181                        score++;
 182                }
 183                if (!ipv6_addr_any(&np->daddr)) {
 184                        if (!ipv6_addr_equal(&np->daddr, saddr))
 185                                return -1;
 186                        score++;
 187                }
 188                if (sk->sk_bound_dev_if) {
 189                        if (sk->sk_bound_dev_if != dif)
 190                                return -1;
 191                        score++;
 192                }
 193        }
 194        return score;
 195}
 196
 197
  198/* called with rcu_read_lock() held */
 199static struct sock *udp6_lib_lookup2(struct net *net,
 200                const struct in6_addr *saddr, __be16 sport,
 201                const struct in6_addr *daddr, unsigned int hnum, int dif,
 202                struct udp_hslot *hslot2, unsigned int slot2)
 203{
 204        struct sock *sk, *result;
 205        struct hlist_nulls_node *node;
 206        int score, badness;
 207
 208begin:
 209        result = NULL;
 210        badness = -1;
 211        udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
 212                score = compute_score2(sk, net, saddr, sport,
 213                                      daddr, hnum, dif);
 214                if (score > badness) {
 215                        result = sk;
 216                        badness = score;
 217                        if (score == SCORE2_MAX)
 218                                goto exact_match;
 219                }
 220        }
 221        /*
 222         * if the nulls value we got at the end of this lookup is
 223         * not the expected one, we must restart lookup.
 224         * We probably met an item that was moved to another chain.
 225         */
 226        if (get_nulls_value(node) != slot2)
 227                goto begin;
 228
 229        if (result) {
 230exact_match:
 231                if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 232                        result = NULL;
 233                else if (unlikely(compute_score2(result, net, saddr, sport,
 234                                  daddr, hnum, dif) < badness)) {
 235                        sock_put(result);
 236                        goto begin;
 237                }
 238        }
 239        return result;
 240}
 241
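/*
 * Unicast socket lookup for an incoming datagram.  Crowded primary
 * slots (more than ten sockets) are resolved via the secondary
 * portaddr hash, first on the destination address and then on the
 * wildcard; otherwise the primary port-hash chain is scanned directly.
 */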
 242struct sock *__udp6_lib_lookup(struct net *net,
 243                                      const struct in6_addr *saddr, __be16 sport,
 244                                      const struct in6_addr *daddr, __be16 dport,
 245                                      int dif, struct udp_table *udptable)
 246{
 247        struct sock *sk, *result;
 248        struct hlist_nulls_node *node;
 249        unsigned short hnum = ntohs(dport);
 250        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 251        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 252        int score, badness;
 253
 254        rcu_read_lock();
 255        if (hslot->count > 10) {
 256                hash2 = udp6_portaddr_hash(net, daddr, hnum);
 257                slot2 = hash2 & udptable->mask;
 258                hslot2 = &udptable->hash2[slot2];
 259                if (hslot->count < hslot2->count)
 260                        goto begin;
 261
 262                result = udp6_lib_lookup2(net, saddr, sport,
 263                                          daddr, hnum, dif,
 264                                          hslot2, slot2);
 265                if (!result) {
 266                        hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
 267                        slot2 = hash2 & udptable->mask;
 268                        hslot2 = &udptable->hash2[slot2];
 269                        if (hslot->count < hslot2->count)
 270                                goto begin;
 271
 272                        result = udp6_lib_lookup2(net, saddr, sport,
 273                                                  &in6addr_any, hnum, dif,
 274                                                  hslot2, slot2);
 275                }
 276                rcu_read_unlock();
 277                return result;
 278        }
 279begin:
 280        result = NULL;
 281        badness = -1;
 282        sk_nulls_for_each_rcu(sk, node, &hslot->head) {
 283                score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
 284                if (score > badness) {
 285                        result = sk;
 286                        badness = score;
 287                }
 288        }
 289        /*
 290         * if the nulls value we got at the end of this lookup is
 291         * not the expected one, we must restart lookup.
 292         * We probably met an item that was moved to another chain.
 293         */
 294        if (get_nulls_value(node) != slot)
 295                goto begin;
 296
 297        if (result) {
 298                if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 299                        result = NULL;
 300                else if (unlikely(compute_score(result, net, hnum, saddr, sport,
 301                                        daddr, dport, dif) < badness)) {
 302                        sock_put(result);
 303                        goto begin;
 304                }
 305        }
 306        rcu_read_unlock();
 307        return result;
 308}
 309EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
 310
 311static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 312                                          __be16 sport, __be16 dport,
 313                                          struct udp_table *udptable)
 314{
 315        struct sock *sk;
 316        const struct ipv6hdr *iph = ipv6_hdr(skb);
 317
 318        if (unlikely(sk = skb_steal_sock(skb)))
 319                return sk;
 320        return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
 321                                 &iph->daddr, dport, inet6_iif(skb),
 322                                 udptable);
 323}
 324
 325struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 326                             const struct in6_addr *daddr, __be16 dport, int dif)
 327{
 328        return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
 329}
 330EXPORT_SYMBOL_GPL(udp6_lib_lookup);
 331
 332
 333/*
 334 *      This should be easy, if there is something there we
 335 *      return it, otherwise we block.
 336 */
 337
 338int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 339                  struct msghdr *msg, size_t len,
 340                  int noblock, int flags, int *addr_len)
 341{
 342        struct ipv6_pinfo *np = inet6_sk(sk);
 343        struct inet_sock *inet = inet_sk(sk);
 344        struct sk_buff *skb;
 345        unsigned int ulen, copied;
 346        int peeked, off = 0;
 347        int err;
 348        int is_udplite = IS_UDPLITE(sk);
 349        int is_udp4;
 350        bool slow;
 351
 352        if (addr_len)
 353                *addr_len = sizeof(struct sockaddr_in6);
 354
 355        if (flags & MSG_ERRQUEUE)
 356                return ipv6_recv_error(sk, msg, len);
 357
 358        if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 359                return ipv6_recv_rxpmtu(sk, msg, len);
 360
 361try_again:
 362        skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
 363                                  &peeked, &off, &err);
 364        if (!skb)
 365                goto out;
 366
 367        ulen = skb->len - sizeof(struct udphdr);
 368        copied = len;
 369        if (copied > ulen)
 370                copied = ulen;
 371        else if (copied < ulen)
 372                msg->msg_flags |= MSG_TRUNC;
 373
 374        is_udp4 = (skb->protocol == htons(ETH_P_IP));
 375
 376        /*
 377         * If checksum is needed at all, try to do it while copying the
 378         * data.  If the data is truncated, or if we only want a partial
 379         * coverage checksum (UDP-Lite), do it before the copy.
 380         */
 381
 382        if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
 383                if (udp_lib_checksum_complete(skb))
 384                        goto csum_copy_err;
 385        }
 386
 387        if (skb_csum_unnecessary(skb))
 388                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 389                                              msg->msg_iov, copied);
 390        else {
 391                err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
 392                if (err == -EINVAL)
 393                        goto csum_copy_err;
 394        }
 395        if (unlikely(err)) {
 396                trace_kfree_skb(skb, udpv6_recvmsg);
 397                if (!peeked) {
 398                        atomic_inc(&sk->sk_drops);
 399                        if (is_udp4)
 400                                UDP_INC_STATS_USER(sock_net(sk),
 401                                                   UDP_MIB_INERRORS,
 402                                                   is_udplite);
 403                        else
 404                                UDP6_INC_STATS_USER(sock_net(sk),
 405                                                    UDP_MIB_INERRORS,
 406                                                    is_udplite);
 407                }
 408                goto out_free;
 409        }
 410        if (!peeked) {
 411                if (is_udp4)
 412                        UDP_INC_STATS_USER(sock_net(sk),
 413                                        UDP_MIB_INDATAGRAMS, is_udplite);
 414                else
 415                        UDP6_INC_STATS_USER(sock_net(sk),
 416                                        UDP_MIB_INDATAGRAMS, is_udplite);
 417        }
 418
 419        sock_recv_ts_and_drops(msg, sk, skb);
 420
 421        /* Copy the address. */
 422        if (msg->msg_name) {
 423                struct sockaddr_in6 *sin6;
 424
 425                sin6 = (struct sockaddr_in6 *) msg->msg_name;
 426                sin6->sin6_family = AF_INET6;
 427                sin6->sin6_port = udp_hdr(skb)->source;
 428                sin6->sin6_flowinfo = 0;
 429                sin6->sin6_scope_id = 0;
 430
 431                if (is_udp4)
 432                        ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 433                                               &sin6->sin6_addr);
 434                else {
 435                        sin6->sin6_addr = ipv6_hdr(skb)->saddr;
 436                        if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
 437                                sin6->sin6_scope_id = IP6CB(skb)->iif;
 438                }
 439
 440        }
 441        if (is_udp4) {
 442                if (inet->cmsg_flags)
 443                        ip_cmsg_recv(msg, skb);
 444        } else {
 445                if (np->rxopt.all)
 446                        datagram_recv_ctl(sk, msg, skb);
 447        }
 448
 449        err = copied;
 450        if (flags & MSG_TRUNC)
 451                err = ulen;
 452
 453out_free:
 454        skb_free_datagram_locked(sk, skb);
 455out:
 456        return err;
 457
 458csum_copy_err:
 459        slow = lock_sock_fast(sk);
 460        if (!skb_kill_datagram(sk, skb, flags)) {
 461                if (is_udp4)
 462                        UDP_INC_STATS_USER(sock_net(sk),
 463                                        UDP_MIB_INERRORS, is_udplite);
 464                else
 465                        UDP6_INC_STATS_USER(sock_net(sk),
 466                                        UDP_MIB_INERRORS, is_udplite);
 467        }
 468        unlock_sock_fast(sk, slow);
 469
 470        if (noblock)
 471                return -EAGAIN;
 472
 473        /* starting over for a new packet */
 474        msg->msg_flags &= ~MSG_TRUNC;
 475        goto try_again;
 476}
 477
 478void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 479                    u8 type, u8 code, int offset, __be32 info,
 480                    struct udp_table *udptable)
 481{
 482        struct ipv6_pinfo *np;
 483        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 484        const struct in6_addr *saddr = &hdr->saddr;
 485        const struct in6_addr *daddr = &hdr->daddr;
 486        struct udphdr *uh = (struct udphdr*)(skb->data+offset);
 487        struct sock *sk;
 488        int err;
 489
 490        sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest,
 491                               saddr, uh->source, inet6_iif(skb), udptable);
 492        if (sk == NULL)
 493                return;
 494
 495        if (type == ICMPV6_PKT_TOOBIG)
 496                ip6_sk_update_pmtu(skb, sk, info);
 497        if (type == NDISC_REDIRECT)
 498                ip6_sk_redirect(skb, sk);
 499
 500        np = inet6_sk(sk);
 501
 502        if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
 503                goto out;
 504
 505        if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
 506                goto out;
 507
 508        if (np->recverr)
 509                ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 510
 511        sk->sk_err = err;
 512        sk->sk_error_report(sk);
 513out:
 514        sock_put(sk);
 515}
 516
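/*
 * Queue the skb on the socket's receive queue, accounting any failure
 * in the RCVBUFERRORS/INERRORS MIB counters and freeing the skb.
 */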
 517static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 518{
 519        int rc;
 520
 521        if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
 522                sock_rps_save_rxhash(sk, skb);
 523
 524        rc = sock_queue_rcv_skb(sk, skb);
 525        if (rc < 0) {
 526                int is_udplite = IS_UDPLITE(sk);
 527
 528                /* Note that an ENOMEM error is charged twice */
 529                if (rc == -ENOMEM)
 530                        UDP6_INC_STATS_BH(sock_net(sk),
 531                                        UDP_MIB_RCVBUFERRORS, is_udplite);
 532                UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 533                kfree_skb(skb);
 534                return -1;
 535        }
 536        return 0;
 537}
 538
 539static __inline__ void udpv6_err(struct sk_buff *skb,
 540                                 struct inet6_skb_parm *opt, u8 type,
 541                                 u8 code, int offset, __be32 info     )
 542{
 543        __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 544}
 545
 546static struct static_key udpv6_encap_needed __read_mostly;
 547void udpv6_encap_enable(void)
 548{
 549        if (!static_key_enabled(&udpv6_encap_needed))
 550                static_key_slow_inc(&udpv6_encap_needed);
 551}
 552EXPORT_SYMBOL(udpv6_encap_enable);
 553
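/*
 * Per-socket receive path: XFRM policy check, optional encapsulation
 * hook, UDP-Lite coverage checks and checksum verification, then the
 * skb is queued directly or put on the backlog if the socket is owned
 * by user context.
 */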
 554int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 555{
 556        struct udp_sock *up = udp_sk(sk);
 557        int rc;
 558        int is_udplite = IS_UDPLITE(sk);
 559
 560        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 561                goto drop;
 562
 563        if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
 564                int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
 565
 566                /*
 567                 * This is an encapsulation socket so pass the skb to
 568                 * the socket's udp_encap_rcv() hook. Otherwise, just
 569                 * fall through and pass this up the UDP socket.
 570                 * up->encap_rcv() returns the following value:
 571                 * =0 if skb was successfully passed to the encap
 572                 *    handler or was discarded by it.
 573                 * >0 if skb should be passed on to UDP.
 574                 * <0 if skb should be resubmitted as proto -N
 575                 */
 576
 577                /* if we're overly short, let UDP handle it */
 578                encap_rcv = ACCESS_ONCE(up->encap_rcv);
 579                if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
 580                        int ret;
 581
 582                        ret = encap_rcv(sk, skb);
 583                        if (ret <= 0) {
 584                                UDP_INC_STATS_BH(sock_net(sk),
 585                                                 UDP_MIB_INDATAGRAMS,
 586                                                 is_udplite);
 587                                return -ret;
 588                        }
 589                }
 590
 591                /* FALLTHROUGH -- it's a UDP Packet */
 592        }
 593
 594        /*
 595         * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 596         */
 597        if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
 598
 599                if (up->pcrlen == 0) {          /* full coverage was set  */
 600                        LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
 601                                " %d while full coverage %d requested\n",
 602                                UDP_SKB_CB(skb)->cscov, skb->len);
 603                        goto drop;
 604                }
 605                if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
 606                        LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
 607                                                    "too small, need min %d\n",
 608                                       UDP_SKB_CB(skb)->cscov, up->pcrlen);
 609                        goto drop;
 610                }
 611        }
 612
 613        if (rcu_access_pointer(sk->sk_filter)) {
 614                if (udp_lib_checksum_complete(skb))
 615                        goto drop;
 616        }
 617
 618        if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
 619                goto drop;
 620
 621        skb_dst_drop(skb);
 622
 623        bh_lock_sock(sk);
 624        rc = 0;
 625        if (!sock_owned_by_user(sk))
 626                rc = __udpv6_queue_rcv_skb(sk, skb);
 627        else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 628                bh_unlock_sock(sk);
 629                goto drop;
 630        }
 631        bh_unlock_sock(sk);
 632
 633        return rc;
 634drop:
 635        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 636        atomic_inc(&sk->sk_drops);
 637        kfree_skb(skb);
 638        return -1;
 639}
 640
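/*
 * Starting at sk, walk the hash chain and return the next socket that
 * matches the local/remote addresses, ports and bound device of the
 * incoming datagram and is a member of the multicast group.
 */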
 641static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 642                                      __be16 loc_port, const struct in6_addr *loc_addr,
 643                                      __be16 rmt_port, const struct in6_addr *rmt_addr,
 644                                      int dif)
 645{
 646        struct hlist_nulls_node *node;
 647        struct sock *s = sk;
 648        unsigned short num = ntohs(loc_port);
 649
 650        sk_nulls_for_each_from(s, node) {
 651                struct inet_sock *inet = inet_sk(s);
 652
 653                if (!net_eq(sock_net(s), net))
 654                        continue;
 655
 656                if (udp_sk(s)->udp_port_hash == num &&
 657                    s->sk_family == PF_INET6) {
 658                        struct ipv6_pinfo *np = inet6_sk(s);
 659                        if (inet->inet_dport) {
 660                                if (inet->inet_dport != rmt_port)
 661                                        continue;
 662                        }
 663                        if (!ipv6_addr_any(&np->daddr) &&
 664                            !ipv6_addr_equal(&np->daddr, rmt_addr))
 665                                continue;
 666
 667                        if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
 668                                continue;
 669
 670                        if (!ipv6_addr_any(&np->rcv_saddr)) {
 671                                if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
 672                                        continue;
 673                        }
 674                        if (!inet6_mc_check(s, loc_addr, rmt_addr))
 675                                continue;
 676                        return s;
 677                }
 678        }
 679        return NULL;
 680}
 681
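/*
 * Deliver the skb to every socket collected on the stack: the entry at
 * index 'final' gets the original skb, the others get clones, and a
 * failed clone is accounted as a drop on that socket.
 */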
 682static void flush_stack(struct sock **stack, unsigned int count,
 683                        struct sk_buff *skb, unsigned int final)
 684{
 685        struct sk_buff *skb1 = NULL;
 686        struct sock *sk;
 687        unsigned int i;
 688
 689        for (i = 0; i < count; i++) {
 690                sk = stack[i];
 691                if (likely(skb1 == NULL))
 692                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 693                if (!skb1) {
 694                        atomic_inc(&sk->sk_drops);
 695                        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 696                                          IS_UDPLITE(sk));
 697                        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
 698                                          IS_UDPLITE(sk));
 699                }
 700
 701                if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
 702                        skb1 = NULL;
 703        }
 704        if (unlikely(skb1))
 705                kfree_skb(skb1);
 706}
 707/*
 708 * Note: called only from the BH handler context,
 709 * so we don't need to lock the hashes.
 710 */
 711static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 712                const struct in6_addr *saddr, const struct in6_addr *daddr,
 713                struct udp_table *udptable)
 714{
 715        struct sock *sk, *stack[256 / sizeof(struct sock *)];
 716        const struct udphdr *uh = udp_hdr(skb);
 717        struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
 718        int dif;
 719        unsigned int i, count = 0;
 720
 721        spin_lock(&hslot->lock);
 722        sk = sk_nulls_head(&hslot->head);
 723        dif = inet6_iif(skb);
 724        sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
 725        while (sk) {
 726                stack[count++] = sk;
 727                sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
 728                                       uh->source, saddr, dif);
 729                if (unlikely(count == ARRAY_SIZE(stack))) {
 730                        if (!sk)
 731                                break;
 732                        flush_stack(stack, count, skb, ~0);
 733                        count = 0;
 734                }
 735        }
 736        /*
 737         * before releasing the lock, we must take reference on sockets
 738         */
 739        for (i = 0; i < count; i++)
 740                sock_hold(stack[i]);
 741
 742        spin_unlock(&hslot->lock);
 743
 744        if (count) {
 745                flush_stack(stack, count, skb, count - 1);
 746
 747                for (i = 0; i < count; i++)
 748                        sock_put(stack[i]);
 749        } else {
 750                kfree_skb(skb);
 751        }
 752        return 0;
 753}
 754
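/*
 * Validate the incoming checksum state.  A zero UDP checksum is not
 * allowed over IPv6 (RFC 2460), so it is rejected; otherwise verify a
 * CHECKSUM_COMPLETE value against the pseudo-header or prime skb->csum
 * for a later full software verification.
 */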
 755static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
 756                                 int proto)
 757{
 758        int err;
 759
 760        UDP_SKB_CB(skb)->partial_cov = 0;
 761        UDP_SKB_CB(skb)->cscov = skb->len;
 762
 763        if (proto == IPPROTO_UDPLITE) {
 764                err = udplite_checksum_init(skb, uh);
 765                if (err)
 766                        return err;
 767        }
 768
 769        if (uh->check == 0) {
 770                /* RFC 2460 section 8.1 says that we SHOULD log
 771                   this error. Well, it is reasonable.
 772                 */
 773                LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
 774                return 1;
 775        }
 776        if (skb->ip_summed == CHECKSUM_COMPLETE &&
 777            !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 778                             skb->len, proto, skb->csum))
 779                skb->ip_summed = CHECKSUM_UNNECESSARY;
 780
 781        if (!skb_csum_unnecessary(skb))
 782                skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 783                                                         &ipv6_hdr(skb)->daddr,
 784                                                         skb->len, proto, 0));
 785
 786        return 0;
 787}
 788
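/*
 * Common receive routine for UDP and UDP-Lite over IPv6: length and
 * checksum sanity checks, multicast delivery, then unicast socket
 * lookup.  Datagrams with no listening socket are answered with an
 * ICMPv6 port unreachable.
 */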
 789int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 790                   int proto)
 791{
 792        struct net *net = dev_net(skb->dev);
 793        struct sock *sk;
 794        struct udphdr *uh;
 795        const struct in6_addr *saddr, *daddr;
 796        u32 ulen = 0;
 797
 798        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 799                goto discard;
 800
 801        saddr = &ipv6_hdr(skb)->saddr;
 802        daddr = &ipv6_hdr(skb)->daddr;
 803        uh = udp_hdr(skb);
 804
 805        ulen = ntohs(uh->len);
 806        if (ulen > skb->len)
 807                goto short_packet;
 808
 809        if (proto == IPPROTO_UDP) {
 810                /* UDP validates ulen. */
 811
 812                /* Check for jumbo payload */
 813                if (ulen == 0)
 814                        ulen = skb->len;
 815
 816                if (ulen < sizeof(*uh))
 817                        goto short_packet;
 818
 819                if (ulen < skb->len) {
 820                        if (pskb_trim_rcsum(skb, ulen))
 821                                goto short_packet;
 822                        saddr = &ipv6_hdr(skb)->saddr;
 823                        daddr = &ipv6_hdr(skb)->daddr;
 824                        uh = udp_hdr(skb);
 825                }
 826        }
 827
 828        if (udp6_csum_init(skb, uh, proto))
 829                goto discard;
 830
 831        /*
 832         *      Multicast receive code
 833         */
 834        if (ipv6_addr_is_multicast(daddr))
 835                return __udp6_lib_mcast_deliver(net, skb,
 836                                saddr, daddr, udptable);
 837
 838        /* Unicast */
 839
 840        /*
 841         * check socket cache ... must talk to Alan about his plans
 842         * for sock caches... i'll skip this for now.
 843         */
 844        sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 845        if (sk != NULL) {
 846                int ret = udpv6_queue_rcv_skb(sk, skb);
 847                sock_put(sk);
 848
 849                /* a return value > 0 means to resubmit the input, but
 850                 * it wants the return to be -protocol, or 0
 851                 */
 852                if (ret > 0)
 853                        return -ret;
 854
 855                return 0;
 856        }
 857
 858        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 859                goto discard;
 860
 861        if (udp_lib_checksum_complete(skb))
 862                goto discard;
 863
 864        UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 865        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 866
 867        kfree_skb(skb);
 868        return 0;
 869
 870short_packet:
 871        LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
 872                       proto == IPPROTO_UDPLITE ? "-Lite" : "",
 873                       saddr,
 874                       ntohs(uh->source),
 875                       ulen,
 876                       skb->len,
 877                       daddr,
 878                       ntohs(uh->dest));
 879
 880discard:
 881        UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
 882        kfree_skb(skb);
 883        return 0;
 884}
 885
 886static __inline__ int udpv6_rcv(struct sk_buff *skb)
 887{
 888        return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
 889}
 890
 891/*
 892 * Throw away all pending data and cancel the corking. Socket is locked.
 893 */
 894static void udp_v6_flush_pending_frames(struct sock *sk)
 895{
 896        struct udp_sock *up = udp_sk(sk);
 897
 898        if (up->pending == AF_INET)
 899                udp_flush_pending_frames(sk);
 900        else if (up->pending) {
 901                up->len = 0;
 902                up->pending = 0;
 903                ip6_flush_pending_frames(sk);
 904        }
 905}
 906
 907/**
 908 *      udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 909 *      @sk:    socket we are sending on
 910 *      @skb:   sk_buff containing the filled-in UDP header
 911 *              (checksum field must be zeroed out)
 912 */
 913static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
 914                                 const struct in6_addr *saddr,
 915                                 const struct in6_addr *daddr, int len)
 916{
 917        unsigned int offset;
 918        struct udphdr *uh = udp_hdr(skb);
 919        __wsum csum = 0;
 920
 921        if (skb_queue_len(&sk->sk_write_queue) == 1) {
 922                /* Only one fragment on the socket.  */
 923                skb->csum_start = skb_transport_header(skb) - skb->head;
 924                skb->csum_offset = offsetof(struct udphdr, check);
 925                uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
 926        } else {
  927                /*
  928                 * HW checksumming cannot be used: there are two or more
  929                 * fragments on the socket, so the checksums of all the
  930                 * sk_buffs have to be summed up here in software.
  931                 */
 932                offset = skb_transport_offset(skb);
 933                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
 934
 935                skb->ip_summed = CHECKSUM_NONE;
 936
 937                skb_queue_walk(&sk->sk_write_queue, skb) {
 938                        csum = csum_add(csum, skb->csum);
 939                }
 940
 941                uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
 942                                            csum);
 943                if (uh->check == 0)
 944                        uh->check = CSUM_MANGLED_0;
 945        }
 946}
 947
 948/*
 949 *      Sending
 950 */
 951
 952static int udp_v6_push_pending_frames(struct sock *sk)
 953{
 954        struct sk_buff *skb;
 955        struct udphdr *uh;
 956        struct udp_sock  *up = udp_sk(sk);
 957        struct inet_sock *inet = inet_sk(sk);
 958        struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
 959        int err = 0;
 960        int is_udplite = IS_UDPLITE(sk);
 961        __wsum csum = 0;
 962
 963        /* Grab the skbuff where UDP header space exists. */
 964        if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
 965                goto out;
 966
 967        /*
 968         * Create a UDP header
 969         */
 970        uh = udp_hdr(skb);
 971        uh->source = fl6->fl6_sport;
 972        uh->dest = fl6->fl6_dport;
 973        uh->len = htons(up->len);
 974        uh->check = 0;
 975
 976        if (is_udplite)
 977                csum = udplite_csum_outgoing(sk, skb);
 978        else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
 979                udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
 980                                     up->len);
 981                goto send;
 982        } else
 983                csum = udp_csum_outgoing(sk, skb);
 984
 985        /* add protocol-dependent pseudo-header */
 986        uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
 987                                    up->len, fl6->flowi6_proto, csum);
 988        if (uh->check == 0)
 989                uh->check = CSUM_MANGLED_0;
 990
 991send:
 992        err = ip6_push_pending_frames(sk);
 993        if (err) {
 994                if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
 995                        UDP6_INC_STATS_USER(sock_net(sk),
 996                                            UDP_MIB_SNDBUFERRORS, is_udplite);
 997                        err = 0;
 998                }
 999        } else
1000                UDP6_INC_STATS_USER(sock_net(sk),
1001                                    UDP_MIB_OUTDATAGRAMS, is_udplite);
1002out:
1003        up->len = 0;
1004        up->pending = 0;
1005        return err;
1006}
1007
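/*
 * sendmsg() for UDPv6 sockets.  AF_INET and v4-mapped destinations are
 * handed over to udp_sendmsg(); for native IPv6 the flow is resolved,
 * the data is appended with ip6_append_data() and pushed out
 * immediately unless the socket is corked.
 */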
1008int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
1009                  struct msghdr *msg, size_t len)
1010{
1011        struct ipv6_txoptions opt_space;
1012        struct udp_sock *up = udp_sk(sk);
1013        struct inet_sock *inet = inet_sk(sk);
1014        struct ipv6_pinfo *np = inet6_sk(sk);
1015        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
1016        struct in6_addr *daddr, *final_p, final;
1017        struct ipv6_txoptions *opt = NULL;
1018        struct ip6_flowlabel *flowlabel = NULL;
1019        struct flowi6 fl6;
1020        struct dst_entry *dst;
1021        int addr_len = msg->msg_namelen;
1022        int ulen = len;
1023        int hlimit = -1;
1024        int tclass = -1;
1025        int dontfrag = -1;
1026        int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
1027        int err;
1028        int connected = 0;
1029        int is_udplite = IS_UDPLITE(sk);
1030        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1031
1032        /* destination address check */
1033        if (sin6) {
1034                if (addr_len < offsetof(struct sockaddr, sa_data))
1035                        return -EINVAL;
1036
1037                switch (sin6->sin6_family) {
1038                case AF_INET6:
1039                        if (addr_len < SIN6_LEN_RFC2133)
1040                                return -EINVAL;
1041                        daddr = &sin6->sin6_addr;
1042                        break;
1043                case AF_INET:
1044                        goto do_udp_sendmsg;
1045                case AF_UNSPEC:
1046                        msg->msg_name = sin6 = NULL;
1047                        msg->msg_namelen = addr_len = 0;
1048                        daddr = NULL;
1049                        break;
1050                default:
1051                        return -EINVAL;
1052                }
1053        } else if (!up->pending) {
1054                if (sk->sk_state != TCP_ESTABLISHED)
1055                        return -EDESTADDRREQ;
1056                daddr = &np->daddr;
1057        } else
1058                daddr = NULL;
1059
1060        if (daddr) {
1061                if (ipv6_addr_v4mapped(daddr)) {
1062                        struct sockaddr_in sin;
1063                        sin.sin_family = AF_INET;
1064                        sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1065                        sin.sin_addr.s_addr = daddr->s6_addr32[3];
1066                        msg->msg_name = &sin;
1067                        msg->msg_namelen = sizeof(sin);
1068do_udp_sendmsg:
1069                        if (__ipv6_only_sock(sk))
1070                                return -ENETUNREACH;
1071                        return udp_sendmsg(iocb, sk, msg, len);
1072                }
1073        }
1074
1075        if (up->pending == AF_INET)
1076                return udp_sendmsg(iocb, sk, msg, len);
1077
1078        /* Rough check on arithmetic overflow,
1079           better check is made in ip6_append_data().
1080           */
1081        if (len > INT_MAX - sizeof(struct udphdr))
1082                return -EMSGSIZE;
1083
1084        if (up->pending) {
1085                /*
1086                 * There are pending frames.
1087                 * The socket lock must be held while it's corked.
1088                 */
1089                lock_sock(sk);
1090                if (likely(up->pending)) {
1091                        if (unlikely(up->pending != AF_INET6)) {
1092                                release_sock(sk);
1093                                return -EAFNOSUPPORT;
1094                        }
1095                        dst = NULL;
1096                        goto do_append_data;
1097                }
1098                release_sock(sk);
1099        }
1100        ulen += sizeof(struct udphdr);
1101
1102        memset(&fl6, 0, sizeof(fl6));
1103
1104        if (sin6) {
1105                if (sin6->sin6_port == 0)
1106                        return -EINVAL;
1107
1108                fl6.fl6_dport = sin6->sin6_port;
1109                daddr = &sin6->sin6_addr;
1110
1111                if (np->sndflow) {
1112                        fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1113                        if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1114                                flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1115                                if (flowlabel == NULL)
1116                                        return -EINVAL;
1117                                daddr = &flowlabel->dst;
1118                        }
1119                }
1120
1121                /*
1122                 * Otherwise it will be difficult to maintain
1123                 * sk->sk_dst_cache.
1124                 */
1125                if (sk->sk_state == TCP_ESTABLISHED &&
1126                    ipv6_addr_equal(daddr, &np->daddr))
1127                        daddr = &np->daddr;
1128
1129                if (addr_len >= sizeof(struct sockaddr_in6) &&
1130                    sin6->sin6_scope_id &&
1131                    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
1132                        fl6.flowi6_oif = sin6->sin6_scope_id;
1133        } else {
1134                if (sk->sk_state != TCP_ESTABLISHED)
1135                        return -EDESTADDRREQ;
1136
1137                fl6.fl6_dport = inet->inet_dport;
1138                daddr = &np->daddr;
1139                fl6.flowlabel = np->flow_label;
1140                connected = 1;
1141        }
1142
1143        if (!fl6.flowi6_oif)
1144                fl6.flowi6_oif = sk->sk_bound_dev_if;
1145
1146        if (!fl6.flowi6_oif)
1147                fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1148
1149        fl6.flowi6_mark = sk->sk_mark;
1150
1151        if (msg->msg_controllen) {
1152                opt = &opt_space;
1153                memset(opt, 0, sizeof(struct ipv6_txoptions));
1154                opt->tot_len = sizeof(*opt);
1155
1156                err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
1157                                        &hlimit, &tclass, &dontfrag);
1158                if (err < 0) {
1159                        fl6_sock_release(flowlabel);
1160                        return err;
1161                }
1162                if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1163                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1164                        if (flowlabel == NULL)
1165                                return -EINVAL;
1166                }
1167                if (!(opt->opt_nflen|opt->opt_flen))
1168                        opt = NULL;
1169                connected = 0;
1170        }
1171        if (opt == NULL)
1172                opt = np->opt;
1173        if (flowlabel)
1174                opt = fl6_merge_options(&opt_space, flowlabel, opt);
1175        opt = ipv6_fixup_options(&opt_space, opt);
1176
1177        fl6.flowi6_proto = sk->sk_protocol;
1178        if (!ipv6_addr_any(daddr))
1179                fl6.daddr = *daddr;
1180        else
1181                fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1182        if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1183                fl6.saddr = np->saddr;
1184        fl6.fl6_sport = inet->inet_sport;
1185
1186        final_p = fl6_update_dst(&fl6, opt, &final);
1187        if (final_p)
1188                connected = 0;
1189
1190        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1191                fl6.flowi6_oif = np->mcast_oif;
1192                connected = 0;
1193        } else if (!fl6.flowi6_oif)
1194                fl6.flowi6_oif = np->ucast_oif;
1195
1196        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
1197
1198        dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, true);
1199        if (IS_ERR(dst)) {
1200                err = PTR_ERR(dst);
1201                dst = NULL;
1202                goto out;
1203        }
1204
1205        if (hlimit < 0) {
1206                if (ipv6_addr_is_multicast(&fl6.daddr))
1207                        hlimit = np->mcast_hops;
1208                else
1209                        hlimit = np->hop_limit;
1210                if (hlimit < 0)
1211                        hlimit = ip6_dst_hoplimit(dst);
1212        }
1213
1214        if (tclass < 0)
1215                tclass = np->tclass;
1216
1217        if (dontfrag < 0)
1218                dontfrag = np->dontfrag;
1219
1220        if (msg->msg_flags&MSG_CONFIRM)
1221                goto do_confirm;
1222back_from_confirm:
1223
1224        lock_sock(sk);
1225        if (unlikely(up->pending)) {
1226                /* The socket is already corked while preparing it. */
1227                /* ... which is an evident application bug. --ANK */
1228                release_sock(sk);
1229
1230                LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
1231                err = -EINVAL;
1232                goto out;
1233        }
1234
1235        up->pending = AF_INET6;
1236
1237do_append_data:
1238        up->len += ulen;
1239        getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1240        err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
1241                sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
1242                (struct rt6_info*)dst,
1243                corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
1244        if (err)
1245                udp_v6_flush_pending_frames(sk);
1246        else if (!corkreq)
1247                err = udp_v6_push_pending_frames(sk);
1248        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1249                up->pending = 0;
1250
1251        if (dst) {
1252                if (connected) {
1253                        ip6_dst_store(sk, dst,
1254                                      ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
1255                                      &np->daddr : NULL,
1256#ifdef CONFIG_IPV6_SUBTREES
1257                                      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
1258                                      &np->saddr :
1259#endif
1260                                      NULL);
1261                } else {
1262                        dst_release(dst);
1263                }
1264                dst = NULL;
1265        }
1266
1267        if (err > 0)
1268                err = np->recverr ? net_xmit_errno(err) : 0;
1269        release_sock(sk);
1270out:
1271        dst_release(dst);
1272        fl6_sock_release(flowlabel);
1273        if (!err)
1274                return len;
1275        /*
1276         * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1277         * ENOBUFS might not be good (it's not tunable per se), but otherwise
1278         * we don't have a good statistic (IpOutDiscards but it can be too many
1279         * things).  We could add another new stat but at least for now that
1280         * seems like overkill.
1281         */
1282        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1283                UDP6_INC_STATS_USER(sock_net(sk),
1284                                UDP_MIB_SNDBUFERRORS, is_udplite);
1285        }
1286        return err;
1287
1288do_confirm:
1289        dst_confirm(dst);
1290        if (!(msg->msg_flags&MSG_PROBE) || len)
1291                goto back_from_confirm;
1292        err = 0;
1293        goto out;
1294}
1295
1296void udpv6_destroy_sock(struct sock *sk)
1297{
1298        lock_sock(sk);
1299        udp_v6_flush_pending_frames(sk);
1300        release_sock(sk);
1301
1302        inet6_destroy_sock(sk);
1303}
1304
1305/*
1306 *      Socket option code for UDP
1307 */
1308int udpv6_setsockopt(struct sock *sk, int level, int optname,
1309                     char __user *optval, unsigned int optlen)
1310{
1311        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1312                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1313                                          udp_v6_push_pending_frames);
1314        return ipv6_setsockopt(sk, level, optname, optval, optlen);
1315}
1316
1317#ifdef CONFIG_COMPAT
1318int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
1319                            char __user *optval, unsigned int optlen)
1320{
1321        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1322                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1323                                          udp_v6_push_pending_frames);
1324        return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
1325}
1326#endif
1327
1328int udpv6_getsockopt(struct sock *sk, int level, int optname,
1329                     char __user *optval, int __user *optlen)
1330{
1331        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1332                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1333        return ipv6_getsockopt(sk, level, optname, optval, optlen);
1334}
1335
1336#ifdef CONFIG_COMPAT
1337int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
1338                            char __user *optval, int __user *optlen)
1339{
1340        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1341                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1342        return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
1343}
1344#endif
1345
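/*
 * GSO send check for UFO: seed uh->check with the pseudo-header sum
 * and mark the skb CHECKSUM_PARTIAL so the checksum is finished later.
 */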
1346static int udp6_ufo_send_check(struct sk_buff *skb)
1347{
1348        const struct ipv6hdr *ipv6h;
1349        struct udphdr *uh;
1350
1351        if (!pskb_may_pull(skb, sizeof(*uh)))
1352                return -EINVAL;
1353
1354        ipv6h = ipv6_hdr(skb);
1355        uh = udp_hdr(skb);
1356
1357        uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
1358                                     IPPROTO_UDP, 0);
1359        skb->csum_start = skb_transport_header(skb) - skb->head;
1360        skb->csum_offset = offsetof(struct udphdr, check);
1361        skb->ip_summed = CHECKSUM_PARTIAL;
1362        return 0;
1363}
1364
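/*
 * Software UFO: complete the UDP checksum, make room for and insert an
 * IPv6 fragment header, then let skb_segment() split the packet into
 * mss-sized pieces; ipv6_gso_segment() fixes up the per-fragment
 * headers afterwards.
 */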
1365static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
1366        netdev_features_t features)
1367{
1368        struct sk_buff *segs = ERR_PTR(-EINVAL);
1369        unsigned int mss;
1370        unsigned int unfrag_ip6hlen, unfrag_len;
1371        struct frag_hdr *fptr;
1372        u8 *mac_start, *prevhdr;
1373        u8 nexthdr;
1374        u8 frag_hdr_sz = sizeof(struct frag_hdr);
1375        int offset;
1376        __wsum csum;
1377
1378        mss = skb_shinfo(skb)->gso_size;
1379        if (unlikely(skb->len <= mss))
1380                goto out;
1381
1382        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
1383                /* Packet is from an untrusted source, reset gso_segs. */
1384                int type = skb_shinfo(skb)->gso_type;
1385
1386                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
1387                             !(type & (SKB_GSO_UDP))))
1388                        goto out;
1389
1390                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
1391
1392                segs = NULL;
1393                goto out;
1394        }
1395
1396        /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
1397         * do checksum of UDP packets sent as multiple IP fragments.
1398         */
1399        offset = skb_checksum_start_offset(skb);
1400        csum = skb_checksum(skb, offset, skb->len - offset, 0);
1401        offset += skb->csum_offset;
1402        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1403        skb->ip_summed = CHECKSUM_NONE;
1404
1405        /* Check if there is enough headroom to insert fragment header. */
1406        if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
1407            pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
1408                goto out;
1409
1410        /* Find the unfragmentable header and shift it left by frag_hdr_sz
1411         * bytes to insert fragment header.
1412         */
1413        unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
1414        nexthdr = *prevhdr;
1415        *prevhdr = NEXTHDR_FRAGMENT;
1416        unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
1417                     unfrag_ip6hlen;
1418        mac_start = skb_mac_header(skb);
1419        memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
1420
1421        skb->mac_header -= frag_hdr_sz;
1422        skb->network_header -= frag_hdr_sz;
1423
1424        fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
1425        fptr->nexthdr = nexthdr;
1426        fptr->reserved = 0;
1427        ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
1428
1429        /* Fragment the skb. ipv6 header and the remaining fields of the
1430         * fragment header are updated in ipv6_gso_segment()
1431         */
1432        segs = skb_segment(skb, features);
1433
1434out:
1435        return segs;
1436}
1437
1438static const struct inet6_protocol udpv6_protocol = {
1439        .handler        =       udpv6_rcv,
1440        .err_handler    =       udpv6_err,
1441        .gso_send_check =       udp6_ufo_send_check,
1442        .gso_segment    =       udp6_ufo_fragment,
1443        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1444};
1445
1446/* ------------------------------------------------------------------------ */
1447#ifdef CONFIG_PROC_FS
1448
1449static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket)
1450{
1451        struct inet_sock *inet = inet_sk(sp);
1452        struct ipv6_pinfo *np = inet6_sk(sp);
1453        const struct in6_addr *dest, *src;
1454        __u16 destp, srcp;
1455
1456        dest  = &np->daddr;
1457        src   = &np->rcv_saddr;
1458        destp = ntohs(inet->inet_dport);
1459        srcp  = ntohs(inet->inet_sport);
1460        seq_printf(seq,
1461                   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1462                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
1463                   bucket,
1464                   src->s6_addr32[0], src->s6_addr32[1],
1465                   src->s6_addr32[2], src->s6_addr32[3], srcp,
1466                   dest->s6_addr32[0], dest->s6_addr32[1],
1467                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1468                   sp->sk_state,
1469                   sk_wmem_alloc_get(sp),
1470                   sk_rmem_alloc_get(sp),
1471                   0, 0L, 0,
1472                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1473                   0,
1474                   sock_i_ino(sp),
1475                   atomic_read(&sp->sk_refcnt), sp,
1476                   atomic_read(&sp->sk_drops));
1477}
1478
1479int udp6_seq_show(struct seq_file *seq, void *v)
1480{
1481        if (v == SEQ_START_TOKEN)
1482                seq_printf(seq,
1483                           "  sl  "
1484                           "local_address                         "
1485                           "remote_address                        "
1486                           "st tx_queue rx_queue tr tm->when retrnsmt"
1487                           "   uid  timeout inode ref pointer drops\n");
1488        else
1489                udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket);
1490        return 0;
1491}
1492
1493static const struct file_operations udp6_afinfo_seq_fops = {
1494        .owner    = THIS_MODULE,
1495        .open     = udp_seq_open,
1496        .read     = seq_read,
1497        .llseek   = seq_lseek,
1498        .release  = seq_release_net
1499};
1500
1501static struct udp_seq_afinfo udp6_seq_afinfo = {
1502        .name           = "udp6",
1503        .family         = AF_INET6,
1504        .udp_table      = &udp_table,
1505        .seq_fops       = &udp6_afinfo_seq_fops,
1506        .seq_ops        = {
1507                .show           = udp6_seq_show,
1508        },
1509};
1510
1511int __net_init udp6_proc_init(struct net *net)
1512{
1513        return udp_proc_register(net, &udp6_seq_afinfo);
1514}
1515
1516void udp6_proc_exit(struct net *net) {
1517        udp_proc_unregister(net, &udp6_seq_afinfo);
1518}
1519#endif /* CONFIG_PROC_FS */
1520
1521/* ------------------------------------------------------------------------ */
1522
1523struct proto udpv6_prot = {
1524        .name              = "UDPv6",
1525        .owner             = THIS_MODULE,
1526        .close             = udp_lib_close,
1527        .connect           = ip6_datagram_connect,
1528        .disconnect        = udp_disconnect,
1529        .ioctl             = udp_ioctl,
1530        .destroy           = udpv6_destroy_sock,
1531        .setsockopt        = udpv6_setsockopt,
1532        .getsockopt        = udpv6_getsockopt,
1533        .sendmsg           = udpv6_sendmsg,
1534        .recvmsg           = udpv6_recvmsg,
1535        .backlog_rcv       = __udpv6_queue_rcv_skb,
1536        .hash              = udp_lib_hash,
1537        .unhash            = udp_lib_unhash,
1538        .rehash            = udp_v6_rehash,
1539        .get_port          = udp_v6_get_port,
1540        .memory_allocated  = &udp_memory_allocated,
1541        .sysctl_mem        = sysctl_udp_mem,
1542        .sysctl_wmem       = &sysctl_udp_wmem_min,
1543        .sysctl_rmem       = &sysctl_udp_rmem_min,
1544        .obj_size          = sizeof(struct udp6_sock),
1545        .slab_flags        = SLAB_DESTROY_BY_RCU,
1546        .h.udp_table       = &udp_table,
1547#ifdef CONFIG_COMPAT
1548        .compat_setsockopt = compat_udpv6_setsockopt,
1549        .compat_getsockopt = compat_udpv6_getsockopt,
1550#endif
1551        .clear_sk          = sk_prot_clear_portaddr_nulls,
1552};
1553
1554static struct inet_protosw udpv6_protosw = {
1555        .type =      SOCK_DGRAM,
1556        .protocol =  IPPROTO_UDP,
1557        .prot =      &udpv6_prot,
1558        .ops =       &inet6_dgram_ops,
1559        .no_check =  UDP_CSUM_DEFAULT,
1560        .flags =     INET_PROTOSW_PERMANENT,
1561};
1562
1563
1564int __init udpv6_init(void)
1565{
1566        int ret;
1567
1568        ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1569        if (ret)
1570                goto out;
1571
1572        ret = inet6_register_protosw(&udpv6_protosw);
1573        if (ret)
1574                goto out_udpv6_protocol;
1575out:
1576        return ret;
1577
1578out_udpv6_protocol:
1579        inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1580        goto out;
1581}
1582
1583void udpv6_exit(void)
1584{
1585        inet6_unregister_protosw(&udpv6_protosw);
1586        inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1587}
1588