linux/net/ipv4/ip_input.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) module.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *
 * Fixes:
 *              Alan Cox        :       Commented a couple of minor bits of surplus code
 *              Alan Cox        :       Undefining IP_FORWARD doesn't include the code
 *                                      (just stops a compiler warning).
 *              Alan Cox        :       Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *                                      are junked rather than corrupting things.
 *              Alan Cox        :       Frames to bad broadcast subnets are dumped
 *                                      We used to process them non broadcast and
 *                                      boy could that cause havoc.
 *              Alan Cox        :       ip_forward sets the free flag on the
 *                                      new frame it queues. Still crap because
 *                                      it copies the frame but at least it
 *                                      doesn't eat memory too.
 *              Alan Cox        :       Generic queue code and memory fixes.
 *              Fred Van Kempen :       IP fragment support (borrowed from NET2E)
 *              Gerhard Koerting:       Forward fragmented frames correctly.
 *              Gerhard Koerting:       Fixes to my fix of the above 8-).
 *              Gerhard Koerting:       IP interface addressing fix.
 *              Linus Torvalds  :       More robustness checks
 *              Alan Cox        :       Even more checks: Still not as robust as it ought to be
 *              Alan Cox        :       Save IP header pointer for later
 *              Alan Cox        :       ip option setting
 *              Alan Cox        :       Use ip_tos/ip_ttl settings
 *              Alan Cox        :       Fragmentation bogosity removed
 *                                      (Thanks to Mark.Bush@prg.ox.ac.uk)
 *              Dmitry Gorodchanin :    Send of a raw packet crash fix.
 *              Alan Cox        :       Silly ip bug when an overlength
 *                                      fragment turns up. Now frees the
 *                                      queue.
 *              Linus Torvalds/ :       Memory leakage on fragmentation
 *              Alan Cox        :       handling.
 *              Gerhard Koerting:       Forwarding uses IP priority hints
 *              Teemu Rantanen  :       Fragment problems.
 *              Alan Cox        :       General cleanup, comments and reformat
 *              Alan Cox        :       SNMP statistics
 *              Alan Cox        :       BSD address rule semantics. Also see
 *                                      UDP as there is a nasty checksum issue
 *                                      if you do things the wrong way.
 *              Alan Cox        :       Always defrag, moved IP_FORWARD to the config.in file
 *              Alan Cox        :       IP options adjust sk->priority.
 *              Pedro Roque     :       Fix mtu/length error in ip_forward.
 *              Alan Cox        :       Avoid ip_chk_addr when possible.
 *      Richard Underwood       :       IP multicasting.
 *              Alan Cox        :       Cleaned up multicast handlers.
 *              Alan Cox        :       RAW sockets demultiplex in the BSD style.
 *              Gunther Mayer   :       Fix the SNMP reporting typo
 *              Alan Cox        :       Always in group 224.0.0.1
 *      Pauline Middelink       :       Fast ip_checksum update when forwarding
 *                                      Masquerading support.
 *              Alan Cox        :       Multicast loopback error for 224.0.0.1
 *              Alan Cox        :       IP_MULTICAST_LOOP option.
 *              Alan Cox        :       Use notifiers.
 *              Bjorn Ekwall    :       Removed ip_csum (from slhc.c too)
 *              Bjorn Ekwall    :       Moved ip_fast_csum to ip.h (inline!)
 *              Stefan Becker   :       Send out ICMP HOST REDIRECT
 *      Arnt Gulbrandsen        :       ip_build_xmit
 *              Alan Cox        :       Per socket routing cache
 *              Alan Cox        :       Fixed routing cache, added header cache.
 *              Alan Cox        :       Loopback didn't work right in original ip_build_xmit - fixed it.
 *              Alan Cox        :       Only send ICMP_REDIRECT if src/dest are the same net.
 *              Alan Cox        :       Incoming IP option handling.
 *              Alan Cox        :       Set saddr on raw output frames as per BSD.
 *              Alan Cox        :       Stopped broadcast source route explosions.
 *              Alan Cox        :       Can disable source routing
 *              Takeshi Sone    :       Masquerading didn't work.
 *      Dave Bonn,Alan Cox      :       Faster IP forwarding whenever possible.
 *              Alan Cox        :       Memory leaks, tramples, misc debugging.
 *              Alan Cox        :       Fixed multicast (by popular demand 8))
 *              Alan Cox        :       Fixed forwarding (by even more popular demand 8))
 *              Alan Cox        :       Fixed SNMP statistics [I think]
 *      Gerhard Koerting        :       IP fragmentation forwarding fix
 *              Alan Cox        :       Device lock against page fault.
 *              Alan Cox        :       IP_HDRINCL facility.
 *      Werner Almesberger      :       Zero fragment bug
 *              Alan Cox        :       RAW IP frame length bug
 *              Alan Cox        :       Outgoing firewall on build_xmit
 *              A.N.Kuznetsov   :       IP_OPTIONS support throughout the kernel
 *              Alan Cox        :       Multicast routing hooks
 *              Jos Vos         :       Do accounting *before* call_in_firewall
 *      Willy Konynenberg       :       Transparent proxying support
 *
 * To Fix:
 *              IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *              and could be made very efficient with the addition of some virtual memory hacks to permit
 *              the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *              Output fragmentation wants updating along with the buffer management to use a single
 *              interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *              output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *              fragmentation anyway.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/indirect_call_wrapper.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

/*
 *      Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
        struct ip_ra_chain *ra;
        u8 protocol = ip_hdr(skb)->protocol;
        struct sock *last = NULL;
        struct net_device *dev = skb->dev;
        struct net *net = dev_net(dev);

        for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
                struct sock *sk = ra->sk;

                /* If socket is bound to an interface, only report
                 * the packet if it came from that interface.
                 */
                if (sk && inet_sk(sk)->inet_num == protocol &&
                    (!sk->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == dev->ifindex)) {
                        if (ip_is_fragment(ip_hdr(skb))) {
                                if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
                                        return true;
                        }
                        if (last) {
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2)
                                        raw_rcv(last, skb2);
                        }
                        last = sk;
                }
        }

        if (last) {
                raw_rcv(last, skb);
                return true;
        }
        return false;
}

INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
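/*
 *      Deliver the skb to any matching raw sockets and to the registered
 *      transport protocol handler.  Runs under rcu_read_lock().  A negative
 *      return value from the handler requests resubmission with the
 *      protocol number -ret, as used by encapsulation handlers.
 */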
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
        const struct net_protocol *ipprot;
        int raw, ret;

resubmit:
        raw = raw_local_deliver(skb, protocol);

        ipprot = rcu_dereference(inet_protos[protocol]);
        if (ipprot) {
                if (!ipprot->no_policy) {
                        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                                kfree_skb_reason(skb,
                                                 SKB_DROP_REASON_XFRM_POLICY);
                                return;
                        }
                        nf_reset_ct(skb);
                }
                ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
                                      skb);
                if (ret < 0) {
                        protocol = -ret;
                        goto resubmit;
                }
                __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
        } else {
                if (!raw) {
                        if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                                __IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
                                icmp_send(skb, ICMP_DEST_UNREACH,
                                          ICMP_PROT_UNREACH, 0);
                        }
                        kfree_skb_reason(skb, SKB_DROP_REASON_IP_NOPROTO);
                } else {
                        __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
                        consume_skb(skb);
                }
        }
}

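/* Strip the IP header and hand the payload to the protocol demultiplexer. */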
static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        skb_clear_delivery_time(skb);
        __skb_pull(skb, skb_network_header_len(skb));

        rcu_read_lock();
        ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
        rcu_read_unlock();

        return 0;
}

/*
 *      Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
        /*
         *      Reassemble IP fragments.
         */
        struct net *net = dev_net(skb->dev);

        if (ip_is_fragment(ip_hdr(skb))) {
                if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
                        return 0;
        }

        return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
                       net, NULL, skb, skb->dev, NULL,
                       ip_local_deliver_finish);
}
EXPORT_SYMBOL(ip_local_deliver);

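/* Parse and validate the IP options; returns true if the packet should be
 * dropped.
 */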
static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_options *opt;
        const struct iphdr *iph;

        /* It looks like overkill, because not all
           IP options require packet mangling.
           But it is the easiest for now, especially taking
           into account that the combination of IP options
           and a running sniffer is an extremely rare condition.
                                              --ANK (980813)
        */
        if (skb_cow(skb, skb_headroom(skb))) {
                __IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        iph = ip_hdr(skb);
        opt = &(IPCB(skb)->opt);
        opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

        if (ip_options_compile(dev_net(dev), opt, skb)) {
                __IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
                goto drop;
        }

        if (unlikely(opt->srr)) {
                struct in_device *in_dev = __in_dev_get_rcu(dev);

                if (in_dev) {
                        if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
                                if (IN_DEV_LOG_MARTIANS(in_dev))
                                        net_info_ratelimited("source route option %pI4 -> %pI4\n",
                                                             &iph->saddr,
                                                             &iph->daddr);
                                goto drop;
                        }
                }

                if (ip_options_rcv_srr(skb, dev))
                        goto drop;
        }

        return false;
drop:
        return true;
}

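/* A dst hint recorded from an earlier packet in the same receive batch can
 * only be reused when this skb has no dst yet and matches the hint's
 * destination address and tos.
 */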
static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
                            const struct sk_buff *hint)
{
        return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
               ip_hdr(hint)->tos == iph->tos;
}

int tcp_v4_early_demux(struct sk_buff *skb);
int udp_v4_early_demux(struct sk_buff *skb);
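/*
 *      Core receive-path work shared by ip_rcv_finish() and the list
 *      receive path: early demux, route lookup (optionally seeded by a
 *      batching hint), IP option processing and input statistics.
 *      Returns NET_RX_SUCCESS with a dst attached, or NET_RX_DROP after
 *      freeing the skb.
 */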
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                              struct sk_buff *skb, struct net_device *dev,
                              const struct sk_buff *hint)
{
        const struct iphdr *iph = ip_hdr(skb);
        int err, drop_reason;
        struct rtable *rt;

        drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

        if (ip_can_use_hint(skb, iph, hint)) {
                err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
                                        dev, hint);
                if (unlikely(err))
                        goto drop_error;
        }

        if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
            !skb_dst(skb) &&
            !skb->sk &&
            !ip_is_fragment(iph)) {
                switch (iph->protocol) {
                case IPPROTO_TCP:
                        if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
                                tcp_v4_early_demux(skb);

                                /* must reload iph, skb->head might have changed */
                                iph = ip_hdr(skb);
                        }
                        break;
                case IPPROTO_UDP:
                        if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
                                err = udp_v4_early_demux(skb);
                                if (unlikely(err))
                                        goto drop_error;

                                /* must reload iph, skb->head might have changed */
                                iph = ip_hdr(skb);
                        }
                        break;
                }
        }

        /*
         *      Initialise the virtual path cache for the packet. It describes
         *      how the packet travels inside Linux networking.
         */
        if (!skb_valid_dst(skb)) {
                err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                           iph->tos, dev);
                if (unlikely(err))
                        goto drop_error;
        } else {
                struct in_device *in_dev = __in_dev_get_rcu(dev);

                if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY))
                        IPCB(skb)->flags |= IPSKB_NOPOLICY;
        }

#ifdef CONFIG_IP_ROUTE_CLASSID
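        /* Per-CPU routing-realm accounting: the low 8 bits of the route's
         * tclassid index the output counters, bits 16-23 the input counters.
         */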
        if (unlikely(skb_dst(skb)->tclassid)) {
                struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
                u32 idx = skb_dst(skb)->tclassid;
                st[idx&0xFF].o_packets++;
                st[idx&0xFF].o_bytes += skb->len;
                st[(idx>>16)&0xFF].i_packets++;
                st[(idx>>16)&0xFF].i_bytes += skb->len;
        }
#endif

        if (iph->ihl > 5 && ip_rcv_options(skb, dev))
                goto drop;

        rt = skb_rtable(skb);
        if (rt->rt_type == RTN_MULTICAST) {
                __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST) {
                __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
        } else if (skb->pkt_type == PACKET_BROADCAST ||
                   skb->pkt_type == PACKET_MULTICAST) {
                struct in_device *in_dev = __in_dev_get_rcu(dev);

                /* RFC 1122 3.3.6:
                 *
                 *   When a host sends a datagram to a link-layer broadcast
                 *   address, the IP destination address MUST be a legal IP
                 *   broadcast or IP multicast address.
                 *
                 *   A host SHOULD silently discard a datagram that is received
                 *   via a link-layer broadcast (see Section 2.4) but does not
                 *   specify an IP multicast or broadcast destination address.
                 *
                 * This doesn't explicitly say L2 *broadcast*, but broadcast is
                 * in a way a form of multicast and the most common use case for
                 * this is 802.11 protecting against cross-station spoofing (the
                 * so-called "hole-196" attack) so do it for both.
                 */
                if (in_dev &&
                    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST)) {
                        drop_reason = SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST;
                        goto drop;
                }
        }

        return NET_RX_SUCCESS;

drop:
        kfree_skb_reason(skb, drop_reason);
        return NET_RX_DROP;

drop_error:
        if (err == -EXDEV) {
                drop_reason = SKB_DROP_REASON_IP_RPFILTER;
                __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
        }
        goto drop;
}

static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret;

        /* if ingress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
         */
        skb = l3mdev_ip_rcv(skb);
        if (!skb)
                return NET_RX_SUCCESS;

        ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
        if (ret != NET_RX_DROP)
                ret = dst_input(skb);
        return ret;
}

/*
 *      Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
        const struct iphdr *iph;
        int drop_reason;
        u32 len;

        /* When the interface is in promisc. mode, drop all the crap
         * that it receives, do not try to analyse it.
         */
        if (skb->pkt_type == PACKET_OTHERHOST) {
                dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
                drop_reason = SKB_DROP_REASON_OTHERHOST;
                goto drop;
        }

        __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb) {
                __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
                goto out;
        }

        drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto inhdr_error;

        iph = ip_hdr(skb);

        /*
         *      RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
         *
         *      Is the datagram acceptable?
         *
         *      1.      Length at least the size of an ip header
         *      2.      Version of 4
         *      3.      Checksums correctly. [Speed optimisation for later, skip loopback checksums]
         *      4.      Doesn't have a bogus length
         */

        if (iph->ihl < 5 || iph->version != 4)
                goto inhdr_error;

        BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
        BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
        BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
        __IP_ADD_STATS(net,
                       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
                       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

        if (!pskb_may_pull(skb, iph->ihl*4))
                goto inhdr_error;

        iph = ip_hdr(skb);

        if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                goto csum_error;

        len = iph_totlen(skb, iph);
        if (skb->len < len) {
                drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
                __IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        } else if (len < (iph->ihl*4))
                goto inhdr_error;

        /* Our transport medium may have padded the buffer out. Now we know it
         * is IP we can trim to the true length of the frame.
         * Note this now means skb->len holds ntohs(iph->tot_len).
         */
        if (pskb_trim_rcsum(skb, len)) {
                __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        iph = ip_hdr(skb);
        skb->transport_header = skb->network_header + iph->ihl*4;

        /* Remove any debris in the socket control block */
        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
        IPCB(skb)->iif = skb->skb_iif;

        /* Must drop socket now because of tproxy. */
        if (!skb_sk_is_prefetched(skb))
                skb_orphan(skb);

        return skb;

csum_error:
        drop_reason = SKB_DROP_REASON_IP_CSUM;
        __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
        if (drop_reason == SKB_DROP_REASON_NOT_SPECIFIED)
                drop_reason = SKB_DROP_REASON_IP_INHDR;
        __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
        kfree_skb_reason(skb, drop_reason);
out:
        return NULL;
}

/*
 * IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
           struct net_device *orig_dev)
{
        struct net *net = dev_net(dev);

        skb = ip_rcv_core(skb, net);
        if (skb == NULL)
                return NET_RX_DROP;

        return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
                       net, NULL, skb, dev, NULL,
                       ip_rcv_finish);
}

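/* Pass each skb of an already-routed sublist to its dst input handler. */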
static void ip_sublist_rcv_finish(struct list_head *head)
{
        struct sk_buff *skb, *next;

        list_for_each_entry_safe(skb, next, head, list) {
                skb_list_del_init(skb);
                dst_input(skb);
        }
}

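/* Use this skb's route as a hint for the packets that follow it only when
 * the lookup result cannot vary per packet: custom FIB rules, broadcast
 * routes and multipath routes may all give different answers for the same
 * daddr/tos pair.
 */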
static struct sk_buff *ip_extract_route_hint(const struct net *net,
                                             struct sk_buff *skb, int rt_type)
{
        if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST ||
            IPCB(skb)->flags & IPSKB_MULTIPATH)
                return NULL;

        return skb;
}

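/* Route each packet via ip_rcv_finish_core(), then dispatch consecutive
 * packets sharing the same dst together as sublists.
 */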
static void ip_list_rcv_finish(struct net *net, struct sock *sk,
                               struct list_head *head)
{
        struct sk_buff *skb, *next, *hint = NULL;
        struct dst_entry *curr_dst = NULL;
        struct list_head sublist;

        INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
                struct net_device *dev = skb->dev;
                struct dst_entry *dst;

                skb_list_del_init(skb);
                /* if ingress device is enslaved to an L3 master device pass the
                 * skb to its handler for processing
                 */
                skb = l3mdev_ip_rcv(skb);
                if (!skb)
                        continue;
                if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
                        continue;

                dst = skb_dst(skb);
                if (curr_dst != dst) {
                        hint = ip_extract_route_hint(net, skb,
                                               ((struct rtable *)dst)->rt_type);

                        /* dispatch old sublist */
                        if (!list_empty(&sublist))
                                ip_sublist_rcv_finish(&sublist);
                        /* start new sublist */
                        INIT_LIST_HEAD(&sublist);
                        curr_dst = dst;
                }
                list_add_tail(&skb->list, &sublist);
        }
        /* dispatch final sublist */
        ip_sublist_rcv_finish(&sublist);
}

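/* Run the netfilter PRE_ROUTING hook over a sublist of packets from a single
 * device and netns, then route and deliver whatever survives.
 */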
static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
                           struct net *net)
{
        NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
                     head, dev, NULL, ip_rcv_finish);
        ip_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IP packets */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
                 struct net_device *orig_dev)
{
        struct net_device *curr_dev = NULL;
        struct net *curr_net = NULL;
        struct sk_buff *skb, *next;
        struct list_head sublist;

        INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
                struct net_device *dev = skb->dev;
                struct net *net = dev_net(dev);

                skb_list_del_init(skb);
                skb = ip_rcv_core(skb, net);
                if (skb == NULL)
                        continue;

                if (curr_dev != dev || curr_net != net) {
                        /* dispatch old sublist */
                        if (!list_empty(&sublist))
                                ip_sublist_rcv(&sublist, curr_dev, curr_net);
                        /* start new sublist */
                        INIT_LIST_HEAD(&sublist);
                        curr_dev = dev;
                        curr_net = net;
                }
                list_add_tail(&skb->list, &sublist);
        }
        /* dispatch final sublist */
        if (!list_empty(&sublist))
                ip_sublist_rcv(&sublist, curr_dev, curr_net);
}