linux/net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL   50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

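/*
 * Flush the deferred transmit queue: retry each queued skb against its
 * device, and if the tx queue is stopped or the driver refuses the
 * packet, put it back and reschedule this work for later.
 */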
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                const struct net_device_ops *ops = dev->netdev_ops;
                struct netdev_queue *txq;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        __kfree_skb(skb);
                        continue;
                }

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                __netif_tx_unlock(txq);
                local_irq_restore(flags);
        }
}

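/*
 * Verify the UDP checksum of a received packet; returns 0 when the
 * checksum is correct or not required, non-zero otherwise.
 */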
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                            unsigned short ulen, __be32 saddr, __be32 daddr)
{
        __wsum psum;

        if (uh->check == 0 || skb_csum_unnecessary(skb))
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
                         struct napi_struct *napi, int budget)
{
        int work;

        /* net_rx_action's ->poll() invocations and ours are
         * synchronized by this test which is only made while
         * holding the napi->poll_lock.
         */
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;

        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);
        set_bit(NAPI_STATE_NPSVC, &napi->state);

        work = napi->poll(napi, budget);

        clear_bit(NAPI_STATE_NPSVC, &napi->state);
        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;

        return budget - work;
}

static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int budget = 16;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
                        budget = poll_one_napi(dev->npinfo, napi, budget);
                        spin_unlock(&napi->poll_lock);

                        if (!budget)
                                break;
                }
        }
}

static void service_arp_queue(struct netpoll_info *npi)
{
        if (npi) {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(&npi->arp_tx)))
                        arp_reply(skb);
        }
}

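/*
 * Poll the device: ask the driver to process pending work via
 * ->ndo_poll_controller(), service any scheduled NAPI contexts and
 * queued ARP requests, then reap the skb completion queue.
 */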
void netpoll_poll(struct netpoll *np)
{
        struct net_device *dev = np->dev;
        const struct net_device_ops *ops;

        if (!dev || !netif_running(dev))
                return;

        ops = dev->netdev_ops;
        if (!ops->ndo_poll_controller)
                return;

        /* Process pending work on NIC */
        ops->ndo_poll_controller(dev);

        poll_napi(dev);

        service_arp_queue(dev->npinfo);

        zap_completion_queue();
}

static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

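/*
 * Free the skbs parked on this CPU's completion queue so that transmit
 * resources are reclaimed even when the normal softirq cannot run;
 * skbs with destructors are sent back through dev_kfree_skb_any().
 */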
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (skb->destructor) {
                                atomic_inc(&skb->users);
                                dev_kfree_skb_any(skb); /* put this one back */
                        } else {
                                __kfree_skb(skb);
                        }
                }
        }

        put_cpu_var(softnet_data);
}

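/*
 * Allocate an skb for transmission, falling back to the preallocated
 * pool and repeatedly polling the device when memory is tight.
 */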
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll(np);
                        goto repeat;
                }
                return NULL;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

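/*
 * Return non-zero if one of the device's NAPI contexts is currently
 * being polled by this CPU, in which case transmitting from here would
 * risk recursing into the driver.
 */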
static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}

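/*
 * Transmit an skb directly, bypassing the normal queueing layer: spin
 * for up to one clock tick trying to hand the packet to the driver,
 * and if that fails, defer it to the tx_work queue.
 */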
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        struct net_device *dev = np->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        struct netpoll_info *npinfo = np->dev->npinfo;

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
                unsigned long flags;

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq))
                                        status = ops->ndo_start_xmit(skb, dev);
                                __netif_tx_unlock(txq);

                                if (status == NETDEV_TX_OK)
                                        break;

                        }

                        /* tickle the device, maybe there is some cleanup */
                        netpoll_poll(np);

                        udelay(USEC_PER_POLL);
                }
                local_irq_restore(flags);
        }

        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
}

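/*
 * Build Ethernet, IP, and UDP headers around the message buffer and
 * send the resulting frame to the configured peer via netpoll_send_skb().
 */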
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb->len += len;

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
        udph->check = csum_tcpudp_magic(np->local_ip,
                                        np->remote_ip,
                                        udp_len, IPPROTO_UDP,
                                        csum_partial(udph, udp_len, 0));
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id       = 0;
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check    = 0;
        put_unaligned(np->local_ip, &(iph->saddr));
        put_unaligned(np->remote_ip, &(iph->daddr));
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
        memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}

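/*
 * Answer an ARP request for the netpoll local address directly from
 * here, since the normal ARP machinery may not get to run while
 * netpoll is trapping packets.
 */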
static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        __be32 sip, tip;
        unsigned char *sha;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;

        if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                np = npinfo->rx_np;
        if (!np)
                return;

        /* No arp on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        arp = arp_hdr(skb);

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp+1);
        /* save the location of the src hw addr */
        sha = arp_ptr;
        arp_ptr += skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4;
        /* if we actually cared about dst hw addr, it would get copied here */
        arp_ptr += skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore arp? */
        if (tip != np->local_ip ||
            ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
                return;

        size = arp_hdr_len(skb->dev);
        send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
                            LL_RESERVED_SPACE(np->dev));

        if (!send_skb)
                return;

        skb_reset_network_header(send_skb);
        arp = (struct arphdr *) skb_put(send_skb, size);
        send_skb->dev = skb->dev;
        send_skb->protocol = htons(ETH_P_ARP);

        /* Fill the device header for the ARP frame */
        if (dev_hard_header(send_skb, skb->dev, ptype,
                            sha, np->dev->dev_addr,
                            send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
        }

        /*
         * Fill out the arp protocol part.
         *
         * we only support ethernet device type,
         * which (according to RFC 1390) should always equal 1 (Ethernet).
         */

        arp->ar_hrd = htons(np->dev->type);
        arp->ar_pro = htons(ETH_P_IP);
        arp->ar_hln = np->dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        arp_ptr = (unsigned char *)(arp + 1);
        memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &tip, 4);
        arp_ptr += 4;
        memcpy(arp_ptr, sha, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &sip, 4);

        netpoll_send_skb(np, send_skb);
}

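/*
 * Called from the receive path to intercept packets meant for the
 * netpoll client.  Returns 1 if the skb was consumed (handed to the
 * rx_hook, queued for ARP, or dropped while trapping), 0 to let the
 * normal stack process it.
 */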
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll_info *npi = skb->dev->npinfo;
        struct netpoll *np = npi->rx_np;

        if (!np)
                goto out;
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npi->arp_tx, skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        /*
         * Our transport medium may have padded the buffer out.
         * Now we trim to the true length of the frame.
         */
        if (pskb_trim_rcsum(skb, len))
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;
        if (np->local_ip && np->local_ip != iph->daddr)
                goto out;
        if (np->remote_ip && np->remote_ip != iph->saddr)
                goto out;
        if (np->local_port && np->local_port != ntohs(uh->dest))
                goto out;

        np->rx_hook(np, ntohs(uh->source),
                    (char *)(uh+1),
                    ulen - sizeof(struct udphdr));

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

void netpoll_print_options(struct netpoll *np)
{
        printk(KERN_INFO "%s: local port %d\n",
                         np->name, np->local_port);
        printk(KERN_INFO "%s: local IP %pI4\n",
                         np->name, &np->local_ip);
        printk(KERN_INFO "%s: interface %s\n",
                         np->name, np->dev_name);
        printk(KERN_INFO "%s: remote port %d\n",
                         np->name, np->remote_port);
        printk(KERN_INFO "%s: remote IP %pI4\n",
                         np->name, &np->remote_ip);
        printk(KERN_INFO "%s: remote ethernet address %pM\n",
                         np->name, np->remote_mac);
}

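/*
 * Parse a netconsole-style configuration string of the form
 *
 *      src_port@src_ip/dev,dst_port@dst_ip/dst_mac
 *
 * Fields other than the destination IP may be left empty to keep their
 * defaults, e.g. (addresses below are purely illustrative):
 *
 *      6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 */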
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = in_aton(cur);
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = in_aton(cur);
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[0] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[1] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[2] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[3] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[4] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at %s!\n",
               np->name, cur);
        return -1;
}

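/*
 * Attach a netpoll client to its device: look up the interface, set up
 * (or share) the per-device netpoll_info, bring the device up and wait
 * for carrier if necessary, and pick a local IP if none was given.
 */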
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        unsigned long flags;
        int err;

        if (np->dev_name)
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -ENODEV;
        }

        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto release;
                }

                npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;

                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                atomic_set(&npinfo->refcnt, 1);
        } else {
                npinfo = ndev->npinfo;
                atomic_inc(&npinfo->refcnt);
        }

        if (!ndev->netdev_ops->ndo_poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                err = -ENOTSUPP;
                goto release;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_lock();
                err = dev_open(ndev);
                rtnl_unlock();

                if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, ndev->name);
                        goto release;
                }

                atleast = jiffies + HZ/10;
                atmost = jiffies + 4*HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        cond_resched();
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */

                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        err = -EDESTADDRREQ;
                        goto release;
                }

                np->local_ip = in_dev->ifa_list->ifa_local;
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
        }

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                npinfo->rx_np = np;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* fill up the skb queue */
        refill_skbs();

        /* last thing to do is link it to the net device structure */
        ndev->npinfo = npinfo;

        /* avoid racing with NAPI reading npinfo */
        synchronize_rcu();

        return 0;

 release:
        if (!ndev->npinfo)
                kfree(npinfo);
        np->dev = NULL;
        dev_put(ndev);
        return err;
}

static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

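/*
 * Detach a netpoll client from its device, unregistering the rx hook
 * and releasing the shared netpoll_info once its last user is gone.
 */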
void netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev) {
                npinfo = np->dev->npinfo;
                if (npinfo) {
                        if (npinfo->rx_np == np) {
                                spin_lock_irqsave(&npinfo->rx_lock, flags);
                                npinfo->rx_np = NULL;
                                npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                        }

                        if (atomic_dec_and_test(&npinfo->refcnt)) {
                                skb_queue_purge(&npinfo->arp_tx);
                                skb_queue_purge(&npinfo->txq);
                                cancel_rearming_delayed_work(&npinfo->tx_work);

                                /* clean after last, unfinished work */
                                __skb_queue_purge(&npinfo->txq);
                                kfree(npinfo);
                                np->dev->npinfo = NULL;
                        }
                }

                dev_put(np->dev);
        }

        np->dev = NULL;
}

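/*
 * While the trap count is non-zero, packets arriving on the netpoll
 * device are consumed by netpoll instead of being handed to the normal
 * stack (used, for example, by a remote debugger over the network).
 */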
int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);