linux/net/core/netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL   50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

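/*
 * Drain the netpoll transmit backlog from process context.  If the
 * device is gone, not running, or still too busy to accept a packet,
 * requeue the skb and retry a little later.
 */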
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        __kfree_skb(skb);
                        continue;
                }

                local_irq_save(flags);
                netif_tx_lock(dev);
                if ((netif_queue_stopped(dev) ||
                     netif_subqueue_stopped(dev, skb)) ||
                     dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        netif_tx_unlock(dev);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                netif_tx_unlock(dev);
                local_irq_restore(flags);
        }
}

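/*
 * Verify the UDP checksum of a received packet, reusing any checksum
 * the hardware has already provided where possible.  Returns 0 if the
 * checksum is valid or not needed.
 */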
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                            unsigned short ulen, __be32 saddr, __be32 daddr)
{
        __wsum psum;

        if (uh->check == 0 || skb_csum_unnecessary(skb))
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
                         struct napi_struct *napi, int budget)
{
        int work;

        /* net_rx_action's ->poll() invocations and ours are
         * synchronized by this test which is only made while
         * holding the napi->poll_lock.
         */
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;

        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);

        work = napi->poll(napi, budget);

        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;

        return budget - work;
}

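/*
 * Poll every NAPI context registered on the device, sharing a single
 * receive budget across them.  Contexts already owned by this CPU are
 * skipped to avoid recursion.
 */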
static void poll_napi(struct netpoll *np)
{
        struct netpoll_info *npinfo = np->dev->npinfo;
        struct napi_struct *napi;
        int budget = 16;

        list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
                        budget = poll_one_napi(npinfo, napi, budget);
                        spin_unlock(&napi->poll_lock);

                        if (!budget)
                                break;
                }
        }
}

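/* Answer any ARP requests that were queued while receives were trapped. */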
static void service_arp_queue(struct netpoll_info *npi)
{
        struct sk_buff *skb;

        if (unlikely(!npi))
                return;

        skb = skb_dequeue(&npi->arp_tx);

        while (skb != NULL) {
                arp_reply(skb);
                skb = skb_dequeue(&npi->arp_tx);
        }
}

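/*
 * Manually drive the device: ask the driver to service its interrupt
 * work, poll any NAPI contexts, answer pending ARP requests and free
 * completed transmit skbs.
 */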
void netpoll_poll(struct netpoll *np)
{
        if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
                return;

        /* Process pending work on NIC */
        np->dev->poll_controller(np->dev);
        if (!list_empty(&np->dev->napi_list))
                poll_napi(np);

        service_arp_queue(np->dev->npinfo);

        zap_completion_queue();
}

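/* Top up the emergency skb pool so a message can still get out under OOM. */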
static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}

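/*
 * Flush this CPU's skb completion queue so memory freed by drivers
 * while we poll becomes available again without waiting for the
 * transmit softirq to run.
 */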
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (skb->destructor)
                                dev_kfree_skb_any(skb); /* put this one back */
                        else
                                __kfree_skb(skb);
                }
        }

        put_cpu_var(softnet_data);
}

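/*
 * Allocate an skb for transmission, falling back to the pre-allocated
 * pool and, as a last resort, polling the device a few times in the
 * hope that memory gets freed up.
 */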
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll(np);
                        goto repeat;
                }
                return NULL;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}

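/* Return 1 if any NAPI context on the device is being polled by this CPU. */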
static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}

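/*
 * Transmit an skb immediately if possible, spinning for up to one clock
 * tick with interrupts disabled; otherwise defer it to the tx workqueue.
 */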
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        struct net_device *dev = np->dev;
        struct netpoll_info *npinfo = np->dev->npinfo;

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                unsigned long flags;

                local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (netif_tx_trylock(dev)) {
                                if (!netif_queue_stopped(dev) &&
                                    !netif_subqueue_stopped(dev, skb))
                                        status = dev->hard_start_xmit(skb, dev);
                                netif_tx_unlock(dev);

                                if (status == NETDEV_TX_OK)
                                        break;

                        }

                        /* tickle the device; maybe there is some cleanup to do */
                        netpoll_poll(np);

                        udelay(USEC_PER_POLL);
                }
                local_irq_restore(flags);
        }

        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
}

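/*
 * Build a complete UDP/IP/Ethernet frame around the given message and
 * send it out via netpoll_send_skb().
 */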
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb->len += len;

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
        udph->check = csum_tcpudp_magic(htonl(np->local_ip),
                                        htonl(np->remote_ip),
                                        udp_len, IPPROTO_UDP,
                                        csum_partial((unsigned char *)udph, udp_len, 0));
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id       = 0;
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check    = 0;
        put_unaligned(htonl(np->local_ip), &(iph->saddr));
        put_unaligned(htonl(np->remote_ip), &(iph->daddr));
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->local_mac, 6);
        memcpy(eth->h_dest, np->remote_mac, 6);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}

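/*
 * Answer an ARP request for our local IP directly from netpoll context,
 * since the normal ARP machinery may not be running while we are
 * trapping receives.
 */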
static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        __be32 sip, tip;
        unsigned char *sha;
        struct sk_buff *send_skb;
        struct netpoll *np = NULL;

        if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
                np = npinfo->rx_np;
        if (!np)
                return;

        /* No ARP on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
                                 (2 * skb->dev->addr_len) +
                                 (2 * sizeof(u32)))))
                return;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        arp = arp_hdr(skb);

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp + 1);
        /* save the location of the src hw addr */
        sha = arp_ptr;
        arp_ptr += skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4;
        /* if we actually cared about dst hw addr, it would get copied here */
        arp_ptr += skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore this ARP? */
        if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
                return;

        size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
        send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
                            LL_RESERVED_SPACE(np->dev));

        if (!send_skb)
                return;

        skb_reset_network_header(send_skb);
        arp = (struct arphdr *) skb_put(send_skb, size);
        send_skb->dev = skb->dev;
        send_skb->protocol = htons(ETH_P_ARP);

        /* Fill the device header for the ARP frame */
        if (dev_hard_header(send_skb, skb->dev, ptype,
                            sha, np->local_mac,
                            send_skb->len) < 0) {
                kfree_skb(send_skb);
                return;
        }

        /*
         * Fill out the ARP protocol part.
         *
         * We only support Ethernet device types,
         * which (according to RFC 1390) should always equal 1 (Ethernet).
         */

        arp->ar_hrd = htons(np->dev->type);
        arp->ar_pro = htons(ETH_P_IP);
        arp->ar_hln = np->dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        arp_ptr = (unsigned char *)(arp + 1);
        memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &tip, 4);
        arp_ptr += 4;
        memcpy(arp_ptr, sha, np->dev->addr_len);
        arp_ptr += np->dev->addr_len;
        memcpy(arp_ptr, &sip, 4);

        netpoll_send_skb(np, send_skb);
}

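/*
 * Called from the receive path for devices with netpoll active.  Queue
 * ARP requests for later, and hand matching UDP packets straight to the
 * netpoll client's rx_hook.  Returns 1 if the skb was consumed.
 */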
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll_info *npi = skb->dev->npinfo;
        struct netpoll *np = npi->rx_np;

        if (!np)
                goto out;
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npi->arp_tx, skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        /*
         * Our transport medium may have padded the buffer out.
         * Now we trim to the true length of the frame.
         */
        if (pskb_trim_rcsum(skb, len))
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;
        if (np->local_ip && np->local_ip != ntohl(iph->daddr))
                goto out;
        if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
                goto out;
        if (np->local_port && np->local_port != ntohs(uh->dest))
                goto out;

        np->rx_hook(np, ntohs(uh->source),
                    (char *)(uh+1),
                    ulen - sizeof(struct udphdr));

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}

void netpoll_print_options(struct netpoll *np)
{
        DECLARE_MAC_BUF(mac);
        printk(KERN_INFO "%s: local port %d\n",
                         np->name, np->local_port);
        printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                         np->name, HIPQUAD(np->local_ip));
        printk(KERN_INFO "%s: interface %s\n",
                         np->name, np->dev_name);
        printk(KERN_INFO "%s: remote port %d\n",
                         np->name, np->remote_port);
        printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
                         np->name, HIPQUAD(np->remote_ip));
        printk(KERN_INFO "%s: remote ethernet address %s\n",
                         np->name, print_mac(mac, np->remote_mac));
}

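/*
 * Parse a netpoll option string of the form
 *
 *   [local_port]@[local_ip]/[dev_name],[remote_port]@<remote_ip>/[remote_mac]
 *
 * Fields left empty before the remote IP keep whatever value the caller
 * set up; the remote MAC, when given, is six colon-separated hex bytes.
 */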
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = ntohl(in_aton(cur));
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = ntohl(in_aton(cur));
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[0] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[1] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[2] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[3] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[4] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at %s!\n",
               np->name, cur);
        return -1;
}

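/*
 * Attach a netpoll client to its device: look the device up by name,
 * allocate or reuse its netpoll_info, bring the interface up and wait
 * for carrier if needed, and fill in any missing local addresses.
 */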
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        unsigned long flags;
        int err;

        if (np->dev_name)
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -ENODEV;
        }

        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto release;
                }

                npinfo->rx_flags = 0;
                npinfo->rx_np = NULL;

                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                atomic_set(&npinfo->refcnt, 1);
        } else {
                npinfo = ndev->npinfo;
                atomic_inc(&npinfo->refcnt);
        }

        if (!ndev->poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                err = -ENOTSUPP;
                goto release;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_lock();
                err = dev_open(ndev);
                rtnl_unlock();

                if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, ndev->name);
                        goto release;
                }

                atleast = jiffies + HZ/10;
                atmost = jiffies + 4*HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        cond_resched();
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */

                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (is_zero_ether_addr(np->local_mac) && ndev->dev_addr)
                memcpy(np->local_mac, ndev->dev_addr, 6);

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        err = -EDESTADDRREQ;
                        goto release;
                }

                np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
                       np->name, HIPQUAD(np->local_ip));
        }

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                npinfo->rx_np = np;
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* fill up the skb queue */
        refill_skbs();

        /* last thing to do is link it to the net device structure */
        ndev->npinfo = npinfo;

        /* avoid racing with NAPI reading npinfo */
        synchronize_rcu();

        return 0;

 release:
        if (!ndev->npinfo)
                kfree(npinfo);
        np->dev = NULL;
        dev_put(ndev);
        return err;
}

static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);

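/*
 * Detach a netpoll client from its device, dropping the shared
 * netpoll_info and flushing any queued packets once the last user
 * goes away.
 */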
void netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev) {
                npinfo = np->dev->npinfo;
                if (npinfo) {
                        if (npinfo->rx_np == np) {
                                spin_lock_irqsave(&npinfo->rx_lock, flags);
                                npinfo->rx_np = NULL;
                                npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                        }

                        if (atomic_dec_and_test(&npinfo->refcnt)) {
                                skb_queue_purge(&npinfo->arp_tx);
                                skb_queue_purge(&npinfo->txq);
                                cancel_rearming_delayed_work(&npinfo->tx_work);

                                /* clean after last, unfinished work */
                                if (!skb_queue_empty(&npinfo->txq)) {
                                        struct sk_buff *skb;
                                        skb = __skb_dequeue(&npinfo->txq);
                                        kfree_skb(skb);
                                }
                                kfree(npinfo);
                                np->dev->npinfo = NULL;
                        }
                }

                dev_put(np->dev);
        }

        np->dev = NULL;
}

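/*
 * netpoll_trap()/netpoll_set_trap(): while "trapped", received packets
 * are consumed by netpoll instead of being passed up the normal stack,
 * as used by the dump and debugger clients named in the file header.
 */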
int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);