/* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
/*
        Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>

        Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
        Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
        Copyright 2001 Manfred Spraul                               [natsemi.c]
        Copyright 1999-2001 by Donald Becker.                       [natsemi.c]
        Written 1997-2001 by Donald Becker.                         [8139too.c]
        Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        See the file COPYING in this distribution for more information.

        Contributors:

                Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
                PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
                LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>

        TODO:
        * Test Tx checksumming thoroughly

        Low priority TODO:
        * Complete reset on PciErr
        * Consider Rx interrupt mitigation using TimerIntr
        * Investigate using skb->priority with h/w VLAN priority
        * Investigate using High Priority Tx Queue with skb->priority
        * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
        * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
        * Implement Tx software interrupt mitigation via
          Tx descriptor bit
        * The real minimum of CP_MIN_MTU is 4 bytes.  However,
          for this to be supported, one must(?) turn on packet padding.
        * Support external MII transceivers (patch available)

        NOTES:
        * TX checksumming is considered experimental.  It is off by
          default; use ethtool to turn it on.

 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME                "8139cp"
#define DRV_VERSION             "1.3"
#define DRV_RELDATE             "Mar 22, 2004"


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/cache.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");

/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
   The RTL chips use a 64 element hash table based on the Ethernet CRC.  */
static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");

#define CP_DEF_MSG_ENABLE       (NETIF_MSG_DRV          | \
                                 NETIF_MSG_PROBE        | \
                                 NETIF_MSG_LINK)
#define CP_NUM_STATS            14      /* struct cp_dma_stats, plus one */
#define CP_STATS_SIZE           64      /* size in bytes of DMA stats block */
#define CP_REGS_SIZE            (0xff + 1)
#define CP_REGS_VER             1               /* version 1 */
#define CP_RX_RING_SIZE         64
#define CP_TX_RING_SIZE         64
#define CP_RING_BYTES           \
                ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) +   \
                 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) +   \
                 CP_STATS_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (CP_TX_RING_SIZE - 1))
#define NEXT_RX(N)              (((N) + 1) & (CP_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)                                      \
        (((CP)->tx_tail <= (CP)->tx_head) ?                     \
          (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head :       \
          (CP)->tx_tail - (CP)->tx_head - 1)
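/*
 * Worked example of the ring accounting above (illustrative note, not
 * from the original source): with CP_TX_RING_SIZE = 64, tx_head = 62
 * and tx_tail = 1, the first branch yields 1 + 63 - 62 = 2 free
 * descriptors.  One slot is always kept unused so that head == tail
 * can unambiguously mean "ring empty" rather than "ring full".
 * NEXT_TX/NEXT_RX rely on the ring sizes being powers of two, which
 * lets "& (SIZE - 1)" replace a modulo for the wrap-around.
 */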

#define PKT_BUF_SZ              1536    /* Size of each temporary Rx buffer.*/
#define CP_INTERNAL_PHY         32

/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
#define RX_FIFO_THRESH          5       /* Rx buffer level before first PCI xfer.  */
#define RX_DMA_BURST            4       /* Maximum PCI burst, '4' is 256 */
#define TX_DMA_BURST            6       /* Maximum PCI burst, '6' is 1024 */
#define TX_EARLY_THRESH         256     /* Early Tx threshold, in bytes */
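/*
 * Decoding sketch for the log_2(bytes)-4 encodings above (added note):
 * a field value v selects 16 << v bytes, so RX_FIFO_THRESH = 5 selects
 * 512, RX_DMA_BURST = 4 selects 256 and TX_DMA_BURST = 6 selects 1024;
 * the special value 7 means "defer until the end of the packet".
 */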

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (6*HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define CP_MIN_MTU              60      /* TODO: allow lower, but pad */
#define CP_MAX_MTU              4096

enum {
        /* NIC register offsets */
        MAC0            = 0x00, /* Ethernet hardware address. */
        MAR0            = 0x08, /* Multicast filter. */
        StatsAddr       = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
        TxRingAddr      = 0x20, /* 64-bit start addr of Tx ring */
        HiTxRingAddr    = 0x28, /* 64-bit start addr of high priority Tx ring */
        Cmd             = 0x37, /* Command register */
        IntrMask        = 0x3C, /* Interrupt mask */
        IntrStatus      = 0x3E, /* Interrupt status */
        TxConfig        = 0x40, /* Tx configuration */
        ChipVersion     = 0x43, /* 8-bit chip version, inside TxConfig */
        RxConfig        = 0x44, /* Rx configuration */
        RxMissed        = 0x4C, /* 24 bits valid, write clears */
        Cfg9346         = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
        Config1         = 0x52, /* Config1 */
        Config3         = 0x59, /* Config3 */
        Config4         = 0x5A, /* Config4 */
        MultiIntr       = 0x5C, /* Multiple interrupt select */
        BasicModeCtrl   = 0x62, /* MII BMCR */
        BasicModeStatus = 0x64, /* MII BMSR */
        NWayAdvert      = 0x66, /* MII ADVERTISE */
        NWayLPAR        = 0x68, /* MII LPA */
        NWayExpansion   = 0x6A, /* MII Expansion */
        TxDmaOkLowDesc  = 0x82, /* Low 16 bit address of a Tx descriptor. */
        Config5         = 0xD8, /* Config5 */
        TxPoll          = 0xD9, /* Tell chip to check Tx descriptors for work */
        RxMaxSize       = 0xDA, /* Max size of an Rx packet (8169 only) */
        CpCmd           = 0xE0, /* C+ Command register (C+ mode only) */
        IntrMitigate    = 0xE2, /* rx/tx interrupt mitigation control */
        RxRingAddr      = 0xE4, /* 64-bit start addr of Rx ring */
        TxThresh        = 0xEC, /* Early Tx threshold */
        OldRxBufAddr    = 0x30, /* DMA address of Rx ring buffer (C mode) */
        OldTSD0         = 0x10, /* DMA address of first Tx desc (C mode) */

        /* Tx and Rx status descriptors */
        DescOwn         = (1 << 31), /* Descriptor is owned by NIC */
        RingEnd         = (1 << 30), /* End of descriptor ring */
        FirstFrag       = (1 << 29), /* First segment of a packet */
        LastFrag        = (1 << 28), /* Final segment of a packet */
        LargeSend       = (1 << 27), /* TCP Large Send Offload (TSO) */
        MSSShift        = 16,        /* MSS value position */
        MSSMask         = 0x7ff,     /* MSS value: 11 bits */
        TxError         = (1 << 23), /* Tx error summary */
        RxError         = (1 << 20), /* Rx error summary */
        IPCS            = (1 << 18), /* Calculate IP checksum */
        UDPCS           = (1 << 17), /* Calculate UDP/IP checksum */
        TCPCS           = (1 << 16), /* Calculate TCP/IP checksum */
        TxVlanTag       = (1 << 17), /* Add VLAN tag */
        RxVlanTagged    = (1 << 16), /* Rx VLAN tag available */
        IPFail          = (1 << 15), /* IP checksum failed */
        UDPFail         = (1 << 14), /* UDP/IP checksum failed */
        TCPFail         = (1 << 13), /* TCP/IP checksum failed */
        NormalTxPoll    = (1 << 6),  /* One or more normal Tx packets to send */
        PID1            = (1 << 17), /* 2 protocol id bits:  0==non-IP, */
        PID0            = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
        RxProtoTCP      = 1,
        RxProtoUDP      = 2,
        RxProtoIP       = 3,
        TxFIFOUnder     = (1 << 25), /* Tx FIFO underrun */
        TxOWC           = (1 << 22), /* Tx Out-of-window collision */
        TxLinkFail      = (1 << 21), /* Link failed during Tx of packet */
        TxMaxCol        = (1 << 20), /* Tx aborted due to excessive collisions */
        TxColCntShift   = 16,        /* Shift, to get 4-bit Tx collision cnt */
        TxColCntMask    = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
        RxErrFrame      = (1 << 27), /* Rx frame alignment error */
        RxMcast         = (1 << 26), /* Rx multicast packet rcv'd */
        RxErrCRC        = (1 << 18), /* Rx CRC error */
        RxErrRunt       = (1 << 19), /* Rx error, packet < 64 bytes */
        RxErrLong       = (1 << 21), /* Rx error, packet > 4096 bytes */
        RxErrFIFO       = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */

        /* StatsAddr register */
        DumpStats       = (1 << 3),  /* Begin stats dump */

        /* RxConfig register */
        RxCfgFIFOShift  = 13,        /* Shift, to get Rx FIFO thresh value */
        RxCfgDMAShift   = 8,         /* Shift, to get Rx Max DMA value */
        AcceptErr       = 0x20,      /* Accept packets with CRC errors */
        AcceptRunt      = 0x10,      /* Accept runt (<64 bytes) packets */
        AcceptBroadcast = 0x08,      /* Accept broadcast packets */
        AcceptMulticast = 0x04,      /* Accept multicast packets */
        AcceptMyPhys    = 0x02,      /* Accept pkts with our MAC as dest */
        AcceptAllPhys   = 0x01,      /* Accept all pkts w/ physical dest */

        /* IntrMask / IntrStatus registers */
        PciErr          = (1 << 15), /* System error on the PCI bus */
        TimerIntr       = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
        LenChg          = (1 << 13), /* Cable length change */
        SWInt           = (1 << 8),  /* Software-requested interrupt */
        TxEmpty         = (1 << 7),  /* No Tx descriptors available */
        RxFIFOOvr       = (1 << 6),  /* Rx FIFO Overflow */
        LinkChg         = (1 << 5),  /* Packet underrun, or link change */
        RxEmpty         = (1 << 4),  /* No Rx descriptors available */
        TxErr           = (1 << 3),  /* Tx error */
        TxOK            = (1 << 2),  /* Tx packet sent */
        RxErr           = (1 << 1),  /* Rx error */
        RxOK            = (1 << 0),  /* Rx packet received */
        IntrResvd       = (1 << 10), /* reserved, according to RealTek engineers,
                                        but hardware likes to raise it */

        IntrAll         = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
                          RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
                          RxErr | RxOK | IntrResvd,

        /* C mode command register */
        CmdReset        = (1 << 4),  /* Enable to reset; self-clearing */
        RxOn            = (1 << 3),  /* Rx mode enable */
        TxOn            = (1 << 2),  /* Tx mode enable */

        /* C+ mode command register */
        RxVlanOn        = (1 << 6),  /* Rx VLAN de-tagging enable */
        RxChkSum        = (1 << 5),  /* Rx checksum offload enable */
        PCIDAC          = (1 << 4),  /* PCI Dual Address Cycle (64-bit PCI) */
        PCIMulRW        = (1 << 3),  /* Enable PCI read/write multiple */
        CpRxOn          = (1 << 1),  /* Rx mode enable */
        CpTxOn          = (1 << 0),  /* Tx mode enable */

        /* Cfg9346 EEPROM control register */
        Cfg9346_Lock    = 0x00,      /* Lock ConfigX/MII register access */
        Cfg9346_Unlock  = 0xC0,      /* Unlock ConfigX/MII register access */

        /* TxConfig register */
        IFG             = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
        TxDMAShift      = 8,         /* DMA burst value (0-7) is shift this many bits */

        /* Early Tx Threshold register */
        TxThreshMask    = 0x3f,      /* Mask bits 5-0 */
        TxThreshMax     = 2048,      /* Max early Tx threshold */

        /* Config1 register */
        DriverLoaded    = (1 << 5),  /* Software marker, driver is loaded */
        LWACT           = (1 << 4),  /* LWAKE active mode */
        PMEnable        = (1 << 0),  /* Enable various PM features of chip */

        /* Config3 register */
        PARMEnable      = (1 << 6),  /* Enable auto-loading of PHY parms */
        MagicPacket     = (1 << 5),  /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),  /* Wake up when the cable connection is re-established */

        /* Config4 register */
        LWPTN           = (1 << 1),  /* LWAKE Pattern */
        LWPME           = (1 << 4),  /* LANWAKE vs PMEB */

        /* Config5 register */
        BWF             = (1 << 6),  /* Accept Broadcast wakeup frame */
        MWF             = (1 << 5),  /* Accept Multicast wakeup frame */
        UWF             = (1 << 4),  /* Accept Unicast wakeup frame */
        LANWake         = (1 << 1),  /* Enable LANWake signal */
        PMEStatus       = (1 << 0),  /* PME status can be reset by PCI RST# */

        cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
        cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
        cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
};

static const unsigned int cp_rx_config =
          (RX_FIFO_THRESH << RxCfgFIFOShift) |
          (RX_DMA_BURST << RxCfgDMAShift);

struct cp_desc {
        __le32          opts1;
        __le32          opts2;
        __le64          addr;
};
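/*
 * Layout note (added): each descriptor is 16 bytes -- opts1 carries the
 * DescOwn/RingEnd/fragment flags plus the buffer length, opts2 the
 * VLAN tag word, and addr the 64-bit bus address of the data buffer.
 * The fields are little-endian, hence the __le32/__le64 annotations
 * and the cpu_to_le*()/le*_to_cpu() conversions throughout.
 */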

struct cp_dma_stats {
        __le64                  tx_ok;
        __le64                  rx_ok;
        __le64                  tx_err;
        __le32                  rx_err;
        __le16                  rx_fifo;
        __le16                  frame_align;
        __le32                  tx_ok_1col;
        __le32                  tx_ok_mcol;
        __le64                  rx_ok_phys;
        __le64                  rx_ok_bcast;
        __le32                  rx_ok_mcast;
        __le16                  tx_abort;
        __le16                  tx_underrun;
} __packed;

struct cp_extra_stats {
        unsigned long           rx_frags;
};

struct cp_private {
        void                    __iomem *regs;
        struct net_device       *dev;
        spinlock_t              lock;
        u32                     msg_enable;

        struct napi_struct      napi;

        struct pci_dev          *pdev;
        u32                     rx_config;
        u16                     cpcmd;

        struct cp_extra_stats   cp_stats;

        unsigned                rx_head         ____cacheline_aligned;
        unsigned                rx_tail;
        struct cp_desc          *rx_ring;
        struct sk_buff          *rx_skb[CP_RX_RING_SIZE];

        unsigned                tx_head         ____cacheline_aligned;
        unsigned                tx_tail;
        struct cp_desc          *tx_ring;
        struct sk_buff          *tx_skb[CP_TX_RING_SIZE];
        u32                     tx_opts[CP_TX_RING_SIZE];

        unsigned                rx_buf_sz;
        unsigned                wol_enabled : 1; /* Is Wake-on-LAN enabled? */

        dma_addr_t              ring_dma;

        struct mii_if_info      mii_if;
};

#define cpr8(reg)       readb(cp->regs + (reg))
#define cpr16(reg)      readw(cp->regs + (reg))
#define cpr32(reg)      readl(cp->regs + (reg))
#define cpw8(reg,val)   writeb((val), cp->regs + (reg))
#define cpw16(reg,val)  writew((val), cp->regs + (reg))
#define cpw32(reg,val)  writel((val), cp->regs + (reg))
#define cpw8_f(reg,val) do {                    \
        writeb((val), cp->regs + (reg));        \
        readb(cp->regs + (reg));                \
        } while (0)
#define cpw16_f(reg,val) do {                   \
        writew((val), cp->regs + (reg));        \
        readw(cp->regs + (reg));                \
        } while (0)
#define cpw32_f(reg,val) do {                   \
        writel((val), cp->regs + (reg));        \
        readl(cp->regs + (reg));                \
        } while (0)
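/*
 * Note on the _f ("flush") variants above (added comment): PCI writes
 * are posted, so a plain writeX() can linger in a bridge buffer.
 * Reading the same register back forces the posted write to reach the
 * chip before the CPU continues -- a standard MMIO ordering idiom.
 */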


static void __cp_set_rx_mode (struct net_device *dev);
static void cp_tx (struct cp_private *cp);
static void cp_clean_rings (struct cp_private *cp);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cp_poll_controller(struct net_device *dev);
#endif
static int cp_get_eeprom_len(struct net_device *dev);
static int cp_get_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);
static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);

static struct {
        const char str[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "tx_ok" },
        { "rx_ok" },
        { "tx_err" },
        { "rx_err" },
        { "rx_fifo" },
        { "frame_align" },
        { "tx_ok_1col" },
        { "tx_ok_mcol" },
        { "rx_ok_phys" },
        { "rx_ok_bcast" },
        { "rx_ok_mcast" },
        { "tx_abort" },
        { "tx_underrun" },
        { "rx_frags" },
};


static inline void cp_set_rxbufsize (struct cp_private *cp)
{
        unsigned int mtu = cp->dev->mtu;

        if (mtu > ETH_DATA_LEN)
                /* MTU + ethernet header + FCS + optional VLAN tag */
                cp->rx_buf_sz = mtu + ETH_HLEN + 8;
        else
                cp->rx_buf_sz = PKT_BUF_SZ;
}

static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
                              struct cp_desc *desc)
{
        u32 opts2 = le32_to_cpu(desc->opts2);

        skb->protocol = eth_type_trans (skb, cp->dev);

        cp->dev->stats.rx_packets++;
        cp->dev->stats.rx_bytes += skb->len;

        if (opts2 & RxVlanTagged)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));

        napi_gro_receive(&cp->napi, skb);
}

static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
                            u32 status, u32 len)
{
        netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
                  rx_tail, status, len);
        cp->dev->stats.rx_errors++;
        if (status & RxErrFrame)
                cp->dev->stats.rx_frame_errors++;
        if (status & RxErrCRC)
                cp->dev->stats.rx_crc_errors++;
        if ((status & RxErrRunt) || (status & RxErrLong))
                cp->dev->stats.rx_length_errors++;
        if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
                cp->dev->stats.rx_length_errors++;
        if (status & RxErrFIFO)
                cp->dev->stats.rx_fifo_errors++;
}

static inline unsigned int cp_rx_csum_ok (u32 status)
{
        unsigned int protocol = (status >> 16) & 0x3;

        if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
            ((protocol == RxProtoUDP) && !(status & UDPFail)))
                return 1;
        else
                return 0;
}
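/*
 * Decode sketch for the helper above (added comment): bits 17:16 of
 * the Rx status word are the PID1:PID0 protocol id from the enum, and
 * the hardware checksum is trusted only when the matching
 * TCPFail/UDPFail bit is clear.  Plain IP and non-IP frames fall
 * through to software checksumming.
 */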

static int cp_rx_poll(struct napi_struct *napi, int budget)
{
        struct cp_private *cp = container_of(napi, struct cp_private, napi);
        struct net_device *dev = cp->dev;
        unsigned int rx_tail = cp->rx_tail;
        int rx;

rx_status_loop:
        rx = 0;
        cpw16(IntrStatus, cp_rx_intr_mask);

        while (rx < budget) {
                u32 status, len;
                dma_addr_t mapping, new_mapping;
                struct sk_buff *skb, *new_skb;
                struct cp_desc *desc;
                const unsigned buflen = cp->rx_buf_sz;

                skb = cp->rx_skb[rx_tail];
                BUG_ON(!skb);

                desc = &cp->rx_ring[rx_tail];
                status = le32_to_cpu(desc->opts1);
                if (status & DescOwn)
                        break;

                len = (status & 0x1fff) - 4;
                mapping = le64_to_cpu(desc->addr);

                if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
                        /* we don't support incoming fragmented frames.
                         * instead, we attempt to ensure that the
                         * pre-allocated RX skbs are properly sized such
                         * that RX fragments are never encountered
                         */
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        dev->stats.rx_dropped++;
                        cp->cp_stats.rx_frags++;
                        goto rx_next;
                }

                if (status & (RxError | RxErrFIFO)) {
                        cp_rx_err_acct(cp, rx_tail, status, len);
                        goto rx_next;
                }

                netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
                          rx_tail, status, len);

                new_skb = napi_alloc_skb(napi, buflen);
                if (!new_skb) {
                        dev->stats.rx_dropped++;
                        goto rx_next;
                }

                new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
                                         PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
                        dev->stats.rx_dropped++;
                        kfree_skb(new_skb);
                        goto rx_next;
                }

                dma_unmap_single(&cp->pdev->dev, mapping,
                                 buflen, PCI_DMA_FROMDEVICE);

                /* Handle checksum offloading for incoming packets. */
                if (cp_rx_csum_ok(status))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                skb_put(skb, len);

                cp->rx_skb[rx_tail] = new_skb;

                cp_rx_skb(cp, skb, desc);
                rx++;
                mapping = new_mapping;

rx_next:
                cp->rx_ring[rx_tail].opts2 = 0;
                cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
                if (rx_tail == (CP_RX_RING_SIZE - 1))
                        desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
                                                  cp->rx_buf_sz);
                else
                        desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
                rx_tail = NEXT_RX(rx_tail);
        }

        cp->rx_tail = rx_tail;

        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
        if (rx < budget) {
                unsigned long flags;

                if (cpr16(IntrStatus) & cp_rx_intr_mask)
                        goto rx_status_loop;

                napi_gro_flush(napi, false);
                spin_lock_irqsave(&cp->lock, flags);
                __napi_complete(napi);
                cpw16_f(IntrMask, cp_intr_mask);
                spin_unlock_irqrestore(&cp->lock, flags);
        }

        return rx;
}
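/*
 * Added note on the completion path above: IntrStatus is re-checked
 * before __napi_complete() so that a packet arriving after the ring
 * was drained but before interrupts are unmasked restarts the poll
 * loop instead of being stranded until some later interrupt.
 */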

static irqreturn_t cp_interrupt (int irq, void *dev_instance)
{
        struct net_device *dev = dev_instance;
        struct cp_private *cp;
        int handled = 0;
        u16 status;

        if (unlikely(dev == NULL))
                return IRQ_NONE;
        cp = netdev_priv(dev);

        spin_lock(&cp->lock);

        status = cpr16(IntrStatus);
        if (!status || (status == 0xFFFF))
                goto out_unlock;

        handled = 1;

        netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
                  status, cpr8(Cmd), cpr16(CpCmd));

        cpw16(IntrStatus, status & ~cp_rx_intr_mask);

        /* close possible races with dev_close */
        if (unlikely(!netif_running(dev))) {
                cpw16(IntrMask, 0);
                goto out_unlock;
        }

        if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
                if (napi_schedule_prep(&cp->napi)) {
                        cpw16_f(IntrMask, cp_norx_intr_mask);
                        __napi_schedule(&cp->napi);
                }

        if (status & (TxOK | TxErr | TxEmpty | SWInt))
                cp_tx(cp);
        if (status & LinkChg)
                mii_check_media(&cp->mii_if, netif_msg_link(cp), false);


        if (status & PciErr) {
                u16 pci_status;

                pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
                pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
                netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
                           status, pci_status);

                /* TODO: reset hardware */
        }

out_unlock:
        spin_unlock(&cp->lock);

        return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void cp_poll_controller(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        const int irq = cp->pdev->irq;

        disable_irq(irq);
        cp_interrupt(irq, dev);
        enable_irq(irq);
}
#endif

static void cp_tx (struct cp_private *cp)
{
        unsigned tx_head = cp->tx_head;
        unsigned tx_tail = cp->tx_tail;
        unsigned bytes_compl = 0, pkts_compl = 0;

        while (tx_tail != tx_head) {
                struct cp_desc *txd = cp->tx_ring + tx_tail;
                struct sk_buff *skb;
                u32 status;

                rmb();
                status = le32_to_cpu(txd->opts1);
                if (status & DescOwn)
                        break;

                skb = cp->tx_skb[tx_tail];
                BUG_ON(!skb);

                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 cp->tx_opts[tx_tail] & 0xffff,
                                 PCI_DMA_TODEVICE);

                if (status & LastFrag) {
                        if (status & (TxError | TxFIFOUnder)) {
                                netif_dbg(cp, tx_err, cp->dev,
                                          "tx err, status 0x%x\n", status);
                                cp->dev->stats.tx_errors++;
                                if (status & TxOWC)
                                        cp->dev->stats.tx_window_errors++;
                                if (status & TxMaxCol)
                                        cp->dev->stats.tx_aborted_errors++;
                                if (status & TxLinkFail)
                                        cp->dev->stats.tx_carrier_errors++;
                                if (status & TxFIFOUnder)
                                        cp->dev->stats.tx_fifo_errors++;
                        } else {
                                cp->dev->stats.collisions +=
                                        ((status >> TxColCntShift) & TxColCntMask);
                                cp->dev->stats.tx_packets++;
                                cp->dev->stats.tx_bytes += skb->len;
                                netif_dbg(cp, tx_done, cp->dev,
                                          "tx done, slot %d\n", tx_tail);
                        }
                        bytes_compl += skb->len;
                        pkts_compl++;
                        dev_kfree_skb_irq(skb);
                }

                cp->tx_skb[tx_tail] = NULL;

                tx_tail = NEXT_TX(tx_tail);
        }

        cp->tx_tail = tx_tail;

        netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
        if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
                netif_wake_queue(cp->dev);
}

static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
{
        return skb_vlan_tag_present(skb) ?
                TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
}
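/*
 * Added note: the swab16() here (and its counterpart in cp_rx_skb())
 * exists because the chip carries the VLAN tag in network byte order
 * inside an otherwise little-endian descriptor word, so the host-order
 * tag from skb_vlan_tag_get() must be byte-swapped both ways.
 */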

static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
                                   int first, int entry_last)
{
        int frag, index;
        struct cp_desc *txd;
        skb_frag_t *this_frag;
        for (frag = 0; frag+first < entry_last; frag++) {
                index = first+frag;
                cp->tx_skb[index] = NULL;
                txd = &cp->tx_ring[index];
                this_frag = &skb_shinfo(skb)->frags[frag];
                dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
                                 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
        }
}

static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
                                        struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned entry;
        u32 eor, opts1;
        unsigned long intr_flags;
        __le32 opts2;
        int mss = 0;

        spin_lock_irqsave(&cp->lock, intr_flags);

        /* This is a hard error, log it. */
        if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&cp->lock, intr_flags);
                netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        }

        entry = cp->tx_head;
        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
        mss = skb_shinfo(skb)->gso_size;

        if (mss > MSSMask) {
                WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
                          mss);
                goto out_dma_error;
        }

        opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
        opts1 = DescOwn;
        if (mss)
                opts1 |= LargeSend | (mss << MSSShift);
        else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const struct iphdr *ip = ip_hdr(skb);
                if (ip->protocol == IPPROTO_TCP)
                        opts1 |= IPCS | TCPCS;
                else if (ip->protocol == IPPROTO_UDP)
                        opts1 |= IPCS | UDPCS;
                else {
                        WARN_ONCE(1,
                                  "Net bug: asked to checksum invalid Legacy IP packet\n");
                        goto out_dma_error;
                }
        }

        if (skb_shinfo(skb)->nr_frags == 0) {
                struct cp_desc *txd = &cp->tx_ring[entry];
                u32 len;
                dma_addr_t mapping;

                len = skb->len;
                mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(&cp->pdev->dev, mapping))
                        goto out_dma_error;

                txd->opts2 = opts2;
                txd->addr = cpu_to_le64(mapping);
                wmb();

                opts1 |= eor | len | FirstFrag | LastFrag;

                txd->opts1 = cpu_to_le32(opts1);
                wmb();

                cp->tx_skb[entry] = skb;
                cp->tx_opts[entry] = opts1;
                netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
                          entry, skb->len);
        } else {
                struct cp_desc *txd;
                u32 first_len, first_eor, ctrl;
                dma_addr_t first_mapping;
                int frag, first_entry = entry;

                /* We must give this initial chunk to the device last.
                 * Otherwise we could race with the device.
                 */
                first_eor = eor;
                first_len = skb_headlen(skb);
                first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                               first_len, PCI_DMA_TODEVICE);
                if (dma_mapping_error(&cp->pdev->dev, first_mapping))
                        goto out_dma_error;

                cp->tx_skb[entry] = skb;

                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                        const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
                        u32 len;
                        dma_addr_t mapping;

                        entry = NEXT_TX(entry);

                        len = skb_frag_size(this_frag);
                        mapping = dma_map_single(&cp->pdev->dev,
                                                 skb_frag_address(this_frag),
                                                 len, PCI_DMA_TODEVICE);
                        if (dma_mapping_error(&cp->pdev->dev, mapping)) {
                                unwind_tx_frag_mapping(cp, skb, first_entry, entry);
                                goto out_dma_error;
                        }

                        eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

                        ctrl = opts1 | eor | len;

                        if (frag == skb_shinfo(skb)->nr_frags - 1)
                                ctrl |= LastFrag;

                        txd = &cp->tx_ring[entry];
                        txd->opts2 = opts2;
                        txd->addr = cpu_to_le64(mapping);
                        wmb();

                        txd->opts1 = cpu_to_le32(ctrl);
                        wmb();

                        cp->tx_opts[entry] = ctrl;
                        cp->tx_skb[entry] = skb;
                }

                txd = &cp->tx_ring[first_entry];
                txd->opts2 = opts2;
                txd->addr = cpu_to_le64(first_mapping);
                wmb();

                ctrl = opts1 | first_eor | first_len | FirstFrag;
                txd->opts1 = cpu_to_le32(ctrl);
                wmb();

                cp->tx_opts[first_entry] = ctrl;
                netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
                          first_entry, entry, skb->len);
        }
        cp->tx_head = NEXT_TX(entry);

        netdev_sent_queue(dev, skb->len);
        if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
                netif_stop_queue(dev);

out_unlock:
        spin_unlock_irqrestore(&cp->lock, intr_flags);

        cpw8(TxPoll, NormalTxPoll);

        return NETDEV_TX_OK;
out_dma_error:
        dev_kfree_skb_any(skb);
        cp->dev->stats.tx_dropped++;
        goto out_unlock;
}
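/*
 * Added note: each descriptor above is published in two steps -- opts2
 * and addr first, then a wmb(), then opts1 with DescOwn set -- so the
 * NIC can never observe an ownership bit ahead of the fields it
 * guards.  For multi-fragment packets the head descriptor is handed
 * over last, for the same reason.
 */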

/* Set or clear the multicast filter for this adaptor.
   This routine is not state sensitive and need not be SMP locked. */

static void __cp_set_rx_mode (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        u32 mc_filter[2];       /* Multicast hash filter */
        int rx_mode;

        /* Note: do not reorder, GCC is clever about common statements. */
        if (dev->flags & IFF_PROMISC) {
                /* Unconditionally log net taps. */
                rx_mode =
                    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
                    AcceptAllPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
                   (dev->flags & IFF_ALLMULTI)) {
                /* Too many to filter perfectly -- accept all multicasts. */
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
                struct netdev_hw_addr *ha;
                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
                netdev_for_each_mc_addr(ha, dev) {
                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
        }

        /* We can safely update without stopping the chip. */
        cp->rx_config = cp_rx_config | rx_mode;
        cpw32_f(RxConfig, cp->rx_config);

        cpw32_f (MAR0 + 0, mc_filter[0]);
        cpw32_f (MAR0 + 4, mc_filter[1]);
}
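/*
 * Worked example of the hash above (illustrative note): ether_crc()
 * computes the 32-bit Ethernet CRC of the 6-byte address; the top six
 * bits (>> 26) index one of 64 filter bits, bit_nr >> 5 selects the
 * MAR0+0 or MAR0+4 dword, and bit_nr & 31 selects the bit within it.
 */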

static void cp_set_rx_mode (struct net_device *dev)
{
        unsigned long flags;
        struct cp_private *cp = netdev_priv(dev);

        spin_lock_irqsave (&cp->lock, flags);
        __cp_set_rx_mode(dev);
        spin_unlock_irqrestore (&cp->lock, flags);
}

static void __cp_get_stats(struct cp_private *cp)
{
        /* only lower 24 bits valid; write any value to clear */
        cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
        cpw32 (RxMissed, 0);
}

static struct net_device_stats *cp_get_stats(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        /* The chip only needs to report frames it silently dropped. */
        spin_lock_irqsave(&cp->lock, flags);
        if (netif_running(dev) && netif_device_present(dev))
                __cp_get_stats(cp);
        spin_unlock_irqrestore(&cp->lock, flags);

        return &dev->stats;
}

static void cp_stop_hw (struct cp_private *cp)
{
        cpw16(IntrStatus, ~(cpr16(IntrStatus)));
        cpw16_f(IntrMask, 0);
        cpw8(Cmd, 0);
        cpw16_f(CpCmd, 0);
        cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));

        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;

        netdev_reset_queue(cp->dev);
}

static void cp_reset_hw (struct cp_private *cp)
{
        unsigned work = 1000;

        cpw8(Cmd, CmdReset);

        while (work--) {
                if (!(cpr8(Cmd) & CmdReset))
                        return;

                schedule_timeout_uninterruptible(10);
        }

        netdev_err(cp->dev, "hardware reset timeout\n");
}

static inline void cp_start_hw (struct cp_private *cp)
{
        dma_addr_t ring_dma;

        cpw16(CpCmd, cp->cpcmd);

        /*
         * These (at least TxRingAddr) need to be configured after the
         * corresponding bits in CpCmd are enabled. Datasheet v1.6 §6.33
         * (C+ Command Register) recommends that these and more be configured
         * *after* the [RT]xEnable bits in CpCmd are set. And on some hardware
         * it's been observed that the TxRingAddr is actually reset to garbage
         * when C+ mode Tx is enabled in CpCmd.
         */
        cpw32_f(HiTxRingAddr, 0);
        cpw32_f(HiTxRingAddr + 4, 0);

        ring_dma = cp->ring_dma;
        cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);

        ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
        cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
        cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
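        /*
         * Added note: the (ring_dma >> 16) >> 16 double shift extracts
         * the high dword without shifting a possibly 32-bit dma_addr_t
         * by 32 in one step, which would be undefined behaviour in C
         * and draws a compiler warning.
         */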

        /*
         * Strictly speaking, the datasheet says this should be enabled
         * *before* setting the descriptor addresses. But what, then, would
         * prevent it from doing DMA to random unconfigured addresses?
         * This variant appears to work fine.
         */
        cpw8(Cmd, RxOn | TxOn);

        netdev_reset_queue(cp->dev);
}

static void cp_enable_irq(struct cp_private *cp)
{
        cpw16_f(IntrMask, cp_intr_mask);
}

static void cp_init_hw (struct cp_private *cp)
{
        struct net_device *dev = cp->dev;

        cp_reset_hw(cp);

        cpw8_f (Cfg9346, Cfg9346_Unlock);

        /* Restore our idea of the MAC address. */
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));

        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */

        __cp_set_rx_mode(dev);
        cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));

        cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
        /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
        cpw8(Config3, PARMEnable);
        cp->wol_enabled = 0;

        cpw8(Config5, cpr8(Config5) & PMEStatus);

        cpw16(MultiIntr, 0);

        cpw8_f(Cfg9346, Cfg9346_Lock);
}

static int cp_refill_rx(struct cp_private *cp)
{
        struct net_device *dev = cp->dev;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                struct sk_buff *skb;
                dma_addr_t mapping;

                skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
                if (!skb)
                        goto err_out;

                mapping = dma_map_single(&cp->pdev->dev, skb->data,
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                if (dma_mapping_error(&cp->pdev->dev, mapping)) {
                        kfree_skb(skb);
                        goto err_out;
                }
                cp->rx_skb[i] = skb;

                cp->rx_ring[i].opts2 = 0;
                cp->rx_ring[i].addr = cpu_to_le64(mapping);
                if (i == (CP_RX_RING_SIZE - 1))
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
                else
                        cp->rx_ring[i].opts1 =
                                cpu_to_le32(DescOwn | cp->rx_buf_sz);
        }

        return 0;

err_out:
        cp_clean_rings(cp);
        return -ENOMEM;
}

static void cp_init_rings_index (struct cp_private *cp)
{
        cp->rx_tail = 0;
        cp->tx_head = cp->tx_tail = 0;
}

static int cp_init_rings (struct cp_private *cp)
{
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
        cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
        memset(cp->tx_opts, 0, sizeof(cp->tx_opts));

        cp_init_rings_index(cp);

        return cp_refill_rx (cp);
}

static int cp_alloc_rings (struct cp_private *cp)
{
        struct device *d = &cp->pdev->dev;
        void *mem;
        int rc;

        mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;

        cp->rx_ring = mem;
        cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];

        rc = cp_init_rings(cp);
        if (rc < 0)
                dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);

        return rc;
}

static void cp_clean_rings (struct cp_private *cp)
{
        struct cp_desc *desc;
        unsigned i;

        for (i = 0; i < CP_RX_RING_SIZE; i++) {
                if (cp->rx_skb[i]) {
                        desc = cp->rx_ring + i;
                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb_any(cp->rx_skb[i]);
                }
        }

        for (i = 0; i < CP_TX_RING_SIZE; i++) {
                if (cp->tx_skb[i]) {
                        struct sk_buff *skb = cp->tx_skb[i];

                        desc = cp->tx_ring + i;
                        dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
                                         le32_to_cpu(desc->opts1) & 0xffff,
                                         PCI_DMA_TODEVICE);
                        if (le32_to_cpu(desc->opts1) & LastFrag)
                                dev_kfree_skb_any(skb);
                        cp->dev->stats.tx_dropped++;
                }
        }
        netdev_reset_queue(cp->dev);

        memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
        memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
        memset(cp->tx_opts, 0, sizeof(cp->tx_opts));

        memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
        memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
}

static void cp_free_rings (struct cp_private *cp)
{
        cp_clean_rings(cp);
        dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
                          cp->ring_dma);
        cp->rx_ring = NULL;
        cp->tx_ring = NULL;
}

static int cp_open (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        const int irq = cp->pdev->irq;
        int rc;

        netif_dbg(cp, ifup, dev, "enabling interface\n");

        rc = cp_alloc_rings(cp);
        if (rc)
                return rc;

        napi_enable(&cp->napi);

        cp_init_hw(cp);

        rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc)
                goto err_out_hw;

        cp_enable_irq(cp);

        netif_carrier_off(dev);
        mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
        netif_start_queue(dev);

        return 0;

err_out_hw:
        napi_disable(&cp->napi);
        cp_stop_hw(cp);
        cp_free_rings(cp);
        return rc;
}

static int cp_close (struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;

        napi_disable(&cp->napi);

        netif_dbg(cp, ifdown, dev, "disabling interface\n");

        spin_lock_irqsave(&cp->lock, flags);

        netif_stop_queue(dev);
        netif_carrier_off(dev);

        cp_stop_hw(cp);

        spin_unlock_irqrestore(&cp->lock, flags);

        free_irq(cp->pdev->irq, dev);

        cp_free_rings(cp);
        return 0;
}

static void cp_tx_timeout(struct net_device *dev)
{
        struct cp_private *cp = netdev_priv(dev);
        unsigned long flags;
        int rc, i;

        netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
                    cpr8(Cmd), cpr16(CpCmd),
                    cpr16(IntrStatus), cpr16(IntrMask));

        spin_lock_irqsave(&cp->lock, flags);

        netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
                  cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
        for (i = 0; i < CP_TX_RING_SIZE; i++) {
                netif_dbg(cp, tx_err, cp->dev,
                          "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
                          i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
                          cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
                          le64_to_cpu(cp->tx_ring[i].addr),
                          cp->tx_skb[i]);
        }

        cp_stop_hw(cp);
        cp_clean_rings(cp);
        rc = cp_init_rings(cp);
        cp_start_hw(cp);
        __cp_set_rx_mode(dev);
        cpw16_f(IntrMask, cp_norx_intr_mask);

        netif_wake_queue(dev);
        napi_schedule_irqoff(&cp->napi);

        spin_unlock_irqrestore(&cp->lock, flags);
}

static int cp_change_mtu(struct net_device *dev, int new_mtu)
{
        struct cp_private *cp = netdev_priv(dev);

        /* check for invalid MTU, according to hardware limits */
        if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
                return -EINVAL;

        /* if network interface not up, no need for complexity */
        if (!netif_running(dev)) {
                dev->mtu = new_mtu;
                cp_set_rxbufsize(cp);   /* set new rx buf size */
                return 0;
        }

        /* network IS up, close it, reset MTU, and come up again. */
        cp_close(dev);
        dev->mtu = new_mtu;
        cp_set_rxbufsize(cp);
        return cp_open(dev);
}

static const char mii_2_8139_map[8] = {
        BasicModeCtrl,
        BasicModeStatus,
        0,
        0,
        NWayAdvert,
        NWayLPAR,
        NWayExpansion,
        0
};

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct cp_private *cp = netdev_priv(dev);

        return location < 8 && mii_2_8139_map[location] ?
               readw(cp->regs + mii_2_8139_map[location]) : 0;
}


static void mdio_write(struct net_device *dev, int phy_id, int location,
                       int value)
{
        struct cp_private *cp = netdev_priv(dev);

        if (location == 0) {
                cpw8(Cfg9346, Cfg9346_Unlock);
                cpw16(BasicModeCtrl, value);
                cpw8(Cfg9346, Cfg9346_Lock);
        } else if (location < 8 && mii_2_8139_map[location])
                cpw16(mii_2_8139_map[location], value);
}
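/*
 * Added note: the internal PHY has no real MDIO bus, so mdio_read()
 * and mdio_write() above emulate one by mapping the standard MII
 * register numbers (BMCR, BMSR, ...) onto the chip's memory-mapped
 * equivalents via mii_2_8139_map[], returning 0 for registers with no
 * counterpart.
 */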
1330
1331/* Set the ethtool Wake-on-LAN settings */
1332static int netdev_set_wol (struct cp_private *cp,
1333                           const struct ethtool_wolinfo *wol)
1334{
1335        u8 options;
1336
1337        options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1338        /* If WOL is being disabled, no need for complexity */
1339        if (wol->wolopts) {
1340                if (wol->wolopts & WAKE_PHY)    options |= LinkUp;
1341                if (wol->wolopts & WAKE_MAGIC)  options |= MagicPacket;
1342        }
1343
1344        cpw8 (Cfg9346, Cfg9346_Unlock);
1345        cpw8 (Config3, options);
1346        cpw8 (Cfg9346, Cfg9346_Lock);
1347
1348        options = 0; /* Paranoia setting */
1349        options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1350        /* If WOL is being disabled, no need for complexity */
1351        if (wol->wolopts) {
1352                if (wol->wolopts & WAKE_UCAST)  options |= UWF;
1353                if (wol->wolopts & WAKE_BCAST)  options |= BWF;
1354                if (wol->wolopts & WAKE_MCAST)  options |= MWF;
1355        }
1356
1357        cpw8 (Config5, options);
1358
1359        cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1360
1361        return 0;
1362}
1363
1364/* Get the ethtool Wake-on-LAN settings */
1365static void netdev_get_wol (struct cp_private *cp,
1366                     struct ethtool_wolinfo *wol)
1367{
1368        u8 options;
1369
1370        wol->wolopts   = 0; /* Start from scratch */
1371        wol->supported = WAKE_PHY   | WAKE_BCAST | WAKE_MAGIC |
1372                         WAKE_MCAST | WAKE_UCAST;
1373        /* We don't need to go on if WOL is disabled */
1374        if (!cp->wol_enabled) return;
1375
1376        options        = cpr8 (Config3);
1377        if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
1378        if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
1379
1380        options        = 0; /* Paranoia setting */
1381        options        = cpr8 (Config5);
1382        if (options & UWF)           wol->wolopts |= WAKE_UCAST;
1383        if (options & BWF)           wol->wolopts |= WAKE_BCAST;
1384        if (options & MWF)           wol->wolopts |= WAKE_MCAST;
1385}
1386
1387static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1388{
1389        struct cp_private *cp = netdev_priv(dev);
1390
1391        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1392        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1393        strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1394}
1395
1396static void cp_get_ringparam(struct net_device *dev,
1397                                struct ethtool_ringparam *ring)
1398{
1399        ring->rx_max_pending = CP_RX_RING_SIZE;
1400        ring->tx_max_pending = CP_TX_RING_SIZE;
1401        ring->rx_pending = CP_RX_RING_SIZE;
1402        ring->tx_pending = CP_TX_RING_SIZE;
1403}
1404
1405static int cp_get_regs_len(struct net_device *dev)
1406{
1407        return CP_REGS_SIZE;
1408}
1409
1410static int cp_get_sset_count (struct net_device *dev, int sset)
1411{
1412        switch (sset) {
1413        case ETH_SS_STATS:
1414                return CP_NUM_STATS;
1415        default:
1416                return -EOPNOTSUPP;
1417        }
1418}
1419
1420static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1421{
1422        struct cp_private *cp = netdev_priv(dev);
1423        int rc;
1424        unsigned long flags;
1425
1426        spin_lock_irqsave(&cp->lock, flags);
1427        rc = mii_ethtool_gset(&cp->mii_if, cmd);
1428        spin_unlock_irqrestore(&cp->lock, flags);
1429
1430        return rc;
1431}
1432
1433static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1434{
1435        struct cp_private *cp = netdev_priv(dev);
1436        int rc;
1437        unsigned long flags;
1438
1439        spin_lock_irqsave(&cp->lock, flags);
1440        rc = mii_ethtool_sset(&cp->mii_if, cmd);
1441        spin_unlock_irqrestore(&cp->lock, flags);
1442
1443        return rc;
1444}
1445
1446static int cp_nway_reset(struct net_device *dev)
1447{
1448        struct cp_private *cp = netdev_priv(dev);
1449        return mii_nway_restart(&cp->mii_if);
1450}
1451
1452static u32 cp_get_msglevel(struct net_device *dev)
1453{
1454        struct cp_private *cp = netdev_priv(dev);
1455        return cp->msg_enable;
1456}
1457
1458static void cp_set_msglevel(struct net_device *dev, u32 value)
1459{
1460        struct cp_private *cp = netdev_priv(dev);
1461        cp->msg_enable = value;
1462}
1463
1464static int cp_set_features(struct net_device *dev, netdev_features_t features)
1465{
1466        struct cp_private *cp = netdev_priv(dev);
1467        unsigned long flags;
1468
1469        if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1470                return 0;
1471
1472        spin_lock_irqsave(&cp->lock, flags);
1473
1474        if (features & NETIF_F_RXCSUM)
1475                cp->cpcmd |= RxChkSum;
1476        else
1477                cp->cpcmd &= ~RxChkSum;
1478
1479        if (features & NETIF_F_HW_VLAN_CTAG_RX)
1480                cp->cpcmd |= RxVlanOn;
1481        else
1482                cp->cpcmd &= ~RxVlanOn;
1483
1484        cpw16_f(CpCmd, cp->cpcmd);
1485        spin_unlock_irqrestore(&cp->lock, flags);
1486
1487        return 0;
1488}
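
/*
 * Note (illustrative): RX checksum offload and VLAN tag stripping are
 * plain bits in the CpCmd register, so toggling either feature at
 * runtime reduces to a read-modify-write of the cached cp->cpcmd, e.g.
 *
 *	cp->cpcmd |= RxChkSum;
 *	cpw16_f(CpCmd, cp->cpcmd);
 *
 * The _f ("flush") variant reads the register back after the write so
 * the change has reached the chip before the lock is dropped.
 */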
1489
1490static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1491                        void *p)
1492{
1493        struct cp_private *cp = netdev_priv(dev);
1494        unsigned long flags;
1495
1496        if (regs->len < CP_REGS_SIZE)
1497                return /* -EINVAL */;
1498
1499        regs->version = CP_REGS_VER;
1500
1501        spin_lock_irqsave(&cp->lock, flags);
1502        memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1503        spin_unlock_irqrestore(&cp->lock, flags);
1504}
1505
1506static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1507{
1508        struct cp_private *cp = netdev_priv(dev);
1509        unsigned long flags;
1510
1511        spin_lock_irqsave (&cp->lock, flags);
1512        netdev_get_wol (cp, wol);
1513        spin_unlock_irqrestore (&cp->lock, flags);
1514}
1515
1516static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1517{
1518        struct cp_private *cp = netdev_priv(dev);
1519        unsigned long flags;
1520        int rc;
1521
1522        spin_lock_irqsave (&cp->lock, flags);
1523        rc = netdev_set_wol (cp, wol);
1524        spin_unlock_irqrestore (&cp->lock, flags);
1525
1526        return rc;
1527}
1528
1529static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1530{
1531        switch (stringset) {
1532        case ETH_SS_STATS:
1533                memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1534                break;
1535        default:
1536                BUG();
1537                break;
1538        }
1539}
1540
1541static void cp_get_ethtool_stats (struct net_device *dev,
1542                                  struct ethtool_stats *estats, u64 *tmp_stats)
1543{
1544        struct cp_private *cp = netdev_priv(dev);
1545        struct cp_dma_stats *nic_stats;
1546        dma_addr_t dma;
1547        int i;
1548
1549        nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1550                                       &dma, GFP_KERNEL);
1551        if (!nic_stats)
1552                return;
1553
1554        /* begin NIC statistics dump */
1555        cpw32(StatsAddr + 4, (u64)dma >> 32);
1556        cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1557        cpr32(StatsAddr);
1558
1559        for (i = 0; i < 1000; i++) {
1560                if ((cpr32(StatsAddr) & DumpStats) == 0)
1561                        break;
1562                udelay(10);
1563        }
1564        cpw32(StatsAddr, 0);
1565        cpw32(StatsAddr + 4, 0);
1566        cpr32(StatsAddr);
1567
1568        i = 0;
1569        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1570        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1571        tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1572        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1573        tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1574        tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1575        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1576        tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1577        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1578        tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1579        tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1580        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1581        tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1582        tmp_stats[i++] = cp->cp_stats.rx_frags;
1583        BUG_ON(i != CP_NUM_STATS);
1584
1585        dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1586}
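
/*
 * Note on the dump handshake above: writing the stats buffer address
 * with the DumpStats bit set asks the chip to DMA its hardware
 * counters into the coherent buffer; the chip clears DumpStats on
 * completion, so the poll loop waits at most 1000 * 10us = 10ms before
 * reading whatever landed there and releasing the buffer.
 */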
1587
1588static const struct ethtool_ops cp_ethtool_ops = {
1589        .get_drvinfo            = cp_get_drvinfo,
1590        .get_regs_len           = cp_get_regs_len,
1591        .get_sset_count         = cp_get_sset_count,
1592        .get_settings           = cp_get_settings,
1593        .set_settings           = cp_set_settings,
1594        .nway_reset             = cp_nway_reset,
1595        .get_link               = ethtool_op_get_link,
1596        .get_msglevel           = cp_get_msglevel,
1597        .set_msglevel           = cp_set_msglevel,
1598        .get_regs               = cp_get_regs,
1599        .get_wol                = cp_get_wol,
1600        .set_wol                = cp_set_wol,
1601        .get_strings            = cp_get_strings,
1602        .get_ethtool_stats      = cp_get_ethtool_stats,
1603        .get_eeprom_len         = cp_get_eeprom_len,
1604        .get_eeprom             = cp_get_eeprom,
1605        .set_eeprom             = cp_set_eeprom,
1606        .get_ringparam          = cp_get_ringparam,
1607};
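
/*
 * Usage note (illustrative): with these ops registered, the standard
 * ethtool commands map directly onto the handlers above, e.g.
 * "ethtool ethX" -> cp_get_settings(), "ethtool -S ethX" ->
 * cp_get_sset_count() + cp_get_strings() + cp_get_ethtool_stats(),
 * and "ethtool -d ethX" -> cp_get_regs().
 */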
1608
1609static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1610{
1611        struct cp_private *cp = netdev_priv(dev);
1612        int rc;
1613        unsigned long flags;
1614
1615        if (!netif_running(dev))
1616                return -EINVAL;
1617
1618        spin_lock_irqsave(&cp->lock, flags);
1619        rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1620        spin_unlock_irqrestore(&cp->lock, flags);
1621        return rc;
1622}
1623
1624static int cp_set_mac_address(struct net_device *dev, void *p)
1625{
1626        struct cp_private *cp = netdev_priv(dev);
1627        struct sockaddr *addr = p;
1628
1629        if (!is_valid_ether_addr(addr->sa_data))
1630                return -EADDRNOTAVAIL;
1631
1632        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1633
1634        spin_lock_irq(&cp->lock);
1635
1636        cpw8_f(Cfg9346, Cfg9346_Unlock);
1637        cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1638        cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1639        cpw8_f(Cfg9346, Cfg9346_Lock);
1640
1641        spin_unlock_irq(&cp->lock);
1642
1643        return 0;
1644}
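
/*
 * Note: like the WOL bits, the MAC0 address registers are only
 * writable while Cfg9346 is unlocked, hence the bracketing above.  The
 * two 32-bit writes cover all six address bytes (the upper two bytes
 * of the second write are don't-cares).
 */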
1645
1646/* Serial EEPROM section. */
1647
1648/*  EEPROM_Ctrl bits. */
1649#define EE_SHIFT_CLK    0x04    /* EEPROM shift clock. */
1650#define EE_CS                   0x08    /* EEPROM chip select. */
1651#define EE_DATA_WRITE   0x02    /* EEPROM chip data in. */
1652#define EE_WRITE_0              0x00
1653#define EE_WRITE_1              0x02
1654#define EE_DATA_READ    0x01    /* EEPROM chip data out. */
1655#define EE_ENB                  (0x80 | EE_CS)
1656
1657/* Delay between EEPROM clock transitions.
1658   No extra delay is needed with 33 MHz PCI, but 66 MHz operation may change this.
1659 */
1660
1661#define eeprom_delay()  readb(ee_addr)
1662
1663/* The EEPROM commands include the always-set leading bit. */
1664#define EE_EXTEND_CMD   (4)
1665#define EE_WRITE_CMD    (5)
1666#define EE_READ_CMD             (6)
1667#define EE_ERASE_CMD    (7)
1668
1669#define EE_EWDS_ADDR    (0)
1670#define EE_WRAL_ADDR    (1)
1671#define EE_ERAL_ADDR    (2)
1672#define EE_EWEN_ADDR    (3)
1673
1674#define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
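
/*
 * Illustrative framing example for the 93C46/93C56-style part: every
 * command is the always-set start bit, a 2-bit opcode, then the
 * address, clocked out MSB-first.  With addr_len == 6, reading word 5
 * shifts out the 9 bits
 *
 *	1 10 000101	(start bit, read opcode, address 5)
 *
 * which is exactly what read_eeprom() below builds with
 *	read_cmd = location | (EE_READ_CMD << addr_len);
 * and feeds to eeprom_cmd() with cmd_len = 3 + addr_len.
 */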
1675
1676static void eeprom_cmd_start(void __iomem *ee_addr)
1677{
1678        writeb (EE_ENB & ~EE_CS, ee_addr);
1679        writeb (EE_ENB, ee_addr);
1680        eeprom_delay ();
1681}
1682
1683static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1684{
1685        int i;
1686
1687        /* Shift the command bits out. */
1688        for (i = cmd_len - 1; i >= 0; i--) {
1689                int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1690                writeb (EE_ENB | dataval, ee_addr);
1691                eeprom_delay ();
1692                writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1693                eeprom_delay ();
1694        }
1695        writeb (EE_ENB, ee_addr);
1696        eeprom_delay ();
1697}
1698
1699static void eeprom_cmd_end(void __iomem *ee_addr)
1700{
1701        writeb(0, ee_addr);
1702        eeprom_delay ();
1703}
1704
1705static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1706                              int addr_len)
1707{
1708        int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1709
1710        eeprom_cmd_start(ee_addr);
1711        eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1712        eeprom_cmd_end(ee_addr);
1713}
1714
1715static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1716{
1717        int i;
1718        u16 retval = 0;
1719        void __iomem *ee_addr = ioaddr + Cfg9346;
1720        int read_cmd = location | (EE_READ_CMD << addr_len);
1721
1722        eeprom_cmd_start(ee_addr);
1723        eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1724
1725        for (i = 16; i > 0; i--) {
1726                writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1727                eeprom_delay ();
1728                retval =
1729                    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1730                                     0);
1731                writeb (EE_ENB, ee_addr);
1732                eeprom_delay ();
1733        }
1734
1735        eeprom_cmd_end(ee_addr);
1736
1737        return retval;
1738}
1739
1740static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1741                         int addr_len)
1742{
1743        int i;
1744        void __iomem *ee_addr = ioaddr + Cfg9346;
1745        int write_cmd = location | (EE_WRITE_CMD << addr_len);
1746
1747        eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1748
1749        eeprom_cmd_start(ee_addr);
1750        eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1751        eeprom_cmd(ee_addr, val, 16);
1752        eeprom_cmd_end(ee_addr);
1753
1754        eeprom_cmd_start(ee_addr);
1755        for (i = 0; i < 20000; i++)
1756                if (readb(ee_addr) & EE_DATA_READ)
1757                        break;
1758        eeprom_cmd_end(ee_addr);
1759
1760        eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1761}
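
/*
 * Note: writes must be bracketed by the EWEN (write-enable) and EWDS
 * (write-disable) extended commands, which write_eeprom() issues via
 * eeprom_extend_cmd().  Once the data word has been clocked out, the
 * part signals completion by driving its data-out line high, which the
 * bounded busy-wait above polls for via EE_DATA_READ.
 */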
1762
1763static int cp_get_eeprom_len(struct net_device *dev)
1764{
1765        struct cp_private *cp = netdev_priv(dev);
1766        int size;
1767
1768        spin_lock_irq(&cp->lock);
1769        size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1770        spin_unlock_irq(&cp->lock);
1771
1772        return size;
1773}
1774
1775static int cp_get_eeprom(struct net_device *dev,
1776                         struct ethtool_eeprom *eeprom, u8 *data)
1777{
1778        struct cp_private *cp = netdev_priv(dev);
1779        unsigned int addr_len;
1780        u16 val;
1781        u32 offset = eeprom->offset >> 1;
1782        u32 len = eeprom->len;
1783        u32 i = 0;
1784
1785        eeprom->magic = CP_EEPROM_MAGIC;
1786
1787        spin_lock_irq(&cp->lock);
1788
1789        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1790
1791        if (eeprom->offset & 1) {
1792                val = read_eeprom(cp->regs, offset, addr_len);
1793                data[i++] = (u8)(val >> 8);
1794                offset++;
1795        }
1796
1797        while (i < len - 1) {
1798                val = read_eeprom(cp->regs, offset, addr_len);
1799                data[i++] = (u8)val;
1800                data[i++] = (u8)(val >> 8);
1801                offset++;
1802        }
1803
1804        if (i < len) {
1805                val = read_eeprom(cp->regs, offset, addr_len);
1806                data[i] = (u8)val;
1807        }
1808
1809        spin_unlock_irq(&cp->lock);
1810        return 0;
1811}
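
/*
 * Illustrative example of the packing above: the EEPROM is word
 * (16-bit) addressed while ethtool works in bytes, so a request with
 * offset 3 and len 3 reads words 1..2 and returns
 *
 *	data[0] = high byte of word 1	(odd leading byte)
 *	data[1] = low  byte of word 2
 *	data[2] = high byte of word 2
 *
 * cp_set_eeprom() below mirrors this, using read-modify-write cycles
 * for the partial words at the edges.
 */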
1812
1813static int cp_set_eeprom(struct net_device *dev,
1814                         struct ethtool_eeprom *eeprom, u8 *data)
1815{
1816        struct cp_private *cp = netdev_priv(dev);
1817        unsigned int addr_len;
1818        u16 val;
1819        u32 offset = eeprom->offset >> 1;
1820        u32 len = eeprom->len;
1821        u32 i = 0;
1822
1823        if (eeprom->magic != CP_EEPROM_MAGIC)
1824                return -EINVAL;
1825
1826        spin_lock_irq(&cp->lock);
1827
1828        addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1829
1830        if (eeprom->offset & 1) {
1831                val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1832                val |= (u16)data[i++] << 8;
1833                write_eeprom(cp->regs, offset, val, addr_len);
1834                offset++;
1835        }
1836
1837        while (i < len - 1) {
1838                val = (u16)data[i++];
1839                val |= (u16)data[i++] << 8;
1840                write_eeprom(cp->regs, offset, val, addr_len);
1841                offset++;
1842        }
1843
1844        if (i < len) {
1845                val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1846                val |= (u16)data[i];
1847                write_eeprom(cp->regs, offset, val, addr_len);
1848        }
1849
1850        spin_unlock_irq(&cp->lock);
1851        return 0;
1852}
1853
1854/* Arm PME# generation and put the board into D3hot to wait for a wake event */
1855static void cp_set_d3_state (struct cp_private *cp)
1856{
1857        pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */
1858        pci_set_power_state (cp->pdev, PCI_D3hot);
1859}
1860
1861static netdev_features_t cp_features_check(struct sk_buff *skb,
1862                                           struct net_device *dev,
1863                                           netdev_features_t features)
1864{
1865        if (skb_shinfo(skb)->gso_size > MSSMask)
1866                features &= ~NETIF_F_TSO;
1867
1868        return vlan_features_check(skb, features);
1869}
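
/*
 * Note (illustrative): the hardware encodes the TSO MSS in a
 * descriptor field of limited width (MSSMask), so any skb whose
 * gso_size would overflow that field loses NETIF_F_TSO here and is
 * segmented in software by the networking core instead.
 */
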
1870static const struct net_device_ops cp_netdev_ops = {
1871        .ndo_open               = cp_open,
1872        .ndo_stop               = cp_close,
1873        .ndo_validate_addr      = eth_validate_addr,
1874        .ndo_set_mac_address    = cp_set_mac_address,
1875        .ndo_set_rx_mode        = cp_set_rx_mode,
1876        .ndo_get_stats          = cp_get_stats,
1877        .ndo_do_ioctl           = cp_ioctl,
1878        .ndo_start_xmit         = cp_start_xmit,
1879        .ndo_tx_timeout         = cp_tx_timeout,
1880        .ndo_set_features       = cp_set_features,
1881        .ndo_change_mtu         = cp_change_mtu,
1882        .ndo_features_check     = cp_features_check,
1883
1884#ifdef CONFIG_NET_POLL_CONTROLLER
1885        .ndo_poll_controller    = cp_poll_controller,
1886#endif
1887};
1888
1889static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1890{
1891        struct net_device *dev;
1892        struct cp_private *cp;
1893        int rc;
1894        void __iomem *regs;
1895        resource_size_t pciaddr;
1896        unsigned int addr_len, i, pci_using_dac;
1897
1898        pr_info_once("%s", version);
1899
1900        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1901            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1902                dev_info(&pdev->dev,
1903                         "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1904                         pdev->vendor, pdev->device, pdev->revision);
1905                return -ENODEV;
1906        }
1907
1908        dev = alloc_etherdev(sizeof(struct cp_private));
1909        if (!dev)
1910                return -ENOMEM;
1911        SET_NETDEV_DEV(dev, &pdev->dev);
1912
1913        cp = netdev_priv(dev);
1914        cp->pdev = pdev;
1915        cp->dev = dev;
1916        cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1917        spin_lock_init (&cp->lock);
1918        cp->mii_if.dev = dev;
1919        cp->mii_if.mdio_read = mdio_read;
1920        cp->mii_if.mdio_write = mdio_write;
1921        cp->mii_if.phy_id = CP_INTERNAL_PHY;
1922        cp->mii_if.phy_id_mask = 0x1f;
1923        cp->mii_if.reg_num_mask = 0x1f;
1924        cp_set_rxbufsize(cp);
1925
1926        rc = pci_enable_device(pdev);
1927        if (rc)
1928                goto err_out_free;
1929
1930        rc = pci_set_mwi(pdev);
1931        if (rc)
1932                goto err_out_disable;
1933
1934        rc = pci_request_regions(pdev, DRV_NAME);
1935        if (rc)
1936                goto err_out_mwi;
1937
1938        pciaddr = pci_resource_start(pdev, 1);
1939        if (!pciaddr) {
1940                rc = -EIO;
1941                dev_err(&pdev->dev, "no MMIO resource\n");
1942                goto err_out_res;
1943        }
1944        if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1945                rc = -EIO;
1946                dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1947                       (unsigned long long)pci_resource_len(pdev, 1));
1948                goto err_out_res;
1949        }
1950
1951        /* Configure DMA attributes. */
1952        if ((sizeof(dma_addr_t) > 4) &&
1953            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1954            !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1955                pci_using_dac = 1;
1956        } else {
1957                pci_using_dac = 0;
1958
1959                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1960                if (rc) {
1961                        dev_err(&pdev->dev,
1962                                "No usable DMA configuration, aborting\n");
1963                        goto err_out_res;
1964                }
1965                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1966                if (rc) {
1967                        dev_err(&pdev->dev,
1968                                "No usable consistent DMA configuration, aborting\n");
1969                        goto err_out_res;
1970                }
1971        }
1972
1973        cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1974                    PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1975
1976        dev->features |= NETIF_F_RXCSUM;
1977        dev->hw_features |= NETIF_F_RXCSUM;
1978
1979        regs = ioremap(pciaddr, CP_REGS_SIZE);
1980        if (!regs) {
1981                rc = -EIO;
1982                dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1983                        (unsigned long long)pci_resource_len(pdev, 1),
1984                       (unsigned long long)pciaddr);
1985                goto err_out_res;
1986        }
1987        cp->regs = regs;
1988
1989        cp_stop_hw(cp);
1990
1991        /* read MAC address from EEPROM */
1992        addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1993        for (i = 0; i < 3; i++)
1994                ((__le16 *) (dev->dev_addr))[i] =
1995                    cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1996
1997        dev->netdev_ops = &cp_netdev_ops;
1998        netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1999        dev->ethtool_ops = &cp_ethtool_ops;
2000        dev->watchdog_timeo = TX_TIMEOUT;
2001
2002        dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2003                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2004
2005        if (pci_using_dac)
2006                dev->features |= NETIF_F_HIGHDMA;
2007
2008        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2009                NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2010        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2011                NETIF_F_HIGHDMA;
2012
2013        rc = register_netdev(dev);
2014        if (rc)
2015                goto err_out_iomap;
2016
2017        netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2018                    regs, dev->dev_addr, pdev->irq);
2019
2020        pci_set_drvdata(pdev, dev);
2021
2022        /* enable busmastering and memory-write-invalidate */
2023        pci_set_master(pdev);
2024
2025        if (cp->wol_enabled)
2026                cp_set_d3_state (cp);
2027
2028        return 0;
2029
2030err_out_iomap:
2031        iounmap(regs);
2032err_out_res:
2033        pci_release_regions(pdev);
2034err_out_mwi:
2035        pci_clear_mwi(pdev);
2036err_out_disable:
2037        pci_disable_device(pdev);
2038err_out_free:
2039        free_netdev(dev);
2040        return rc;
2041}
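
/*
 * Note: the error path above unwinds in strict reverse order of
 * acquisition (iounmap -> release regions -> clear MWI -> disable
 * device -> free netdev), the usual goto-ladder pattern for probe
 * routines.
 */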
2042
2043static void cp_remove_one (struct pci_dev *pdev)
2044{
2045        struct net_device *dev = pci_get_drvdata(pdev);
2046        struct cp_private *cp = netdev_priv(dev);
2047
2048        unregister_netdev(dev);
2049        iounmap(cp->regs);
2050        if (cp->wol_enabled)
2051                pci_set_power_state (pdev, PCI_D0);
2052        pci_release_regions(pdev);
2053        pci_clear_mwi(pdev);
2054        pci_disable_device(pdev);
2055        free_netdev(dev);
2056}
2057
2058#ifdef CONFIG_PM
2059static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2060{
2061        struct net_device *dev = pci_get_drvdata(pdev);
2062        struct cp_private *cp = netdev_priv(dev);
2063        unsigned long flags;
2064
2065        if (!netif_running(dev))
2066                return 0;
2067
2068        netif_device_detach (dev);
2069        netif_stop_queue (dev);
2070
2071        spin_lock_irqsave (&cp->lock, flags);
2072
2073        /* Disable Rx and Tx */
2074        cpw16 (IntrMask, 0);
2075        cpw8  (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2076
2077        spin_unlock_irqrestore (&cp->lock, flags);
2078
2079        pci_save_state(pdev);
2080        pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2081        pci_set_power_state(pdev, pci_choose_state(pdev, state));
2082
2083        return 0;
2084}
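
/*
 * Note: ordering matters in cp_suspend() -- the device is detached and
 * the queue stopped before interrupts are masked and Rx/Tx disabled,
 * and only then is PCI state saved and wake conditionally armed
 * (pci_enable_wake() is passed cp->wol_enabled, so PME# stays off
 * unless WOL was configured).
 */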
2085
2086static int cp_resume (struct pci_dev *pdev)
2087{
2088        struct net_device *dev = pci_get_drvdata (pdev);
2089        struct cp_private *cp = netdev_priv(dev);
2090        unsigned long flags;
2091
2092        if (!netif_running(dev))
2093                return 0;
2094
2095        netif_device_attach (dev);
2096
2097        pci_set_power_state(pdev, PCI_D0);
2098        pci_restore_state(pdev);
2099        pci_enable_wake(pdev, PCI_D0, 0);
2100
2101        /* FIXME: frames may be lost if the Rx ring buffer is depleted across suspend */
2102        cp_init_rings_index (cp);
2103        cp_init_hw (cp);
2104        cp_enable_irq(cp);
2105        netif_start_queue (dev);
2106
2107        spin_lock_irqsave (&cp->lock, flags);
2108
2109        mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2110
2111        spin_unlock_irqrestore (&cp->lock, flags);
2112
2113        return 0;
2114}
2115#endif /* CONFIG_PM */
2116
2117static const struct pci_device_id cp_pci_tbl[] = {
2118        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
2119        { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
2120        { },
2121};
2122MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
2123
2124static struct pci_driver cp_driver = {
2125        .name         = DRV_NAME,
2126        .id_table     = cp_pci_tbl,
2127        .probe        = cp_init_one,
2128        .remove       = cp_remove_one,
2129#ifdef CONFIG_PM
2130        .resume       = cp_resume,
2131        .suspend      = cp_suspend,
2132#endif
2133};
2134
2135module_pci_driver(cp_driver);
2136