linux/drivers/net/ethernet/via/via-rhine.c
/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
/*
        Written 1998-2001 by Donald Becker.

        Current Maintainer: Roger Luethi <rl@hellgate.ch>

        This software may be used and distributed according to the terms of
        the GNU General Public License (GPL), incorporated herein by reference.
        Drivers based on or derived from this code fall under the GPL and must
        retain the authorship, copyright and license notice.  This file is not
        a complete program and may only be used when the entire operating
        system is licensed under the GPL.

        This driver is designed for the VIA VT86C100A Rhine-I.
        It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
        and management NIC 6105M).

        The author may be reached as becker@scyld.com, or C/O
        Scyld Computing Corporation
        410 Severn Ave., Suite 210
        Annapolis MD 21403


        This driver contains some changes from the original Donald Becker
        version. He may or may not be interested in bug reports on this
        code. You can find his versions at:
        http://www.scyld.com/network/via-rhine.html
        [link no longer provides useful info -jgarzik]

*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME        "via-rhine"
#define DRV_VERSION     "1.5.0"
#define DRV_RELDATE     "2010-10-09"


/* A few user-configurable values.
   These may be modified when a driver module is loaded. */

#define DEBUG
static int debug = 1;   /* 0 quiet .. 1 normal messages .. 7 verbose. */
static int max_interrupt_work = 20;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
        defined(CONFIG_SPARC) || defined(__ia64__) ||              \
        defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak;
#endif

/* Work-around for broken BIOSes: they are unable to get the chip back out of
   power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
static bool avoid_D3;

/*
 * In case you are looking for 'options[]' or 'full_duplex[]', they
 * are gone. Use ethtool(8) instead.
 */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Rhine has a 64 element 8390-like hash table. */
static const int multicast_filter_limit = 32;
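/* For example (illustrative, mirroring the hash computed later in
 * rhine_set_rx_mode()): a multicast address selects one of the 64 hash
 * bits via the top six bits of the big-endian CRC-32 of the address:
 *
 *      bit = ether_crc(ETH_ALEN, mcast_addr) >> 26;    // 0..63
 *      mc_filter[bit >> 5] |= 1 << (bit & 31);
 */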


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE    16
#define TX_QUEUE_LEN    10      /* Limit ring entries actually used. */
#define RX_RING_SIZE    64
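/* With TX_RING_SIZE = 16, for instance, the compiler reduces
 *
 *      entry = cur_tx % TX_RING_SIZE;
 * to
 *      entry = cur_tx & (TX_RING_SIZE - 1);
 *
 * a single AND instruction, which is why the sizes above must stay
 * powers of two. */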

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT      (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/dmi.h>

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
        "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";

/* This driver was written to use PCI memory space. Some early versions
   of the Rhine may only work correctly with I/O space accesses. */
#ifdef CONFIG_VIA_RHINE_MMIO
#define USE_MMIO
#endif

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");

#define MCAM_SIZE       32
#define VCAM_SIZE       32

/*
                Theory of Operation

I. Board Compatibility

This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
controller.

II. Board-specific settings

Boards with this chip are functional only in a bus-master PCI slot.

Many operational settings are loaded from the EEPROM to the Config word at
offset 0x78. For most of these settings, this driver assumes that they are
correct.
If this driver is compiled to use PCI memory space operations the EEPROM
must be configured to enable memory ops.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.

IIIb/c. Transmit/Receive Structure

This driver attempts to use a zero-copy receive and transmit scheme.

Alas, all data buffers are required to start on a 32 bit boundary, so
the driver must often copy transmit packets into bounce buffers.

The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in the last phase of rhine_rx().

The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.

Since the VIA chips are only able to transfer data to buffers on 32 bit
boundaries, the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing. Copying these unaligned buffers
has the beneficial effect of 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
which is single threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring. It locks the
netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
the ring is not available it stops the transmit queue by
calling netif_stop_queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. If at least half of the entries in
the Tx ring are available the transmit queue is woken up if it was stopped.

IV. Notes

IVb. References

Preliminary VT86C100A manual from http://www.via.com.tw/
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF


IVc. Errata

The VT86C100A manual is not a reliable source of information.
The 3043 chip does not handle unaligned transmit or receive buffers, resulting
in significant performance degradation for bounce buffer copies on transmit
and unaligned IP headers on receive.
The chip does not pad to minimum transmit length.

*/

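/*
 * Illustrative sketch (not part of the driver proper): the
 * copy-only-tiny-frames decision described in IIIb/c above. Frames
 * shorter than rx_copybreak are copied into a freshly allocated skb,
 * reserving NET_IP_ALIGN bytes so the IP header ends up aligned, while
 * the original full-sized buffer stays in the Rx ring; larger frames
 * are handed up unchanged (the caller sets their final length). DMA
 * syncing and statistics are omitted for brevity;
 * rhine_example_copybreak is a hypothetical name.
 */
static inline struct sk_buff *rhine_example_copybreak(struct net_device *dev,
                                                      struct sk_buff *rx_skb,
                                                      int pkt_len)
{
        struct sk_buff *skb;

        if (pkt_len >= rx_copybreak)
                return rx_skb;          /* zero-copy: pass the buffer up */

        skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
        if (!skb)
                return rx_skb;          /* no memory: fall back to zero-copy */
        skb_reserve(skb, NET_IP_ALIGN); /* 16-byte align the IP header */
        skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
        skb_put(skb, pkt_len);
        return skb;
}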

/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
   Note the matching code -- the first table entry matches all 56** cards but
   the second only the 1234 card.
*/

enum rhine_revs {
        VT86C100A       = 0x00,
        VTunknown0      = 0x20,
        VT6102          = 0x40,
        VT8231          = 0x50, /* Integrated MAC */
        VT8233          = 0x60, /* Integrated MAC */
        VT8235          = 0x74, /* Integrated MAC */
        VT8237          = 0x78, /* Integrated MAC */
        VTunknown1      = 0x7C,
        VT6105          = 0x80,
        VT6105_B0       = 0x83,
        VT6105L         = 0x8A,
        VT6107          = 0x8C,
        VTunknown2      = 0x8E,
        VT6105M         = 0x90, /* Management adapter */
};

enum rhine_quirks {
        rqWOL           = 0x0001,       /* Wake-On-LAN support */
        rqForceReset    = 0x0002,
        rq6patterns     = 0x0040,       /* 6 instead of 4 patterns for WOL */
        rqStatusWBRace  = 0x0080,       /* Tx Status Writeback Error possible */
        rqRhineI        = 0x0100,       /* See comment below */
};
/*
 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
 * MMIO as well as for the collision counter and the Tx FIFO underflow
 * indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
 */

/* Beware of PCI posted writes */
#define IOSYNC  do { ioread8(ioaddr + StationAddr); } while (0)
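/* For example (illustrative, mirroring rhine_chip_reset() below): a posted
 * MMIO write is only guaranteed to have reached the chip once a subsequent
 * read from the same device returns, so time-critical writes are flushed
 * with a harmless read-back:
 *
 *      iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
 *      IOSYNC;         // the ioread8(StationAddr) flushes the posted write
 */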

static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
        { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },    /* VT86C100A */
        { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6102 */
        { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },    /* 6105{,L,LOM} */
        { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },    /* VT6105M */
        { }     /* terminate list */
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);


/* Offsets to the device registers. */
enum register_offsets {
        StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
        ChipCmd1=0x09, TQWake=0x0A,
        IntrStatus=0x0C, IntrEnable=0x0E,
        MulticastFilter0=0x10, MulticastFilter1=0x14,
        RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
        MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
        MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
        ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
        RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
        StickyHW=0x83, IntrStatus2=0x84,
        CamMask=0x88, CamCon=0x92, CamAddr=0x93,
        WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
        WOLcrClr1=0xA6, WOLcgClr=0xA7,
        PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
};

/* Bits in ConfigD */
enum backoff_bits {
        BackOptional=0x01, BackModify=0x02,
        BackCaptureEffect=0x04, BackRandom=0x08
};

/* Bits in the TxConfig (TCR) register */
enum tcr_bits {
        TCR_PQEN=0x01,
        TCR_LB0=0x02,           /* loopback[0] */
        TCR_LB1=0x04,           /* loopback[1] */
        TCR_OFSET=0x08,
        TCR_RTGOPT=0x10,
        TCR_RTFT0=0x20,
        TCR_RTFT1=0x40,
        TCR_RTSF=0x80,
};

/* Bits in the CamCon (CAMC) register */
enum camcon_bits {
        CAMC_CAMEN=0x01,
        CAMC_VCAMSL=0x02,
        CAMC_CAMWR=0x04,
        CAMC_CAMRD=0x08,
};

/* Bits in the PCIBusConfig1 (BCR1) register */
enum bcr1_bits {
        BCR1_POT0=0x01,
        BCR1_POT1=0x02,
        BCR1_POT2=0x04,
        BCR1_CTFT0=0x08,
        BCR1_CTFT1=0x10,
        BCR1_CTSF=0x20,
        BCR1_TXQNOBK=0x40,      /* for VT6105 */
        BCR1_VIDFR=0x80,        /* for VT6105 */
        BCR1_MED0=0x40,         /* for VT6102 */
        BCR1_MED1=0x80,         /* for VT6102 */
};

#ifdef USE_MMIO
/* Registers we check to verify that MMIO and PIO access yield the same
   values. */
static const int mmio_verify_registers[] = {
        RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
        0
};
#endif

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
        IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
        IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
        IntrPCIErr=0x0040,
        IntrStatsMax=0x0080, IntrRxEarly=0x0100,
        IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
        IntrTxAborted=0x2000, IntrLinkChange=0x4000,
        IntrRxWakeUp=0x8000,
        IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
        IntrTxDescRace=0x080000,        /* mapped from IntrStatus2 */
        IntrTxErrSummary=0x082218,
};

/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
enum wol_bits {
        WOLucast        = 0x10,
        WOLmagic        = 0x20,
        WOLbmcast       = 0x30,
        WOLlnkon        = 0x40,
        WOLlnkoff       = 0x80,
};

/* The Rx and Tx buffer descriptors. */
struct rx_desc {
        __le32 rx_status;
        __le32 desc_length; /* Chain flag, Buffer/frame length */
        __le32 addr;
        __le32 next_desc;
};
struct tx_desc {
        __le32 tx_status;
        __le32 desc_length; /* Chain flag, Tx Config, Frame length */
        __le32 addr;
        __le32 next_desc;
};

/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
#define TXDESC          0x00e08000

enum rx_status_bits {
        RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
};

/* Bits in *_desc.*_status */
enum desc_status_bits {
        DescOwn=0x80000000
};

/* Bits in *_desc.*_length */
enum desc_length_bits {
        DescTag=0x00010000
};

/* Bits in ChipCmd. */
enum chip_cmd_bits {
        CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
        CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
        Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
        Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
};

struct rhine_private {
        /* Bit mask for configured VLAN ids */
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

        /* Descriptor rings */
        struct rx_desc *rx_ring;
        struct tx_desc *tx_ring;
        dma_addr_t rx_ring_dma;
        dma_addr_t tx_ring_dma;

        /* The addresses of receive-in-place skbuffs. */
        struct sk_buff *rx_skbuff[RX_RING_SIZE];
        dma_addr_t rx_skbuff_dma[RX_RING_SIZE];

        /* The saved address of a sent-in-place packet/buffer, for later free(). */
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        dma_addr_t tx_skbuff_dma[TX_RING_SIZE];

        /* Tx bounce buffers (Rhine-I only) */
        unsigned char *tx_buf[TX_RING_SIZE];
        unsigned char *tx_bufs;
        dma_addr_t tx_bufs_dma;

        struct pci_dev *pdev;
        long pioaddr;
        struct net_device *dev;
        struct napi_struct napi;
        spinlock_t lock;
        struct work_struct reset_task;

        /* Frequently used values: keep some adjacent for cache effect. */
        u32 quirks;
        struct rx_desc *rx_head_desc;
        unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
        unsigned int cur_tx, dirty_tx;
        unsigned int rx_buf_sz;         /* Based on MTU+slack. */
        u8 wolopts;

        u8 tx_thresh, rx_thresh;

        struct mii_if_info mii_if;
        void __iomem *base;
};

#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)

#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))

#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)

#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
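/* Example (illustrative) read-modify-write with the helpers above. They
 * expand to plain ioread/iowrite pairs, so callers must serialize access
 * themselves:
 *
 *      BYTE_REG_BITS_ON(Cmd1FDuplex, ioaddr + ChipCmd1);    // set bit
 *      BYTE_REG_BITS_OFF(Cmd1FDuplex, ioaddr + ChipCmd1);   // clear bit
 */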


static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                  struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  rhine_close(struct net_device *dev);
static void rhine_shutdown(struct pci_dev *pdev);
static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_init_cam_filter(struct net_device *dev);
static void rhine_update_vcam(struct net_device *dev);

#define RHINE_WAIT_FOR(condition)                               \
do {                                                            \
        int i = 1024;                                           \
        while (!(condition) && --i)                             \
                ;                                               \
        if (debug > 1 && i < 512)                               \
                pr_info("%4d cycles used @ %s:%d\n",            \
                        1024 - i, __func__, __LINE__);          \
} while (0)
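/* Example use (taken from rhine_chip_reset() below): spin until the chip
 * clears its reset bit, giving up after 1024 polls:
 *
 *      RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
 */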

static inline u32 get_intr_status(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        u32 intr_status;

        intr_status = ioread16(ioaddr + IntrStatus);
        /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
        if (rp->quirks & rqStatusWBRace)
                intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
        return intr_status;
}

/*
 * Get power related registers into sane state.
 * Notify user about past WOL event.
 */
static void rhine_power_init(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        u16 wolstat;

        if (rp->quirks & rqWOL) {
                /* Make sure chip is in power state D0 */
                iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);

                /* Disable "force PME-enable" */
                iowrite8(0x80, ioaddr + WOLcgClr);

                /* Clear power-event config bits (WOL) */
                iowrite8(0xFF, ioaddr + WOLcrClr);
                /* More recent cards can manage two additional patterns */
                if (rp->quirks & rq6patterns)
                        iowrite8(0x03, ioaddr + WOLcrClr1);

                /* Save power-event status bits */
                wolstat = ioread8(ioaddr + PwrcsrSet);
                if (rp->quirks & rq6patterns)
                        wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;

                /* Clear power-event status bits */
                iowrite8(0xFF, ioaddr + PwrcsrClr);
                if (rp->quirks & rq6patterns)
                        iowrite8(0x03, ioaddr + PwrcsrClr1);

                if (wolstat) {
                        const char *reason;
                        switch (wolstat) {
                        case WOLmagic:
                                reason = "Magic packet";
                                break;
                        case WOLlnkon:
                                reason = "Link went up";
                                break;
                        case WOLlnkoff:
                                reason = "Link went down";
                                break;
                        case WOLucast:
                                reason = "Unicast packet";
                                break;
                        case WOLbmcast:
                                reason = "Multicast/broadcast packet";
                                break;
                        default:
                                reason = "Unknown";
                        }
                        netdev_info(dev, "Woke system up. Reason: %s\n",
                                    reason);
                }
        }
}

static void rhine_chip_reset(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
        IOSYNC;

        if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
                netdev_info(dev, "Reset not complete yet. Trying harder.\n");

                /* Force reset */
                if (rp->quirks & rqForceReset)
                        iowrite8(0x40, ioaddr + MiscCmd);

                /* Reset can take somewhat longer (rare) */
                RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
        }

        if (debug > 1)
                netdev_info(dev, "Reset %s\n",
                            (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
                            "failed" : "succeeded");
}

#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
        int n;
        if (quirks & rqRhineI) {
                /* More recent docs say that this bit is reserved ... */
                n = inb(pioaddr + ConfigA) | 0x20;
                outb(n, pioaddr + ConfigA);
        } else {
                n = inb(pioaddr + ConfigD) | 0x80;
                outb(n, pioaddr + ConfigD);
        }
}
#endif

/*
 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
 * (plus 0x6C for Rhine-I/II)
 */
static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        outb(0x20, pioaddr + MACRegEEcsr);
        RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));

#ifdef USE_MMIO
        /*
         * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
         * MMIO. If reloading EEPROM was done first this could be avoided, but
         * it is not known if that still works with the "win98-reboot" problem.
         */
        enable_mmio(pioaddr, rp->quirks);
#endif

        /* Turn off EEPROM-controlled wake-up (magic packet) */
        if (rp->quirks & rqWOL)
                iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void rhine_poll(struct net_device *dev)
{
        disable_irq(dev->irq);
        rhine_interrupt(dev->irq, (void *)dev);
        enable_irq(dev->irq);
}
#endif

static int rhine_napipoll(struct napi_struct *napi, int budget)
{
        struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
        struct net_device *dev = rp->dev;
        void __iomem *ioaddr = rp->base;
        int work_done;

        work_done = rhine_rx(dev, budget);

        if (work_done < budget) {
                napi_complete(napi);

                iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
                          IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
                          IntrTxDone | IntrTxError | IntrTxUnderrun |
                          IntrPCIErr | IntrStatsMax | IntrLinkChange,
                          ioaddr + IntrEnable);
        }
        return work_done;
}

static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
        struct rhine_private *rp = netdev_priv(dev);

        /* Reset the chip to erase previous misconfiguration. */
        rhine_chip_reset(dev);

        /* Rhine-I needs extra time to recuperate before EEPROM reload */
        if (rp->quirks & rqRhineI)
                msleep(5);

        /* Reload EEPROM controlled bytes cleared by soft reset */
        rhine_reload_eeprom(pioaddr, dev);
}

static const struct net_device_ops rhine_netdev_ops = {
        .ndo_open                = rhine_open,
        .ndo_stop                = rhine_close,
        .ndo_start_xmit          = rhine_start_tx,
        .ndo_get_stats           = rhine_get_stats,
        .ndo_set_rx_mode         = rhine_set_rx_mode,
        .ndo_change_mtu          = eth_change_mtu,
        .ndo_validate_addr       = eth_validate_addr,
        .ndo_set_mac_address     = eth_mac_addr,
        .ndo_do_ioctl            = netdev_ioctl,
        .ndo_tx_timeout          = rhine_tx_timeout,
        .ndo_vlan_rx_add_vid     = rhine_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid    = rhine_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller     = rhine_poll,
#endif
};

static int __devinit rhine_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct rhine_private *rp;
        int i, rc;
        u32 quirks;
        long pioaddr;
        long memaddr;
        void __iomem *ioaddr;
        int io_size, phy_id;
        const char *name;
#ifdef USE_MMIO
        int bar = 1;
#else
        int bar = 0;
#endif

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
        pr_info_once("%s\n", version);
#endif

        io_size = 256;
        phy_id = 0;
        quirks = 0;
        name = "Rhine";
        if (pdev->revision < VTunknown0) {
                quirks = rqRhineI;
                io_size = 128;
        } else if (pdev->revision >= VT6102) {
                quirks = rqWOL | rqForceReset;
                if (pdev->revision < VT6105) {
                        name = "Rhine II";
                        quirks |= rqStatusWBRace;       /* Rhine-II exclusive */
                } else {
                        phy_id = 1;     /* Integrated PHY, phy_id fixed to 1 */
                        if (pdev->revision >= VT6105_B0)
                                quirks |= rq6patterns;
                        if (pdev->revision < VT6105M)
                                name = "Rhine III";
                        else
                                name = "Rhine III (Management Adapter)";
                }
        }

        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out;

        /* this should always be supported */
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc) {
                dev_err(&pdev->dev,
                        "32-bit PCI DMA addresses not supported by the card!?\n");
                goto err_out;
        }

        /* sanity check */
        if ((pci_resource_len(pdev, 0) < io_size) ||
            (pci_resource_len(pdev, 1) < io_size)) {
                rc = -EIO;
                dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
                goto err_out;
        }

        pioaddr = pci_resource_start(pdev, 0);
        memaddr = pci_resource_start(pdev, 1);

        pci_set_master(pdev);

        dev = alloc_etherdev(sizeof(struct rhine_private));
        if (!dev) {
                rc = -ENOMEM;
                dev_err(&pdev->dev, "alloc_etherdev failed\n");
                goto err_out;
        }
        SET_NETDEV_DEV(dev, &pdev->dev);

        rp = netdev_priv(dev);
        rp->dev = dev;
        rp->quirks = quirks;
        rp->pioaddr = pioaddr;
        rp->pdev = pdev;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out_free_netdev;

        ioaddr = pci_iomap(pdev, bar, io_size);
        if (!ioaddr) {
                rc = -EIO;
                dev_err(&pdev->dev,
                        "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
                        pci_name(pdev), io_size, memaddr);
                goto err_out_free_res;
        }

#ifdef USE_MMIO
        enable_mmio(pioaddr, quirks);

        /* Check that selected MMIO registers match the PIO ones */
        i = 0;
        while (mmio_verify_registers[i]) {
                int reg = mmio_verify_registers[i++];
                unsigned char a = inb(pioaddr+reg);
                unsigned char b = readb(ioaddr+reg);
                if (a != b) {
                        rc = -EIO;
                        dev_err(&pdev->dev,
                                "MMIO do not match PIO [%02x] (%02x != %02x)\n",
                                reg, a, b);
                        goto err_out_unmap;
                }
        }
#endif /* USE_MMIO */

        dev->base_addr = (unsigned long)ioaddr;
        rp->base = ioaddr;

        /* Get chip registers into a sane state */
        rhine_power_init(dev);
        rhine_hw_init(dev, pioaddr);

        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);

        if (!is_valid_ether_addr(dev->dev_addr)) {
                /* Report it and use a random ethernet address instead */
                netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
                random_ether_addr(dev->dev_addr);
                netdev_info(dev, "Using random MAC address: %pM\n",
                            dev->dev_addr);
        }
        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

        /* For Rhine-I/II, phy_id is loaded from EEPROM */
        if (!phy_id)
                phy_id = ioread8(ioaddr + MIIPhyAddr);

        dev->irq = pdev->irq;

        spin_lock_init(&rp->lock);
        INIT_WORK(&rp->reset_task, rhine_reset_task);

        rp->mii_if.dev = dev;
        rp->mii_if.mdio_read = mdio_read;
        rp->mii_if.mdio_write = mdio_write;
        rp->mii_if.phy_id_mask = 0x1f;
        rp->mii_if.reg_num_mask = 0x1f;

        /* The chip-specific entries in the device structure. */
        dev->netdev_ops = &rhine_netdev_ops;
        dev->ethtool_ops = &netdev_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;

        netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

        if (rp->quirks & rqRhineI)
                dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;

        if (pdev->revision >= VT6105M)
                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
                                 NETIF_F_HW_VLAN_FILTER;

        /* dev->name is not defined before register_netdev()! */
        rc = register_netdev(dev);
        if (rc)
                goto err_out_unmap;

        netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
                    name,
#ifdef USE_MMIO
                    memaddr,
#else
                    (long)ioaddr,
#endif
                    dev->dev_addr, pdev->irq);

        pci_set_drvdata(pdev, dev);

        {
                u16 mii_cmd;
                int mii_status = mdio_read(dev, phy_id, 1);
                mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
                mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
                if (mii_status != 0xffff && mii_status != 0x0000) {
                        rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
                        netdev_info(dev,
                                    "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
                                    phy_id,
                                    mii_status, rp->mii_if.advertising,
                                    mdio_read(dev, phy_id, 5));

                        /* set IFF_RUNNING */
                        if (mii_status & BMSR_LSTATUS)
                                netif_carrier_on(dev);
                        else
                                netif_carrier_off(dev);
                }
        }
        rp->mii_if.phy_id = phy_id;
        if (debug > 1 && avoid_D3)
                netdev_info(dev, "No D3 power state at shutdown\n");

        return 0;

err_out_unmap:
        pci_iounmap(pdev, ioaddr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_free_netdev:
        free_netdev(dev);
err_out:
        return rc;
}

static int alloc_ring(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void *ring;
        dma_addr_t ring_dma;

        ring = pci_alloc_consistent(rp->pdev,
                                    RX_RING_SIZE * sizeof(struct rx_desc) +
                                    TX_RING_SIZE * sizeof(struct tx_desc),
                                    &ring_dma);
        if (!ring) {
                netdev_err(dev, "Could not allocate DMA memory\n");
                return -ENOMEM;
        }
        if (rp->quirks & rqRhineI) {
                rp->tx_bufs = pci_alloc_consistent(rp->pdev,
                                                   PKT_BUF_SZ * TX_RING_SIZE,
                                                   &rp->tx_bufs_dma);
                if (rp->tx_bufs == NULL) {
                        pci_free_consistent(rp->pdev,
                                    RX_RING_SIZE * sizeof(struct rx_desc) +
                                    TX_RING_SIZE * sizeof(struct tx_desc),
                                    ring, ring_dma);
                        return -ENOMEM;
                }
        }

        rp->rx_ring = ring;
        rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
        rp->rx_ring_dma = ring_dma;
        rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);

        return 0;
}
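/*
 * Resulting layout of the single coherent allocation (illustrative;
 * sizeof(struct rx_desc) == sizeof(struct tx_desc) == 16 bytes):
 *
 *   ring_dma
 *   |<-- RX_RING_SIZE * 16 bytes -->|<-- TX_RING_SIZE * 16 bytes -->|
 *   [ rx_desc[0] ... rx_desc[63]   ][ tx_desc[0] ... tx_desc[15]   ]
 *   ^ rp->rx_ring_dma               ^ rp->tx_ring_dma
 */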

static void free_ring(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);

        pci_free_consistent(rp->pdev,
                            RX_RING_SIZE * sizeof(struct rx_desc) +
                            TX_RING_SIZE * sizeof(struct tx_desc),
                            rp->rx_ring, rp->rx_ring_dma);
        rp->tx_ring = NULL;

        if (rp->tx_bufs)
                pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
                                    rp->tx_bufs, rp->tx_bufs_dma);

        rp->tx_bufs = NULL;
}

static void alloc_rbufs(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        dma_addr_t next;
        int i;

        rp->dirty_rx = rp->cur_rx = 0;

        rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
        rp->rx_head_desc = &rp->rx_ring[0];
        next = rp->rx_ring_dma;

        /* Init the ring entries */
        for (i = 0; i < RX_RING_SIZE; i++) {
                rp->rx_ring[i].rx_status = 0;
                rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
                next += sizeof(struct rx_desc);
                rp->rx_ring[i].next_desc = cpu_to_le32(next);
                rp->rx_skbuff[i] = NULL;
        }
        /* Mark the last entry as wrapping the ring. */
        rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);

        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
                rp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                skb->dev = dev;                 /* Mark as being used by this device. */

                rp->rx_skbuff_dma[i] =
                        pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
                                       PCI_DMA_FROMDEVICE);

                rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
                rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
        }
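        /* If any allocation failed, i < RX_RING_SIZE here and the unsigned
         * wrap below leaves cur_rx - dirty_rx positive, so the refill loop
         * in rhine_rx() keeps trying to populate the empty slots. */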
        rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
}

static void free_rbufs(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        int i;

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                rp->rx_ring[i].rx_status = 0;
                rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (rp->rx_skbuff[i]) {
                        pci_unmap_single(rp->pdev,
                                         rp->rx_skbuff_dma[i],
                                         rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
                        dev_kfree_skb(rp->rx_skbuff[i]);
                }
                rp->rx_skbuff[i] = NULL;
        }
}

static void alloc_tbufs(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        dma_addr_t next;
        int i;

        rp->dirty_tx = rp->cur_tx = 0;
        next = rp->tx_ring_dma;
        for (i = 0; i < TX_RING_SIZE; i++) {
                rp->tx_skbuff[i] = NULL;
                rp->tx_ring[i].tx_status = 0;
                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
                next += sizeof(struct tx_desc);
                rp->tx_ring[i].next_desc = cpu_to_le32(next);
                if (rp->quirks & rqRhineI)
                        rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
        }
        rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
}

static void free_tbufs(struct net_device* dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        int i;

        for (i = 0; i < TX_RING_SIZE; i++) {
                rp->tx_ring[i].tx_status = 0;
                rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
                rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
                if (rp->tx_skbuff[i]) {
                        if (rp->tx_skbuff_dma[i]) {
                                pci_unmap_single(rp->pdev,
                                                 rp->tx_skbuff_dma[i],
                                                 rp->tx_skbuff[i]->len,
                                                 PCI_DMA_TODEVICE);
                        }
                        dev_kfree_skb(rp->tx_skbuff[i]);
                }
                rp->tx_skbuff[i] = NULL;
                rp->tx_buf[i] = NULL;
        }
}

static void rhine_check_media(struct net_device *dev, unsigned int init_media)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        mii_check_media(&rp->mii_if, debug, init_media);

        if (rp->mii_if.full_duplex)
                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
                         ioaddr + ChipCmd1);
        else
                iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
                         ioaddr + ChipCmd1);
        if (debug > 1)
                netdev_info(dev, "force_media %d, carrier %d\n",
                            rp->mii_if.force_media, netif_carrier_ok(dev));
}

/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
        if (mii->force_media) {
                /* autoneg is off: Link is always assumed to be up */
                if (!netif_carrier_ok(mii->dev))
                        netif_carrier_on(mii->dev);
        } else {
                /* Let the MII library update the carrier status */
                rhine_check_media(mii->dev, 0);
        }
        if (debug > 1)
                netdev_info(mii->dev, "force_media %d, carrier %d\n",
                            mii->force_media, netif_carrier_ok(mii->dev));
}

/**
 * rhine_set_cam - set CAM multicast filters
 * @ioaddr: register block of this Rhine
 * @idx: multicast CAM index [0..MCAM_SIZE-1]
 * @addr: multicast address (6 bytes)
 *
 * Load addresses into multicast filters.
 */
static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
        int i;

        iowrite8(CAMC_CAMEN, ioaddr + CamCon);
        wmb();

        /* Paranoid -- idx out of range should never happen */
        idx &= (MCAM_SIZE - 1);

        iowrite8((u8) idx, ioaddr + CamAddr);

        for (i = 0; i < 6; i++, addr++)
                iowrite8(*addr, ioaddr + MulticastFilter0 + i);
        udelay(10);
        wmb();

        iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
        udelay(10);

        iowrite8(0, ioaddr + CamCon);
}
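/* Illustrative (hypothetical) use: load one 6-byte multicast address
 * 'mc_addr' into CAM slot 0 and then activate only that slot:
 *
 *      rhine_set_cam(ioaddr, 0, mc_addr);
 *      rhine_set_cam_mask(ioaddr, 1 << 0);
 */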

/**
 * rhine_set_vlan_cam - set CAM VLAN filters
 * @ioaddr: register block of this Rhine
 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
 * @addr: VLAN ID (2 bytes)
 *
 * Load addresses into VLAN filters.
 */
static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
{
        iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
        wmb();

        /* Paranoid -- idx out of range should never happen */
        idx &= (VCAM_SIZE - 1);

        iowrite8((u8) idx, ioaddr + CamAddr);

        iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
        udelay(10);
        wmb();

        iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
        udelay(10);

        iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_cam_mask - set multicast CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: multicast CAM mask
 *
 * Mask sets multicast filters active/inactive.
 */
static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
{
        iowrite8(CAMC_CAMEN, ioaddr + CamCon);
        wmb();

        /* write mask */
        iowrite32(mask, ioaddr + CamMask);

        /* disable CAMEN */
        iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_set_vlan_cam_mask - set VLAN CAM mask
 * @ioaddr: register block of this Rhine
 * @mask: VLAN CAM mask
 *
 * Mask sets VLAN filters active/inactive.
 */
static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
{
        iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
        wmb();

        /* write mask */
        iowrite32(mask, ioaddr + CamMask);

        /* disable CAMEN */
        iowrite8(0, ioaddr + CamCon);
}

/**
 * rhine_init_cam_filter - initialize CAM filters
 * @dev: network device
 *
 * Initialize (disable) hardware VLAN and multicast support on this
 * Rhine.
 */
static void rhine_init_cam_filter(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        /* Disable all CAMs */
        rhine_set_vlan_cam_mask(ioaddr, 0);
        rhine_set_cam_mask(ioaddr, 0);

        /* disable hardware VLAN support */
        BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
        BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
}

/**
 * rhine_update_vcam - update VLAN CAM filters
 * @dev: network device
 *
 * Update VLAN CAM filters to match configuration change.
 */
static void rhine_update_vcam(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        u16 vid;
        u32 vCAMmask = 0;       /* 32 vCAMs (6105M and better) */
        unsigned int i = 0;

        for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
                rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
                vCAMmask |= 1 << i;
                if (++i >= VCAM_SIZE)
                        break;
        }
        rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}

static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
        struct rhine_private *rp = netdev_priv(dev);

        spin_lock_irq(&rp->lock);
        set_bit(vid, rp->active_vlans);
        rhine_update_vcam(dev);
        spin_unlock_irq(&rp->lock);
}

static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct rhine_private *rp = netdev_priv(dev);

        spin_lock_irq(&rp->lock);
        clear_bit(vid, rp->active_vlans);
        rhine_update_vcam(dev);
        spin_unlock_irq(&rp->lock);
}

static void init_registers(struct net_device *dev)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        int i;

        for (i = 0; i < 6; i++)
                iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

        /* Initialize other registers. */
        iowrite16(0x0006, ioaddr + PCIBusConfig);       /* Tune configuration??? */
        /* Configure initial FIFO thresholds. */
        iowrite8(0x20, ioaddr + TxConfig);
        rp->tx_thresh = 0x20;
        rp->rx_thresh = 0x60;           /* Written in rhine_set_rx_mode(). */

        iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
        iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);

        rhine_set_rx_mode(dev);

        if (rp->pdev->revision >= VT6105M)
                rhine_init_cam_filter(dev);

        napi_enable(&rp->napi);

        /* Enable interrupts by setting the interrupt mask. */
        iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty | IntrRxOverflow |
                  IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
                  IntrTxDone | IntrTxError | IntrTxUnderrun |
                  IntrPCIErr | IntrStatsMax | IntrLinkChange,
                  ioaddr + IntrEnable);

        iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
                  ioaddr + ChipCmd);
        rhine_check_media(dev, 1);
}

/* Enable MII link status auto-polling (required for IntrLinkChange) */
static void rhine_enable_linkmon(void __iomem *ioaddr)
{
        iowrite8(0, ioaddr + MIICmd);
        iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
        iowrite8(0x80, ioaddr + MIICmd);

        RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));

        iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}

/* Disable MII link status auto-polling (required for MDIO access) */
static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
{
        iowrite8(0, ioaddr + MIICmd);

        if (quirks & rqRhineI) {
                iowrite8(0x01, ioaddr + MIIRegAddr);    /* MII_BMSR */

                /* Can be called from ISR. Evil. */
                mdelay(1);

                /* 0x80 must be set immediately before turning it off */
                iowrite8(0x80, ioaddr + MIICmd);

                RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);

                /* Heh. Now clear 0x80 again. */
                iowrite8(0, ioaddr + MIICmd);
        } else {
                RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
        }
}

/* Read and write over the MII Management Data I/O (MDIO) interface. */

static int mdio_read(struct net_device *dev, int phy_id, int regnum)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
        int result;

        rhine_disable_linkmon(ioaddr, rp->quirks);

        /* rhine_disable_linkmon already cleared MIICmd */
        iowrite8(phy_id, ioaddr + MIIPhyAddr);
        iowrite8(regnum, ioaddr + MIIRegAddr);
        iowrite8(0x40, ioaddr + MIICmd);                /* Trigger read */
        RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
        result = ioread16(ioaddr + MIIData);

        rhine_enable_linkmon(ioaddr);
        return result;
}

static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
{
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;

        rhine_disable_linkmon(ioaddr, rp->quirks);

        /* rhine_disable_linkmon already cleared MIICmd */
        iowrite8(phy_id, ioaddr + MIIPhyAddr);
        iowrite8(regnum, ioaddr + MIIRegAddr);
        iowrite16(value, ioaddr + MIIData);
        iowrite8(0x20, ioaddr + MIICmd);                /* Trigger write */
        RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));

        rhine_enable_linkmon(ioaddr);
}
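/* Example (illustrative): restarting autonegotiation through the MDIO
 * accessors above, much as the ethtool path would:
 *
 *      int bmcr = mdio_read(dev, rp->mii_if.phy_id, MII_BMCR);
 *      mdio_write(dev, rp->mii_if.phy_id, MII_BMCR,
 *                 bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
 */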
1394
1395static int rhine_open(struct net_device *dev)
1396{
1397        struct rhine_private *rp = netdev_priv(dev);
1398        void __iomem *ioaddr = rp->base;
1399        int rc;
1400
1401        rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1402                        dev);
1403        if (rc)
1404                return rc;
1405
1406        if (debug > 1)
1407                netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1408
1409        rc = alloc_ring(dev);
1410        if (rc) {
1411                free_irq(rp->pdev->irq, dev);
1412                return rc;
1413        }
1414        alloc_rbufs(dev);
1415        alloc_tbufs(dev);
1416        rhine_chip_reset(dev);
1417        init_registers(dev);
1418        if (debug > 2)
1419                netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
1420                           __func__, ioread16(ioaddr + ChipCmd),
1421                           mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1422
1423        netif_start_queue(dev);
1424
1425        return 0;
1426}
1427
1428static void rhine_reset_task(struct work_struct *work)
1429{
1430        struct rhine_private *rp = container_of(work, struct rhine_private,
1431                                                reset_task);
1432        struct net_device *dev = rp->dev;
1433
1434        /* protect against concurrent rx interrupts */
1435        disable_irq(rp->pdev->irq);
1436
1437        napi_disable(&rp->napi);
1438
1439        spin_lock_bh(&rp->lock);
1440
1441        /* clear all descriptors */
1442        free_tbufs(dev);
1443        free_rbufs(dev);
1444        alloc_tbufs(dev);
1445        alloc_rbufs(dev);
1446
1447        /* Reinitialize the hardware. */
1448        rhine_chip_reset(dev);
1449        init_registers(dev);
1450
1451        spin_unlock_bh(&rp->lock);
1452        enable_irq(rp->pdev->irq);
1453
1454        dev->trans_start = jiffies; /* prevent tx timeout */
1455        dev->stats.tx_errors++;
1456        netif_wake_queue(dev);
1457}
1458
1459static void rhine_tx_timeout(struct net_device *dev)
1460{
1461        struct rhine_private *rp = netdev_priv(dev);
1462        void __iomem *ioaddr = rp->base;
1463
1464        netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1465                    ioread16(ioaddr + IntrStatus),
1466                    mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1467
1468        schedule_work(&rp->reset_task);
1469}
1470
1471static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1472                                  struct net_device *dev)
1473{
1474        struct rhine_private *rp = netdev_priv(dev);
1475        void __iomem *ioaddr = rp->base;
1476        unsigned entry;
1477        unsigned long flags;
1478
1479        /* Caution: the write order is important here, set the field
1480           with the "ownership" bits last. */
1481
1482        /* Calculate the next Tx descriptor entry. */
1483        entry = rp->cur_tx % TX_RING_SIZE;
1484
1485        if (skb_padto(skb, ETH_ZLEN))
1486                return NETDEV_TX_OK;
1487
1488        rp->tx_skbuff[entry] = skb;
1489
1490        if ((rp->quirks & rqRhineI) &&
1491            (((unsigned long)skb->data & 3) ||
                 skb_shinfo(skb)->nr_frags != 0 ||
                 skb->ip_summed == CHECKSUM_PARTIAL)) {
1492                /* Must use alignment buffer. */
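                /* The Rhine-I apparently cannot transmit from unaligned
                   or fragmented buffers and does no Tx checksumming in
                   hardware, hence the aligned bounce buffer and the
                   skb_copy_and_csum_dev() below. */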
1493                if (skb->len > PKT_BUF_SZ) {
1494                        /* packet too long, drop it */
1495                        dev_kfree_skb(skb);
1496                        rp->tx_skbuff[entry] = NULL;
1497                        dev->stats.tx_dropped++;
1498                        return NETDEV_TX_OK;
1499                }
1500
1501                /* Padding is not copied and so must be redone. */
1502                skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1503                if (skb->len < ETH_ZLEN)
1504                        memset(rp->tx_buf[entry] + skb->len, 0,
1505                               ETH_ZLEN - skb->len);
1506                rp->tx_skbuff_dma[entry] = 0;
1507                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1508                                                      (rp->tx_buf[entry] -
1509                                                       rp->tx_bufs));
1510        } else {
1511                rp->tx_skbuff_dma[entry] =
1512                        pci_map_single(rp->pdev, skb->data, skb->len,
1513                                       PCI_DMA_TODEVICE);
1514                rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1515        }
1516
1517        rp->tx_ring[entry].desc_length =
1518                cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1519
1520        if (unlikely(vlan_tx_tag_present(skb))) {
1521                rp->tx_ring[entry].tx_status =
                            cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1522                /* request tagging */
1523                rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1524        } else {
1526                rp->tx_ring[entry].tx_status = 0;
            }
1527
1528        /* Serialize against the interrupt handler's Tx scavenging. */
1529        spin_lock_irqsave(&rp->lock, flags);
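        /* The barrier before setting DescOwn makes sure the rest of the
           descriptor is in memory before the chip can see it; the one
           after makes sure DescOwn is visible before the Tx demand
           below. */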
1530        wmb();
1531        rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1532        wmb();
1533
1534        rp->cur_tx++;
1535
1536        /* Non-x86 Todo: explicitly flush cache lines here. */
1537
1538        if (vlan_tx_tag_present(skb))
1539                /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1540                BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1541
1542        /* Wake the potentially-idle transmit channel */
1543        iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1544               ioaddr + ChipCmd1);
1545        IOSYNC;
1546
1547        if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1548                netif_stop_queue(dev);
1549
1550        spin_unlock_irqrestore(&rp->lock, flags);
1551
1552        if (debug > 4) {
1553                netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1554                           rp->cur_tx-1, entry);
1555        }
1556        return NETDEV_TX_OK;
1557}
1558
1559/* The interrupt handler does all of the Rx thread work and cleans up
1560   after the Tx thread. */
1561static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1562{
1563        struct net_device *dev = dev_instance;
1564        struct rhine_private *rp = netdev_priv(dev);
1565        void __iomem *ioaddr = rp->base;
1566        u32 intr_status;
1567        int boguscnt = max_interrupt_work;
1568        int handled = 0;
1569
1570        while ((intr_status = get_intr_status(dev))) {
1571                handled = 1;
1572
1573                /* Acknowledge all of the current interrupt sources ASAP. */
1574                if (intr_status & IntrTxDescRace)
1575                        iowrite8(0x08, ioaddr + IntrStatus2);
1576                iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1577                IOSYNC;
1578
1579                if (debug > 4)
1580                        netdev_dbg(dev, "Interrupt, status %08x\n",
1581                                   intr_status);
1582
1583                if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1584                                   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
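                        /* Re-arm only the non-Rx interrupt sources; Rx
                           stays masked while the NAPI poll loop drains
                           the ring. */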
1585                        iowrite16(IntrTxAborted |
1586                                  IntrTxDone | IntrTxError | IntrTxUnderrun |
1587                                  IntrPCIErr | IntrStatsMax | IntrLinkChange,
1588                                  ioaddr + IntrEnable);
1589
1590                        napi_schedule(&rp->napi);
1591                }
1592
1593                if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1594                        if (intr_status & IntrTxErrSummary) {
1595                                /* Avoid scavenging before Tx engine turned off */
1596                                RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd) & CmdTxOn));
1597                                if (debug > 2 &&
1598                                    ioread8(ioaddr + ChipCmd) & CmdTxOn)
1599                                        netdev_warn(dev,
1600                                                    "%s: Tx engine still on\n",
1601                                                    __func__);
1602                        }
1603                        rhine_tx(dev);
1604                }
1605
1606                /* Abnormal error summary/uncommon events handlers. */
1607                if (intr_status & (IntrPCIErr | IntrLinkChange |
1608                                   IntrStatsMax | IntrTxError | IntrTxAborted |
1609                                   IntrTxUnderrun | IntrTxDescRace))
1610                        rhine_error(dev, intr_status);
1611
1612                if (--boguscnt < 0) {
1613                        netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
1614                                    intr_status);
1615                        break;
1616                }
1617        }
1618
1619        if (debug > 3)
1620                netdev_dbg(dev, "exiting interrupt, status=%08x\n",
1621                           ioread16(ioaddr + IntrStatus));
1622        return IRQ_RETVAL(handled);
1623}
1624
1625/* This routine is logically part of the interrupt handler, but isolated
1626   for clarity. */
1627static void rhine_tx(struct net_device *dev)
1628{
1629        struct rhine_private *rp = netdev_priv(dev);
1630        int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1631
1632        spin_lock(&rp->lock);
1633
1634        /* find and cleanup dirty tx descriptors */
1635        while (rp->dirty_tx != rp->cur_tx) {
1636                txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1637                if (debug > 6)
1638                        netdev_dbg(dev, "Tx scavenge %d status %08x\n",
1639                                   entry, txstatus);
1640                if (txstatus & DescOwn)
1641                        break;
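                /* Bit 0x8000 flags a transmit error; the bits tested
                   below identify the specific cause. */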
1642                if (txstatus & 0x8000) {
1643                        if (debug > 1)
1644                                netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1645                                           txstatus);
1646                        dev->stats.tx_errors++;
1647                        if (txstatus & 0x0400)
1648                                dev->stats.tx_carrier_errors++;
1649                        if (txstatus & 0x0200)
1650                                dev->stats.tx_window_errors++;
1651                        if (txstatus & 0x0100)
1652                                dev->stats.tx_aborted_errors++;
1653                        if (txstatus & 0x0080)
1654                                dev->stats.tx_heartbeat_errors++;
1655                        if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1656                            (txstatus & 0x0800) || (txstatus & 0x1000)) {
1657                                dev->stats.tx_fifo_errors++;
1658                                rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1659                                break; /* Keep the skb - we try again */
1660                        }
1661                        /* Transmitter restarted in 'abnormal' handler. */
1662                } else {
1663                        if (rp->quirks & rqRhineI)
1664                                dev->stats.collisions += (txstatus >> 3) & 0x0F;
1665                        else
1666                                dev->stats.collisions += txstatus & 0x0F;
1667                        if (debug > 6)
1668                                netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
1669                                           (txstatus >> 3) & 0xF,
1670                                           txstatus & 0xF);
1671                        dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1672                        dev->stats.tx_packets++;
1673                }
1674                /* Free the original skb. */
1675                if (rp->tx_skbuff_dma[entry]) {
1676                        pci_unmap_single(rp->pdev,
1677                                         rp->tx_skbuff_dma[entry],
1678                                         rp->tx_skbuff[entry]->len,
1679                                         PCI_DMA_TODEVICE);
1680                }
1681                dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1682                rp->tx_skbuff[entry] = NULL;
1683                entry = (++rp->dirty_tx) % TX_RING_SIZE;
1684        }
1685        if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1686                netif_wake_queue(dev);
1687
1688        spin_unlock(&rp->lock);
1689}
1690
1691/**
1692 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1693 * @skb: pointer to sk_buff
1694 * @data_size: used data area of the buffer including CRC
1695 *
1696 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1697 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1698 * aligned following the CRC.
1699 */
1700static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1701{
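        /* Round the used data area (which includes the CRC) up to a
           4-byte boundary, then skip the 2-byte TPID to reach the TCI. */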
1702        u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1703        return be16_to_cpup((__be16 *)trailer);
1704}
1705
1706/* Process up to limit frames from receive ring */
1707static int rhine_rx(struct net_device *dev, int limit)
1708{
1709        struct rhine_private *rp = netdev_priv(dev);
1710        int count;
1711        int entry = rp->cur_rx % RX_RING_SIZE;
1712
1713        if (debug > 4) {
1714                netdev_dbg(dev, "%s(), entry %d status %08x\n",
1715                           __func__, entry,
1716                           le32_to_cpu(rp->rx_head_desc->rx_status));
1717        }
1718
1719        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1720        for (count = 0; count < limit; ++count) {
1721                struct rx_desc *desc = rp->rx_head_desc;
1722                u32 desc_status = le32_to_cpu(desc->rx_status);
1723                u32 desc_length = le32_to_cpu(desc->desc_length);
1724                int data_size = desc_status >> 16;
1725
1726                if (desc_status & DescOwn)
1727                        break;
1728
1729                if (debug > 4)
1730                        netdev_dbg(dev, "%s() status is %08x\n",
1731                                   __func__, desc_status);
1732
1733                if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1734                        if ((desc_status & RxWholePkt) != RxWholePkt) {
1735                                netdev_warn(dev,
1736                                            "Oversized Ethernet frame spanned multiple buffers, "
1737                                            "entry %#x length %d status %08x!\n",
1738                                            entry, data_size,
1739                                            desc_status);
1740                                netdev_warn(dev,
1741                                            "Oversized Ethernet frame %p vs %p\n",
1742                                            rp->rx_head_desc,
1743                                            &rp->rx_ring[entry]);
1744                                dev->stats.rx_length_errors++;
1745                        } else if (desc_status & RxErr) {
1746                                /* There was an error. */
1747                                if (debug > 2)
1748                                        netdev_dbg(dev, "%s() Rx error was %08x\n",
1749                                                   __func__, desc_status);
1750                                dev->stats.rx_errors++;
1751                                if (desc_status & 0x0030)
1752                                        dev->stats.rx_length_errors++;
1753                                if (desc_status & 0x0048)
1754                                        dev->stats.rx_fifo_errors++;
1755                                if (desc_status & 0x0004)
1756                                        dev->stats.rx_frame_errors++;
1757                                if (desc_status & 0x0002) {
1758                                        /* this can also be updated outside the interrupt handler */
1759                                        spin_lock(&rp->lock);
1760                                        dev->stats.rx_crc_errors++;
1761                                        spin_unlock(&rp->lock);
1762                                }
1763                        }
1764                } else {
1765                        struct sk_buff *skb = NULL;
1766                        /* Length should omit the CRC */
1767                        int pkt_len = data_size - 4;
1768                        u16 vlan_tci = 0;
1769
1770                        /* Check if the packet is long enough to accept without
1771                           copying to a minimally-sized skbuff. */
1772                        if (pkt_len < rx_copybreak)
1773                                skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1774                        if (skb) {
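                                /* Copybreak: copy the small frame out
                                   and keep the ring buffer mapped for
                                   reuse, syncing it to the CPU and back
                                   to the device around the copy. */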
1775                                pci_dma_sync_single_for_cpu(rp->pdev,
1776                                                            rp->rx_skbuff_dma[entry],
1777                                                            rp->rx_buf_sz,
1778                                                            PCI_DMA_FROMDEVICE);
1779
1780                                skb_copy_to_linear_data(skb,
1781                                                 rp->rx_skbuff[entry]->data,
1782                                                 pkt_len);
1783                                skb_put(skb, pkt_len);
1784                                pci_dma_sync_single_for_device(rp->pdev,
1785                                                               rp->rx_skbuff_dma[entry],
1786                                                               rp->rx_buf_sz,
1787                                                               PCI_DMA_FROMDEVICE);
1788                        } else {
1789                                skb = rp->rx_skbuff[entry];
1790                                if (skb == NULL) {
1791                                        netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1792                                        break;
1793                                }
1794                                rp->rx_skbuff[entry] = NULL;
1795                                skb_put(skb, pkt_len);
1796                                pci_unmap_single(rp->pdev,
1797                                                 rp->rx_skbuff_dma[entry],
1798                                                 rp->rx_buf_sz,
1799                                                 PCI_DMA_FROMDEVICE);
1800                        }
1801
1802                        if (unlikely(desc_length & DescTag))
1803                                vlan_tci = rhine_get_vlan_tci(skb, data_size);
1804
1805                        skb->protocol = eth_type_trans(skb, dev);
1806
1807                        if (unlikely(desc_length & DescTag))
1808                                __vlan_hwaccel_put_tag(skb, vlan_tci);
1809                        netif_receive_skb(skb);
1810                        dev->stats.rx_bytes += pkt_len;
1811                        dev->stats.rx_packets++;
1812                }
1813                entry = (++rp->cur_rx) % RX_RING_SIZE;
1814                rp->rx_head_desc = &rp->rx_ring[entry];
1815        }
1816
1817        /* Refill the Rx ring buffers. */
1818        for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1819                struct sk_buff *skb;
1820                entry = rp->dirty_rx % RX_RING_SIZE;
1821                if (rp->rx_skbuff[entry] == NULL) {
1822                        skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1823                        rp->rx_skbuff[entry] = skb;
1824                        if (skb == NULL)
1825                                break;  /* Better luck next round. */
1826                        skb->dev = dev; /* Mark as being used by this device. */
1827                        rp->rx_skbuff_dma[entry] =
1828                                pci_map_single(rp->pdev, skb->data,
1829                                               rp->rx_buf_sz,
1830                                               PCI_DMA_FROMDEVICE);
1831                        rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1832                }
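                /* Hand the descriptor (back) to the chip. */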
1833                rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1834        }
1835
1836        return count;
1837}
1838
1839/*
1840 * Clears the "tally counters" for CRC errors and missed frames(?).
1841 * It has been reported that some chips need a write of 0 to clear
1842 * these, for others the counters are set to 1 when written to and
1843 * instead cleared when read. So we clear them both ways ...
1844 */
1845static inline void clear_tally_counters(void __iomem *ioaddr)
1846{
1847        iowrite32(0, ioaddr + RxMissed);
1848        ioread16(ioaddr + RxCRCErrs);
1849        ioread16(ioaddr + RxMissed);
1850}
1851
1852static void rhine_restart_tx(struct net_device *dev)
    {
1853        struct rhine_private *rp = netdev_priv(dev);
1854        void __iomem *ioaddr = rp->base;
1855        int entry = rp->dirty_tx % TX_RING_SIZE;
1856        u32 intr_status;
1857
1858        /*
1859         * If new errors occurred, we need to sort them out before doing Tx.
1860         * In that case the ISR will be back here real soon anyway.
1861         */
1862        intr_status = get_intr_status(dev);
1863
1864        if ((intr_status & IntrTxErrSummary) == 0) {
1865
1866                /* We know better than the chip where it should continue. */
1867                iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1868                       ioaddr + TxRingPtr);
1869
1870                iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1871                       ioaddr + ChipCmd);
1872
1873                if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1874                        /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1875                        BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1876
1877                iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1878                       ioaddr + ChipCmd1);
1879                IOSYNC;
1880        } else {
1882                /* This should never happen */
1883                if (debug > 1)
1884                        netdev_warn(dev, "%s() Another error occurred %08x\n",
1885                                    __func__, intr_status);
1886        }
1888}
1889
1890static void rhine_error(struct net_device *dev, int intr_status)
1891{
1892        struct rhine_private *rp = netdev_priv(dev);
1893        void __iomem *ioaddr = rp->base;
1894
1895        spin_lock(&rp->lock);
1896
1897        if (intr_status & IntrLinkChange)
1898                rhine_check_media(dev, 0);
1899        if (intr_status & IntrStatsMax) {
1900                dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1901                dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1902                clear_tally_counters(ioaddr);
1903        }
1904        if (intr_status & IntrTxAborted) {
1905                if (debug > 1)
1906                        netdev_info(dev, "Abort %08x, frame dropped\n",
1907                                    intr_status);
1908        }
1909        if (intr_status & IntrTxUnderrun) {
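                /* Raise the Tx threshold one 0x20 step (capped below
                   0xE0) to make further underruns less likely. */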
1910                if (rp->tx_thresh < 0xE0)
1911                        BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80,
                                              ioaddr + TxConfig);
1912                if (debug > 1)
1913                        netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
1914                                    rp->tx_thresh);
1915        }
1916        if (intr_status & IntrTxDescRace) {
1917                if (debug > 2)
1918                        netdev_info(dev, "Tx descriptor write-back race\n");
1919        }
1920        if ((intr_status & IntrTxError) &&
1921            (intr_status & (IntrTxAborted |
1922             IntrTxUnderrun | IntrTxDescRace)) == 0) {
1923                if (rp->tx_thresh < 0xE0) {
1924                        BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80,
                                              ioaddr + TxConfig);
1925                }
1926                if (debug > 1)
1927                        netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
1928                                    rp->tx_thresh);
1929        }
1930        if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1931                           IntrTxError))
1932                rhine_restart_tx(dev);
1933
1934        if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1935                            IntrTxError | IntrTxAborted | IntrNormalSummary |
1936                            IntrTxDescRace)) {
1937                if (debug > 1)
1938                        netdev_err(dev, "Something Wicked happened! %08x\n",
1939                                   intr_status);
1940        }
1941
1942        spin_unlock(&rp->lock);
1943}
1944
1945static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1946{
1947        struct rhine_private *rp = netdev_priv(dev);
1948        void __iomem *ioaddr = rp->base;
1949        unsigned long flags;
1950
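        /* Fold the hardware tally counters into the software stats and
           clear them so nothing is counted twice. */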
1951        spin_lock_irqsave(&rp->lock, flags);
1952        dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1953        dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1954        clear_tally_counters(ioaddr);
1955        spin_unlock_irqrestore(&rp->lock, flags);
1956
1957        return &dev->stats;
1958}
1959
1960static void rhine_set_rx_mode(struct net_device *dev)
1961{
1962        struct rhine_private *rp = netdev_priv(dev);
1963        void __iomem *ioaddr = rp->base;
1964        u32 mc_filter[2];       /* Multicast hash filter */
1965        u8 rx_mode = 0x0C;      /* Note: 0x02=accept runt, 0x01=accept errs */
1966        struct netdev_hw_addr *ha;
1967
1968        if (dev->flags & IFF_PROMISC) {         /* Set promiscuous. */
1969                rx_mode = 0x1C;
1970                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1971                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1972        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1973                   (dev->flags & IFF_ALLMULTI)) {
1974                /* Too many to match, or accept all multicasts. */
1975                iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1976                iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1977        } else if (rp->pdev->revision >= VT6105M) {
1978                int i = 0;
1979                u32 mCAMmask = 0;       /* 32 mCAMs (6105M and better) */
1980                netdev_for_each_mc_addr(ha, dev) {
1981                        if (i == MCAM_SIZE)
1982                                break;
1983                        rhine_set_cam(ioaddr, i, ha->addr);
1984                        mCAMmask |= 1 << i;
1985                        i++;
1986                }
1987                rhine_set_cam_mask(ioaddr, mCAMmask);
1988        } else {
1989                memset(mc_filter, 0, sizeof(mc_filter));
1990                netdev_for_each_mc_addr(ha, dev) {
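                        /* The top six bits of the CRC index one of the
                           64 hash-table bits: bit_nr >> 5 selects the
                           32-bit filter register, bit_nr & 31 the bit
                           within it. */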
1991                        int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1992
1993                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1994                }
1995                iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1996                iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1997        }
1998        /* enable/disable VLAN receive filtering */
1999        if (rp->pdev->revision >= VT6105M) {
2000                if (dev->flags & IFF_PROMISC)
2001                        BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2002                else
2003                        BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2004        }
2005        BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2006}
2007
2008static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2009{
2010        struct rhine_private *rp = netdev_priv(dev);
2011
2012        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2013        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2014        strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2015}
2016
2017static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2018{
2019        struct rhine_private *rp = netdev_priv(dev);
2020        int rc;
2021
2022        spin_lock_irq(&rp->lock);
2023        rc = mii_ethtool_gset(&rp->mii_if, cmd);
2024        spin_unlock_irq(&rp->lock);
2025
2026        return rc;
2027}
2028
2029static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2030{
2031        struct rhine_private *rp = netdev_priv(dev);
2032        int rc;
2033
2034        spin_lock_irq(&rp->lock);
2035        rc = mii_ethtool_sset(&rp->mii_if, cmd);
2036        spin_unlock_irq(&rp->lock);
2037        rhine_set_carrier(&rp->mii_if);
2038
2039        return rc;
2040}
2041
2042static int netdev_nway_reset(struct net_device *dev)
2043{
2044        struct rhine_private *rp = netdev_priv(dev);
2045
2046        return mii_nway_restart(&rp->mii_if);
2047}
2048
2049static u32 netdev_get_link(struct net_device *dev)
2050{
2051        struct rhine_private *rp = netdev_priv(dev);
2052
2053        return mii_link_ok(&rp->mii_if);
2054}
2055
2056static u32 netdev_get_msglevel(struct net_device *dev)
2057{
2058        return debug;
2059}
2060
2061static void netdev_set_msglevel(struct net_device *dev, u32 value)
2062{
2063        debug = value;
2064}
2065
2066static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2067{
2068        struct rhine_private *rp = netdev_priv(dev);
2069
2070        if (!(rp->quirks & rqWOL))
2071                return;
2072
2073        spin_lock_irq(&rp->lock);
2074        wol->supported = WAKE_PHY | WAKE_MAGIC |
2075                         WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;  /* Untested */
2076        wol->wolopts = rp->wolopts;
2077        spin_unlock_irq(&rp->lock);
2078}
2079
2080static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2081{
2082        struct rhine_private *rp = netdev_priv(dev);
2083        u32 support = WAKE_PHY | WAKE_MAGIC |
2084                      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;     /* Untested */
2085
2086        if (!(rp->quirks & rqWOL))
2087                return -EINVAL;
2088
2089        if (wol->wolopts & ~support)
2090                return -EINVAL;
2091
2092        spin_lock_irq(&rp->lock);
2093        rp->wolopts = wol->wolopts;
2094        spin_unlock_irq(&rp->lock);
2095
2096        return 0;
2097}
2098
2099static const struct ethtool_ops netdev_ethtool_ops = {
2100        .get_drvinfo            = netdev_get_drvinfo,
2101        .get_settings           = netdev_get_settings,
2102        .set_settings           = netdev_set_settings,
2103        .nway_reset             = netdev_nway_reset,
2104        .get_link               = netdev_get_link,
2105        .get_msglevel           = netdev_get_msglevel,
2106        .set_msglevel           = netdev_set_msglevel,
2107        .get_wol                = rhine_get_wol,
2108        .set_wol                = rhine_set_wol,
2109};
2110
2111static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2112{
2113        struct rhine_private *rp = netdev_priv(dev);
2114        int rc;
2115
2116        if (!netif_running(dev))
2117                return -EINVAL;
2118
2119        spin_lock_irq(&rp->lock);
2120        rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2121        spin_unlock_irq(&rp->lock);
2122        rhine_set_carrier(&rp->mii_if);
2123
2124        return rc;
2125}
2126
2127static int rhine_close(struct net_device *dev)
2128{
2129        struct rhine_private *rp = netdev_priv(dev);
2130        void __iomem *ioaddr = rp->base;
2131
2132        napi_disable(&rp->napi);
2133        cancel_work_sync(&rp->reset_task);
2134        netif_stop_queue(dev);
2135
2136        spin_lock_irq(&rp->lock);
2137
2138        if (debug > 1)
2139                netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
2140                           ioread16(ioaddr + ChipCmd));
2141
2142        /* Switch to loopback mode to avoid hardware races. */
2143        iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2144
2145        /* Disable interrupts by clearing the interrupt mask. */
2146        iowrite16(0x0000, ioaddr + IntrEnable);
2147
2148        /* Stop the chip's Tx and Rx processes. */
2149        iowrite16(CmdStop, ioaddr + ChipCmd);
2150
2151        spin_unlock_irq(&rp->lock);
2152
2153        free_irq(rp->pdev->irq, dev);
2154        free_rbufs(dev);
2155        free_tbufs(dev);
2156        free_ring(dev);
2157
2158        return 0;
2159}
2160
2162static void __devexit rhine_remove_one(struct pci_dev *pdev)
2163{
2164        struct net_device *dev = pci_get_drvdata(pdev);
2165        struct rhine_private *rp = netdev_priv(dev);
2166
2167        unregister_netdev(dev);
2168
2169        pci_iounmap(pdev, rp->base);
2170        pci_release_regions(pdev);
2171
2172        free_netdev(dev);
2173        pci_disable_device(pdev);
2174        pci_set_drvdata(pdev, NULL);
2175}
2176
2177static void rhine_shutdown(struct pci_dev *pdev)
2178{
2179        struct net_device *dev = pci_get_drvdata(pdev);
2180        struct rhine_private *rp = netdev_priv(dev);
2181        void __iomem *ioaddr = rp->base;
2182
2183        if (!(rp->quirks & rqWOL))
2184                return; /* Nothing to do for non-WOL adapters */
2185
2186        rhine_power_init(dev);
2187
2188        /* Make sure we use pattern 0, 1 and not 4, 5 */
2189        if (rp->quirks & rq6patterns)
2190                iowrite8(0x04, ioaddr + WOLcgClr);
2191
2192        if (rp->wolopts & WAKE_MAGIC) {
2193                iowrite8(WOLmagic, ioaddr + WOLcrSet);
2194                /*
2195                 * Turn EEPROM-controlled wake-up back on -- some hardware may
2196                 * not cooperate otherwise.
2197                 */
2198                iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2199        }
2200
2201        if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2202                iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2203
2204        if (rp->wolopts & WAKE_PHY)
2205                iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2206
2207        if (rp->wolopts & WAKE_UCAST)
2208                iowrite8(WOLucast, ioaddr + WOLcrSet);
2209
2210        if (rp->wolopts) {
2211                /* Enable legacy WOL (for old motherboards) */
2212                iowrite8(0x01, ioaddr + PwcfgSet);
2213                iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2214        }
2215
2216        /* Hit power state D3 (sleep) */
2217        if (!avoid_D3)
2218                iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2219
2220        /* TODO: Check use of pci_enable_wake() */
2222}
2223
2224#ifdef CONFIG_PM
2225static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
2226{
2227        struct net_device *dev = pci_get_drvdata(pdev);
2228        struct rhine_private *rp = netdev_priv(dev);
2229        unsigned long flags;
2230
2231        if (!netif_running(dev))
2232                return 0;
2233
2234        napi_disable(&rp->napi);
2235
2236        netif_device_detach(dev);
2237        pci_save_state(pdev);
2238
2239        spin_lock_irqsave(&rp->lock, flags);
2240        rhine_shutdown(pdev);
2241        spin_unlock_irqrestore(&rp->lock, flags);
2242
2243        free_irq(dev->irq, dev);
2244        return 0;
2245}
2246
2247static int rhine_resume(struct pci_dev *pdev)
2248{
2249        struct net_device *dev = pci_get_drvdata(pdev);
2250        struct rhine_private *rp = netdev_priv(dev);
2251        unsigned long flags;
2252        int ret;
2253
2254        if (!netif_running(dev))
2255                return 0;
2256
2257        ret = request_irq(dev->irq, rhine_interrupt, IRQF_SHARED,
                              dev->name, dev);
2258        if (ret) {
                    netdev_err(dev, "request_irq failed\n");
                    return ret;
            }
2259
2260        ret = pci_set_power_state(pdev, PCI_D0);
2261        if (debug > 1)
2262                netdev_info(dev, "Entering power state D0 %s (%d)\n",
2263                            ret ? "failed" : "succeeded", ret);
2264
2265        pci_restore_state(pdev);
2266
2267        spin_lock_irqsave(&rp->lock, flags);
2268#ifdef USE_MMIO
2269        enable_mmio(rp->pioaddr, rp->quirks);
2270#endif
2271        rhine_power_init(dev);
2272        free_tbufs(dev);
2273        free_rbufs(dev);
2274        alloc_tbufs(dev);
2275        alloc_rbufs(dev);
2276        init_registers(dev);
2277        spin_unlock_irqrestore(&rp->lock, flags);
2278
2279        netif_device_attach(dev);
2280
2281        return 0;
2282}
2283#endif /* CONFIG_PM */
2284
2285static struct pci_driver rhine_driver = {
2286        .name           = DRV_NAME,
2287        .id_table       = rhine_pci_tbl,
2288        .probe          = rhine_init_one,
2289        .remove         = __devexit_p(rhine_remove_one),
2290#ifdef CONFIG_PM
2291        .suspend        = rhine_suspend,
2292        .resume         = rhine_resume,
2293#endif /* CONFIG_PM */
2294        .shutdown       = rhine_shutdown,
2295};
2296
2297static struct dmi_system_id __initdata rhine_dmi_table[] = {
2298        {
2299                .ident = "EPIA-M",
2300                .matches = {
2301                        DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2302                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2303                },
2304        },
2305        {
2306                .ident = "KV7",
2307                .matches = {
2308                        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2309                        DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2310                },
2311        },
2312        { NULL }
2313};
2314
2315static int __init rhine_init(void)
2316{
2317/* when a module, this is printed whether or not devices are found in probe */
2318#ifdef MODULE
2319        pr_info("%s\n", version);
2320#endif
2321        if (dmi_check_system(rhine_dmi_table)) {
2322                /* these BIOSes fail at PXE boot if chip is in D3 */
2323                avoid_D3 = 1;
2324                pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2325        } else if (avoid_D3) {
2327                pr_info("avoid_D3 set\n");
            }
2328
2329        return pci_register_driver(&rhine_driver);
2330}
2331
2333static void __exit rhine_cleanup(void)
2334{
2335        pci_unregister_driver(&rhine_driver);
2336}
2337
2339module_init(rhine_init);
2340module_exit(rhine_cleanup);
2341