/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
/*
        Written 1996-1999 by Donald Becker.

        The driver also contains updates by different kernel developers
        (see incomplete list below).
        Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
        Please use this email address and linux-kernel mailing list for bug reports.

        This software may be used and distributed according to the terms
        of the GNU General Public License, incorporated herein by reference.

        This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
        It should work with all i82557/558/559 boards.

        Version history:
        1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
                Serious fixes for multicast filter list setting, TX timeout routine;
                RX ring refilling logic; other stuff
        2000 Feb  Jeff Garzik <jgarzik@pobox.com>
                Convert to new PCI driver interface
        2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
                Disabled FC and ER, to avoid lockups when we get FCP interrupts.
        2000 Jul 17  Goutham Rao <goutham.rao@intel.com>
                PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
        2000 Aug 31  David Mosberger <davidm@hpl.hp.com>
                rx_align support: enables rx DMA without causing unaligned accesses.
*/

static const char * const version =
"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";

/* A few user-configurable values that apply to all boards.
   First set is undocumented and spelled per Intel recommendations. */

static int congenb /* = 0 */;   /* Enable congestion control in the DP83840. */
static int txfifo = 8;          /* Tx FIFO threshold in 4 byte units, 0-15 */
static int rxfifo = 8;          /* Rx FIFO threshold, default 32 bytes. */
/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
static int txdmacount = 128;
static int rxdmacount /* = 0 */;

#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
        defined(__arm__)
  /* align rx buffers to 2 bytes so that IP header is aligned */
# define rx_align(skb)          skb_reserve((skb), 2)
# define RxFD_ALIGNMENT         __attribute__ ((aligned (2), packed))
#else
# define rx_align(skb)
# define RxFD_ALIGNMENT
#endif
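
/* Worked example (editorial note, not in the original): the Ethernet header
   is 14 bytes, so without the 2-byte skb_reserve() above the IP header would
   start at offset 14 and be only 2-byte aligned.  Reserving 2 bytes places
   it at offset 16, i.e. longword aligned, which avoids unaligned accesses on
   the architectures listed. */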

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
   Lower values use more memory, but are faster. */
static int rx_copybreak = 200;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
static int multicast_filter_limit = 64;

/* 'options' is used to pass a transceiver override or full-duplex flag
   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* A few values that may be tweaked. */
/* The ring sizes should be a power of two for efficiency. */
#define TX_RING_SIZE    64
#define RX_RING_SIZE    64
/* How many slots the multicast filter setup may take.
   Do not decrease without changing the set_rx_mode() implementation. */
#define TX_MULTICAST_SIZE   2
#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
/* Actual number of TX packets queued, must be
   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
/* Hysteresis point for marking the queue as no longer full. */
#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
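
#if 0   /* Editorial sketch, not compiled: how the limits above interact.
           With TX_RING_SIZE 64 and TX_MULTICAST_RESERV 4, at most 60 packets
           may be queued (TX_QUEUE_LIMIT), keeping four slots free for
           multicast setup frames; the queue reopens only once it drains
           below 56 (TX_QUEUE_UNFULL).  cur_tx/dirty_tx are the free-running
           counters used later in this file. */
        if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT)
                sp->tx_full = 1;                /* stop accepting packets */
        else if (sp->tx_full && sp->cur_tx - sp->dirty_tx < TX_QUEUE_UNFULL)
                sp->tx_full = 0;                /* hysteresis: reopen queue */
#endif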

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT              (2*HZ)
/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
#define PKT_BUF_SZ              1536

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>

static int use_io;
static int debug = -1;
#define DEBUG_DEFAULT           (NETIF_MSG_DRV          | \
                                 NETIF_MSG_HW           | \
                                 NETIF_MSG_RX_ERR       | \
                                 NETIF_MSG_TX_ERR)
#define DEBUG                   ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
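
/* Worked example (editorial note): "debug=3" gives (1<<3)-1 == 0x7, i.e.
   NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK; a negative debug value
   selects DEBUG_DEFAULT above. */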


MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
MODULE_LICENSE("GPL");
module_param(use_io, int, 0);
module_param(debug, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(congenb, int, 0);
module_param(txfifo, int, 0);
module_param(rxfifo, int, 0);
module_param(txdmacount, int, 0);
module_param(rxdmacount, int, 0);
module_param(rx_copybreak, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");

#define RUN_AT(x) (jiffies + (x))

#define netdevice_start(dev)
#define netdevice_stop(dev)
#define netif_set_tx_timeout(dev, tf, tm) \
        do { \
                (dev)->tx_timeout = (tf); \
                (dev)->watchdog_timeo = (tm); \
        } while(0)



/*
                                Theory of Operation

I. Board Compatibility

This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
single-chip fast Ethernet controller for PCI, as used on the Intel
EtherExpress Pro 100 adapter.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.  While it's
possible to share PCI interrupt lines, it negatively impacts performance and
only recent kernels support it.

III. Driver operation

IIIA. General
The Speedo3 is very similar to other Intel network chips, that is to say
"apparently designed on a different planet".  This chip retains the complex
Rx and Tx descriptors and multiple buffer pointers of previous chips, but
also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
Tx mode, but in a simplified lower-overhead manner: it associates only a
single buffer descriptor with each frame descriptor.

Despite the extra space overhead in each receive skbuff, the driver must use
the simplified Rx buffer mode to assure that only a single data buffer is
associated with each RxFD. The driver implements this by reserving space
for the Rx descriptor at the head of each Rx skbuff.

The Speedo-3 has receive and command unit base addresses that are added to
almost all descriptor pointers.  The driver sets these to zero, so that all
pointer fields are absolute addresses.

The System Control Block (SCB) of some previous Intel chips exists on the
chip in both PCI I/O and memory space.  This driver uses the I/O space
registers, but might switch to memory mapped mode to better support non-x86
processors.

IIIB. Transmit structure

The driver must use the complex Tx command+descriptor mode in order to
have an indirect pointer to the skbuff data section.  Each Tx command block
(TxCB) is associated with two immediately appended Tx Buffer Descriptors
(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
speedo_private data structure for each adapter instance.

The newer i82558 explicitly supports this structure, and can read the two
TxBDs in the same PCI burst as the TxCB.

This ring structure is used for all normal transmit packets, but the
transmit packet descriptors aren't long enough for most non-Tx commands such
as CmdConfigure.  This is complicated by the possibility that the chip has
already loaded the link address in the previous descriptor.  So for these
commands we convert the next free descriptor on the ring to a NoOp, and point
that descriptor's link to the complex command.

An additional complexity of these non-transmit commands is that they may be
added asynchronously to the normal transmit queue, so we disable interrupts
whenever the Tx descriptor ring is manipulated.

A notable aspect of these special configure commands is that they do
work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
is done at interrupt time using the 'dirty_tx' index, and checking for the
command-complete bit.  While the setup frames may have the NoOp command on the
Tx ring marked as complete without the setup command itself having completed,
this is not a problem.  The tx_ring entry can still be safely reused, as the
tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.

Commands may have bits set e.g. CmdSuspend in the command word to either
suspend or stop the transmit/command unit.  This driver always flags the last
command with CmdSuspend, erases the CmdSuspend in the previous command, and
then issues a CU_RESUME.
Note: Watch out for the potential race condition here: imagine
        erasing the previous suspend
                the chip processes the previous command
                the chip processes the final command, and suspends
        doing the CU_RESUME
                the chip processes the next, not-yet-valid post-final-command.
So blindly sending a CU_RESUME is only safe if we do it immediately after
erasing the previous CmdSuspend, without the possibility of an
intervening delay.  Thus the resume command is always within the
interrupts-disabled region.  This is a timing dependence, but handling this
condition in a timing-independent way would considerably complicate the code.
(A sketch of this append/resume sequence follows this comment block.)

Note: In previous generation Intel chips, restarting the command unit was a
notoriously slow process.  This is presumably no longer true.

IIIC. Receive structure

Because of the bus-master support on the Speedo3 this driver uses the new
SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
This scheme allocates full-sized skbuffs as receive buffers.  The value
rx_copybreak is used as the copying breakpoint: it is chosen to trade off
the memory wasted by passing the full-sized skbuff to the queue layer for
all frames vs. the cost of copying a frame to a correctly-sized skbuff.

For small frames the copying cost is negligible (esp. considering that we
are pre-loading the cache with immediately useful header information), so we
allocate a new, minimally-sized skbuff.  For large frames the copying cost
is non-trivial, and the larger copy might flush the cache of useful data, so
we pass up the skbuff the packet was received into.

IV. Notes

Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
that stated that I could disclose the information.  But I still resent
having to sign an Intel NDA when I'm helping Intel sell their own product!

*/
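
#if 0   /* Editorial sketch, not compiled: the CmdSuspend/CU_RESUME sequence
           described above.  The real logic lives in speedo_start_xmit() and
           friends; the helper below is hypothetical and assumes the
           speedo_private fields and the clear_suspend() macro defined
           further down.  The caller must hold sp->lock with interrupts
           disabled, so no delay can intervene between steps 2 and 3. */
static void example_append_command(struct speedo_private *sp,
                                   struct descriptor *new_cmd)
{
        /* 1. Flag the new, last command with CmdSuspend. */
        new_cmd->cmd_status |= cpu_to_le32(CmdSuspend);
        /* 2. Erase CmdSuspend in the previously last command... */
        clear_suspend(sp->last_cmd);
        sp->last_cmd = new_cmd;
        /* 3. ...and immediately wake the command unit. */
        iowrite8(CUResume, sp->regs + SCBCmd);
}
#endif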

static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);

/* Offsets to the various registers.
   Accesses need not be longword aligned. */
enum speedo_offsets {
        SCBStatus = 0, SCBCmd = 2,      /* Rx/Command Unit command and status. */
        SCBIntmask = 3,
        SCBPointer = 4,                 /* General purpose pointer. */
        SCBPort = 8,                    /* Misc. commands and operands. */
        SCBflash = 12, SCBeeprom = 14,  /* EEPROM and flash memory control. */
        SCBCtrlMDI = 16,                /* MDI interface control. */
        SCBEarlyRx = 20,                /* Early receive byte count. */
};
/* Commands that can be put in a command list entry. */
enum commands {
        CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
        CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
        CmdDump = 0x60000, CmdDiagnose = 0x70000,
        CmdSuspend = 0x40000000,        /* Suspend after completion. */
        CmdIntr = 0x20000000,           /* Interrupt after completion. */
        CmdTxFlex = 0x00080000,         /* Use "Flexible mode" for CmdTx command. */
};
/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
   status bits.  Previous driver versions used separate 16 bit fields for
   commands and statuses.  --SAW
 */
#if defined(__alpha__)
# define clear_suspend(cmd)  clear_bit(30, &(cmd)->cmd_status);
#else
# define clear_suspend(cmd)  ((__le16 *)&(cmd)->cmd_status)[1] &= ~cpu_to_le16(1<<14)
#endif
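
/* Worked example (editorial note): CmdSuspend is bit 30 of the 32-bit
   cmd_status word.  In the upper 16-bit halfword that is bit 30 - 16 = 14,
   hence the ~cpu_to_le16(1<<14) in the non-alpha variant; writing only that
   halfword avoids racing with the chip's updates to the status bits in the
   lower half. */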

enum SCBCmdBits {
        SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
        SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
        SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
        /* The rest are Rx and Tx commands. */
        CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
        CUCmdBase=0x0060,       /* CU Base address (set to zero). */
        CUDumpStats=0x0070,     /* Dump then reset stats counters. */
        RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
        RxResumeNoResources=0x0007,
};

enum SCBPort_cmds {
        PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
};

/* The Speedo3 Rx and Tx frame/buffer descriptors. */
struct descriptor {                     /* A generic descriptor. */
        volatile __le32 cmd_status;     /* All command and status fields. */
        __le32 link;                    /* struct descriptor *  */
        unsigned char params[0];
};

/* The Speedo3 Rx and Tx buffer descriptors. */
struct RxFD {                           /* Receive frame descriptor. */
        volatile __le32 status;
        __le32 link;                    /* struct RxFD * */
        __le32 rx_buf_addr;             /* void * */
        __le32 count;
} RxFD_ALIGNMENT;

/* Selected elements of the Tx/RxFD.status word. */
enum RxFD_bits {
        RxComplete=0x8000, RxOK=0x2000,
        RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
        RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
        TxUnderrun=0x1000,  StatusComplete=0x8000,
};

#define CONFIG_DATA_SIZE 22
struct TxFD {                           /* Transmit frame descriptor set. */
        __le32 status;
        __le32 link;                    /* void * */
        __le32 tx_desc_addr;            /* Always points to the tx_buf_addr element. */
        __le32 count;                   /* # of TBD (=1), Tx start thresh., etc. */
        /* This constitutes two "TBD" entries -- we only use one. */
#define TX_DESCR_BUF_OFFSET 16
        __le32 tx_buf_addr0;            /* void *, frame to be transmitted. */
        __le32 tx_buf_size0;            /* Length of Tx frame. */
        __le32 tx_buf_addr1;            /* void *, frame to be transmitted. */
        __le32 tx_buf_size1;            /* Length of Tx frame. */
        /* the structure must have space for at least CONFIG_DATA_SIZE starting
         * from tx_desc_addr field */
};
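
/* Worked example (editorial note): the first TBD starts 16 bytes into the
   TxFD (TX_DESCR_BUF_OFFSET == 4 * sizeof(__le32)), so for ring entry n
   tx_desc_addr is set to TX_RING_ELEM_DMA(sp, n) + TX_DESCR_BUF_OFFSET and
   only tx_buf_addr0/tx_buf_size0 are filled in, matching the single
   buffer-descriptor-per-frame scheme described in section IIIB above. */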

/* Multicast filter setting block.  --SAW */
struct speedo_mc_block {
        struct speedo_mc_block *next;
        unsigned int tx;
        dma_addr_t frame_dma;
        unsigned int len;
        struct descriptor frame __attribute__ ((__aligned__(16)));
};

/* Elements of the dump_statistics block. This block must be lword aligned. */
struct speedo_stats {
        __le32 tx_good_frames;
        __le32 tx_coll16_errs;
        __le32 tx_late_colls;
        __le32 tx_underruns;
        __le32 tx_lost_carrier;
        __le32 tx_deferred;
        __le32 tx_one_colls;
        __le32 tx_multi_colls;
        __le32 tx_total_colls;
        __le32 rx_good_frames;
        __le32 rx_crc_errs;
        __le32 rx_align_errs;
        __le32 rx_resource_errs;
        __le32 rx_overrun_errs;
        __le32 rx_colls_errs;
        __le32 rx_runt_errs;
        __le32 done_marker;
};

enum Rx_ring_state_bits {
        RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
};

/* Do not change the position (alignment) of the first few elements!
   The later elements are grouped for cache locality.

   Unfortunately, all the positions have been shifted since then.
   A new re-alignment is required.  2000/03/06  SAW */
struct speedo_private {
        void __iomem *regs;
        struct TxFD *tx_ring;                   /* Commands (usually CmdTxPacket). */
        struct RxFD *rx_ringp[RX_RING_SIZE];    /* Rx descriptor, used as ring. */
        /* The addresses of a Tx/Rx-in-place packets/buffers. */
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        struct sk_buff *rx_skbuff[RX_RING_SIZE];
        /* Mapped addresses of the rings. */
        dma_addr_t tx_ring_dma;
#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
        dma_addr_t rx_ring_dma[RX_RING_SIZE];
        struct descriptor *last_cmd;            /* Last command sent. */
        unsigned int cur_tx, dirty_tx;          /* The ring entries to be free()ed. */
        spinlock_t lock;                        /* Group with Tx control cache line. */
        u32 tx_threshold;                       /* The value for txdesc.count. */
        struct RxFD *last_rxf;                  /* Last filled RX buffer. */
        dma_addr_t last_rxf_dma;
        unsigned int cur_rx, dirty_rx;          /* The next free ring entry */
        long last_rx_time;                      /* Last Rx, in jiffies, to handle Rx hang. */
        struct net_device_stats stats;
        struct speedo_stats *lstats;
        dma_addr_t lstats_dma;
        int chip_id;
        struct pci_dev *pdev;
        struct timer_list timer;                /* Media selection timer. */
        struct speedo_mc_block *mc_setup_head;  /* Multicast setup frame list head. */
        struct speedo_mc_block *mc_setup_tail;  /* Multicast setup frame list tail. */
        long in_interrupt;                      /* Word-aligned dev->interrupt */
        unsigned char acpi_pwr;
        signed char rx_mode;                    /* Current PROMISC/ALLMULTI setting. */
        unsigned int tx_full:1;                 /* The Tx queue is full. */
        unsigned int flow_ctrl:1;               /* Use 802.3x flow control. */
        unsigned int rx_bug:1;                  /* Work around receiver hang errata. */
        unsigned char default_port:8;           /* Last dev->if_port value. */
        unsigned char rx_ring_state;            /* RX ring status flags. */
        unsigned short phy[2];                  /* PHY media interfaces available. */
        unsigned short partner;                 /* Link partner caps. */
        struct mii_if_info mii_if;              /* MII API hooks, info */
        u32 msg_enable;                         /* debug message level */
};

/* The parameters for a CmdConfigure operation.
   There are so many options that it would be difficult to document each bit.
   We mostly use the default or recommended settings. */
static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
        22, 0x08, 0, 0,  0, 0, 0x32, 0x03,  1, /* 1=Use MII  0=Use AUI */
        0, 0x2E, 0,  0x60, 0,
        0xf2, 0x48,   0, 0x40, 0xf2, 0x80,      /* 0x40=Force full-duplex */
        0x3f, 0x05, };
static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
        22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
        0, 0x2E, 0,  0x60, 0x08, 0x88,
        0x68, 0, 0x40, 0xf2, 0x84,              /* Disable FC */
        0x31, 0x05, };

/* PHY media interface chips. */
static const char * const phys[] = {
        "None", "i82553-A/B", "i82553-C", "i82503",
        "DP83840", "80c240", "80c24", "i82555",
        "unknown-8", "unknown-9", "DP83840A", "unknown-11",
        "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
                 S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
#define EE_READ_CMD             (6)

static int eepro100_init_one(struct pci_dev *pdev,
                const struct pci_device_id *ent);

static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int speedo_open(struct net_device *dev);
static void speedo_resume(struct net_device *dev);
static void speedo_timer(unsigned long data);
static void speedo_init_rx_ring(struct net_device *dev);
static void speedo_tx_timeout(struct net_device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void speedo_refill_rx_buffers(struct net_device *dev, int force);
static int speedo_rx(struct net_device *dev);
static void speedo_tx_buffer_gc(struct net_device *dev);
static irqreturn_t speedo_interrupt(int irq, void *dev_instance);
static int speedo_close(struct net_device *dev);
static struct net_device_stats *speedo_get_stats(struct net_device *dev);
static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void speedo_show_state(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;



#ifdef honor_default_port
/* Optional driver feature to allow forcing the transceiver setting.
   Not recommended. */
static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
                           0x2000, 0x2100, 0x0400, 0x3100};
#endif

/* How to wait for the command unit to accept a command.
   Typically this takes 0 ticks. */
static inline unsigned char wait_for_cmd_done(struct net_device *dev,
                                              struct speedo_private *sp)
{
        int wait = 1000;
        void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
        unsigned char r;

        do  {
                udelay(1);
                r = ioread8(cmd_ioaddr);
        } while(r && --wait >= 0);

        if (wait < 0)
                printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
        return r;
}

static int __devinit eepro100_init_one (struct pci_dev *pdev,
                const struct pci_device_id *ent)
{
        void __iomem *ioaddr;
        int irq, pci_bar;
        int acpi_idle_state = 0, pm;
        static int cards_found /* = 0 */;
        unsigned long pci_base;

#ifndef MODULE
        /* when built-in, we only print version if device is found */
        static int did_version;
        if (did_version++ == 0)
                printk(version);
#endif

        /* save power state before pci_enable_device overwrites it */
        pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm) {
                u16 pwr_command;
                pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
                acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
        }

        /* No resources have been requested yet, so fail without
           releasing anything. */
        if (pci_enable_device(pdev))
                goto err_out_none;

        pci_set_master(pdev);

        if (!request_region(pci_resource_start(pdev, 1),
                        pci_resource_len(pdev, 1), "eepro100")) {
                dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
                goto err_out_none;
        }
        if (!request_mem_region(pci_resource_start(pdev, 0),
                        pci_resource_len(pdev, 0), "eepro100")) {
                dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
                goto err_out_free_pio_region;
        }

        irq = pdev->irq;
        pci_bar = use_io ? 1 : 0;
        pci_base = pci_resource_start(pdev, pci_bar);
        if (DEBUG & NETIF_MSG_PROBE)
                printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
                       pci_base, irq);

        ioaddr = pci_iomap(pdev, pci_bar, 0);
        if (!ioaddr) {
                dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
                goto err_out_free_mmio_region;
        }

        if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
                cards_found++;
        else
                goto err_out_iounmap;

        return 0;

err_out_iounmap:
        pci_iounmap(pdev, ioaddr);
err_out_free_mmio_region:
        release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_out_free_pio_region:
        release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
err_out_none:
        return -ENODEV;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_speedo (struct net_device *dev)
{
        /* disable_irq is not very nice, but with the funny lockless design
           we have no other choice. */
        disable_irq(dev->irq);
        speedo_interrupt (dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static int __devinit speedo_found1(struct pci_dev *pdev,
                void __iomem *ioaddr, int card_idx, int acpi_idle_state)
{
        struct net_device *dev;
        struct speedo_private *sp;
        const char *product;
        int i, option;
        u16 eeprom[0x100];
        int size;
        void *tx_ring_space;
        dma_addr_t tx_ring_dma;
        DECLARE_MAC_BUF(mac);

        size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
        tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
        if (tx_ring_space == NULL)
                return -1;

        dev = alloc_etherdev(sizeof(struct speedo_private));
        if (dev == NULL) {
                printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
                pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
                return -1;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        if (dev->mem_start > 0)
                option = dev->mem_start;
        else if (card_idx >= 0  &&  options[card_idx] >= 0)
                option = options[card_idx];
        else
                option = 0;

        rtnl_lock();
        if (dev_alloc_name(dev, dev->name) < 0)
                goto err_free_unlock;

        /* Read the station address EEPROM before doing the reset.
           Nominally this should even be done before accepting the device, but
           then we wouldn't have a device name with which to report the error.
           The size test is for 6 bit vs. 8 bit address serial EEPROMs.
        */
        {
                void __iomem *iobase;
                int read_cmd, ee_size;
                u16 sum;
                int j;

                /* Use IO only to avoid postponed writes and satisfy EEPROM timing
                   requirements. */
                iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
                if (!iobase)
                        goto err_free_unlock;
                if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
                        == 0xffe0000) {
                        ee_size = 0x100;
                        read_cmd = EE_READ_CMD << 24;
                } else {
                        ee_size = 0x40;
                        read_cmd = EE_READ_CMD << 22;
                }

                for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
                        u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
                        eeprom[i] = value;
                        sum += value;
                        if (i < 3) {
                                dev->dev_addr[j++] = value;
                                dev->dev_addr[j++] = value >> 8;
                        }
                }
                if (sum != 0xBABA)
                        printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
                                   "check settings before activating this device!\n",
                                   dev->name, sum);
                /* Don't  unregister_netdev(dev);  as the EEPro may actually be
                   usable, especially if the MAC address is set later.
                   On the other hand, it may be unusable if MDI data is corrupted. */

                pci_iounmap(pdev, iobase);
        }

        /* Reset the chip: stop Tx and Rx processes and clear counters.
           This takes less than 10usec and will easily finish before the next
           action. */
        iowrite32(PortReset, ioaddr + SCBPort);
        ioread32(ioaddr + SCBPort);
        udelay(10);

        if (eeprom[3] & 0x0100)
                product = "OEM i82557/i82558 10/100 Ethernet";
        else
                product = pci_name(pdev);

        printk(KERN_INFO "%s: %s, %s, IRQ %d.\n", dev->name, product,
                   print_mac(mac, dev->dev_addr), pdev->irq);

        sp = netdev_priv(dev);

        /* we must initialize this early, for mdio_{read,write} */
        sp->regs = ioaddr;

#if 1 || defined(kernel_bloat)
        /* OK, this is pure kernel bloat.  I don't like it when other drivers
           waste non-pageable kernel space to emit similar messages, but I need
           them for bug reports. */
        {
                const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
                /* The self-test results must be paragraph aligned. */
                volatile s32 *self_test_results;
                int boguscnt = 16000;   /* Timeout for self-test. */
                if ((eeprom[3] & 0x03) != 0x03)
                        printk(KERN_INFO "  Receiver lock-up bug exists -- enabling"
                                   " work-around.\n");
                printk(KERN_INFO "  Board assembly %4.4x%2.2x-%3.3d, Physical"
                           " connectors present:",
                           eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
                for (i = 0; i < 4; i++)
                        if (eeprom[5] & (1<<i))
                                printk(connectors[i]);
                printk("\n"KERN_INFO"  Primary interface chip %s PHY #%d.\n",
                           phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
                if (eeprom[7] & 0x0700)
                        printk(KERN_INFO "    Secondary interface chip %s.\n",
                                   phys[(eeprom[7]>>8)&7]);
                if (((eeprom[6]>>8) & 0x3f) == DP83840
                        ||  ((eeprom[6]>>8) & 0x3f) == DP83840A) {
                        int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
                        if (congenb)
                                mdi_reg23 |= 0x0100;
                        printk(KERN_INFO"  DP83840 specific setup, setting register 23 to %4.4x.\n",
                                   mdi_reg23);
                        mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
                }
                if ((option >= 0) && (option & 0x70)) {
                        printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
                                   (option & 0x20 ? 100 : 10),
                                   (option & 0x10 ? "full" : "half"));
                        mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
                                           ((option & 0x20) ? 0x2000 : 0) |    /* 100mbps? */
                                           ((option & 0x10) ? 0x0100 : 0));    /* Full duplex? */
                }

                /* Perform a system self-test. */
                self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
                self_test_results[0] = 0;
                self_test_results[1] = -1;
                iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
                do {
                        udelay(10);
                } while (self_test_results[1] == -1  &&  --boguscnt >= 0);

                if (boguscnt < 0) {             /* Test timed out. */
                        printk(KERN_ERR "Self test failed, status %8.8x:\n"
                                   KERN_ERR " Failure to initialize the i82557.\n"
                                   KERN_ERR " Verify that the card is in a bus-master"
                                   " capable slot.\n",
                                   self_test_results[1]);
                } else
                        printk(KERN_INFO "  General self-test: %s.\n"
                                   KERN_INFO "  Serial sub-system self-test: %s.\n"
                                   KERN_INFO "  Internal registers self-test: %s.\n"
                                   KERN_INFO "  ROM checksum self-test: %s (%#8.8x).\n",
                                   self_test_results[1] & 0x1000 ? "failed" : "passed",
                                   self_test_results[1] & 0x0020 ? "failed" : "passed",
                                   self_test_results[1] & 0x0008 ? "failed" : "passed",
                                   self_test_results[1] & 0x0004 ? "failed" : "passed",
                                   self_test_results[0]);
        }
#endif  /* kernel_bloat */

        iowrite32(PortReset, ioaddr + SCBPort);
        ioread32(ioaddr + SCBPort);
        udelay(10);

        /* Return the chip to its original power state. */
        pci_set_power_state(pdev, acpi_idle_state);

        pci_set_drvdata (pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);

        dev->irq = pdev->irq;

        sp->pdev = pdev;
        sp->msg_enable = DEBUG;
        sp->acpi_pwr = acpi_idle_state;
        sp->tx_ring = tx_ring_space;
        sp->tx_ring_dma = tx_ring_dma;
        sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
        sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
        init_timer(&sp->timer); /* used in ioctl() */
        spin_lock_init(&sp->lock);

        sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
        if (card_idx >= 0) {
                if (full_duplex[card_idx] >= 0)
                        sp->mii_if.full_duplex = full_duplex[card_idx];
        }
        sp->default_port = option >= 0 ? (option & 0x0f) : 0;

        sp->phy[0] = eeprom[6];
        sp->phy[1] = eeprom[7];

        sp->mii_if.phy_id = eeprom[6] & 0x1f;
        sp->mii_if.phy_id_mask = 0x1f;
        sp->mii_if.reg_num_mask = 0x1f;
        sp->mii_if.dev = dev;
        sp->mii_if.mdio_read = mdio_read;
        sp->mii_if.mdio_write = mdio_write;

        sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
        if ((pdev->device > 0x1030 && pdev->device < 0x103F)
            || (pdev->device == 0x2449) || (pdev->device == 0x2459)
            || (pdev->device == 0x245D)) {
                sp->chip_id = 1;
        }

        if (sp->rx_bug)
                printk(KERN_INFO "  Receiver lock-up workaround activated.\n");

        /* The Speedo-specific entries in the device structure. */
        dev->open = &speedo_open;
        dev->hard_start_xmit = &speedo_start_xmit;
        netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
        dev->stop = &speedo_close;
        dev->get_stats = &speedo_get_stats;
        dev->set_multicast_list = &set_rx_mode;
        dev->do_ioctl = &speedo_ioctl;
        SET_ETHTOOL_OPS(dev, &ethtool_ops);
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = &poll_speedo;
#endif

        if (register_netdevice(dev))
                goto err_free_unlock;
        rtnl_unlock();

        return 0;

 err_free_unlock:
        rtnl_unlock();
        free_netdev(dev);
        return -1;
}

static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
{
        void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
        int wait = 0;
        do
                if (ioread8(cmd_ioaddr) == 0) break;
        while(++wait <= 200);
        if (wait > 100)
                printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
                       ioread8(cmd_ioaddr), wait);

        iowrite8(cmd, cmd_ioaddr);

        for (wait = 0; wait <= 100; wait++)
                if (ioread8(cmd_ioaddr) == 0) return;
        for (; wait <= 20000; wait++)
                if (ioread8(cmd_ioaddr) == 0) return;
                else udelay(1);
        printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
               "  Current status %8.8x.\n",
               cmd, wait, ioread32(sp->regs + SCBStatus));
}

/* Serial EEPROM section.
   A "bit" grungy, but we work our way through bit-by-bit :->. */
/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK    0x01    /* EEPROM shift clock. */
#define EE_CS           0x02    /* EEPROM chip select. */
#define EE_DATA_WRITE   0x04    /* EEPROM chip data in. */
#define EE_DATA_READ    0x08    /* EEPROM chip data out. */
#define EE_ENB          (0x4800 | EE_CS)
#define EE_WRITE_0      0x4802
#define EE_WRITE_1      0x4806
#define EE_OFFSET       SCBeeprom

/* The fixes for the code were kindly provided by Dragan Stancevic
   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
   access timing.
   The publicly available sheet 64486302 (sec. 3.1) specifies a 1us access
   interval for the serial EEPROM.  However, it looks like there is an
   additional requirement dictating the larger udelay's in the code below.
   2000/05/24  SAW */
static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
{
        unsigned retval = 0;
        void __iomem *ee_addr = ioaddr + SCBeeprom;

        iowrite16(EE_ENB, ee_addr); udelay(2);
        iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);

        /* Shift the command bits out. */
        do {
                short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
                iowrite16(dataval, ee_addr); udelay(2);
                iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
                retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
        } while (--cmd_len >= 0);
        iowrite16(EE_ENB, ee_addr); udelay(2);

        /* Terminate the EEPROM access. */
        iowrite16(EE_ENB & ~EE_CS, ee_addr);
        return retval;
}
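
/* Worked example (editorial note): for an EEPROM with 8 address bits,
   speedo_found1() above calls do_eeprom_cmd(iobase, read_cmd | (i << 16), 27)
   with read_cmd = EE_READ_CMD << 24.  The 28 clocks (bit 27 down to bit 0)
   shift out the start/opcode bits (110) and the 8-bit word address, then
   clock the 16 data bits back in; the result arrives in the low 16 bits of
   the return value.  6-bit-address parts use EE_READ_CMD << 22 instead. */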

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
        struct speedo_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->regs;
        int val, boguscnt = 64*10;              /* <64 usec. to complete, typ 27 ticks */
        iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
        do {
                val = ioread32(ioaddr + SCBCtrlMDI);
                if (--boguscnt < 0) {
                        printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
                        break;
                }
        } while (! (val & 0x10000000));
        return val & 0xffff;
}
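
/* Worked example (editorial note): in the SCBCtrlMDI word the opcode sits
   in bits 27:26 (0x08000000 = read, 0x04000000 = write), the PHY address in
   bits 25:21, the register number in bits 20:16, and the data in bits 15:0.
   Bit 28 (0x10000000) is set by the chip when the MDI cycle has finished,
   which is what the polling loops here and in mdio_write() wait for. */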

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
        struct speedo_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->regs;
        int val, boguscnt = 64*10;              /* <64 usec. to complete, typ 27 ticks */
        iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
                 ioaddr + SCBCtrlMDI);
        do {
                val = ioread32(ioaddr + SCBCtrlMDI);
                if (--boguscnt < 0) {
                        printk(KERN_ERR " mdio_write() timed out with val = %8.8x.\n", val);
                        break;
                }
        } while (! (val & 0x10000000));
}

static int
speedo_open(struct net_device *dev)
{
        struct speedo_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->regs;
        int retval;

        if (netif_msg_ifup(sp))
                printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);

        pci_set_power_state(sp->pdev, PCI_D0);

        /* Set up the Tx queue early.. */
        sp->cur_tx = 0;
        sp->dirty_tx = 0;
        sp->last_cmd = NULL;
        sp->tx_full = 0;
        sp->in_interrupt = 0;

        /* .. we can safely take handler calls during init. */
        retval = request_irq(dev->irq, &speedo_interrupt, IRQF_SHARED, dev->name, dev);
        if (retval) {
                return retval;
        }

        dev->if_port = sp->default_port;

#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
        /* Retrigger negotiation to reset previous errors. */
        if ((sp->phy[0] & 0x8000) == 0) {
                int phy_addr = sp->phy[0] & 0x1f;
                /* Use 0x3300 for restarting NWay, other values to force xcvr:
                   0x0000 10-HD
                   0x0100 10-FD
                   0x2000 100-HD
                   0x2100 100-FD
                */
#ifdef honor_default_port
                mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
#else
                mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
#endif
        }
#endif

        speedo_init_rx_ring(dev);

        /* Fire up the hardware. */
        iowrite16(SCBMaskAll, ioaddr + SCBCmd);
        speedo_resume(dev);

        netdevice_start(dev);
        netif_start_queue(dev);

        /* Setup the chip and configure the multicast list. */
        sp->mc_setup_head = NULL;
        sp->mc_setup_tail = NULL;
        sp->flow_ctrl = sp->partner = 0;
        sp->rx_mode = -1;                       /* Invalid -> always reset the mode. */
        set_rx_mode(dev);
        if ((sp->phy[0] & 0x8000) == 0)
                sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);

        mii_check_link(&sp->mii_if);

        if (netif_msg_ifup(sp)) {
                printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
                           dev->name, ioread16(ioaddr + SCBStatus));
        }

        /* Set the timer.  The timer serves a dual purpose:
           1) to monitor the media interface (e.g. link beat) and perhaps switch
           to an alternate media type
           2) to monitor Rx activity, and restart the Rx process if the receiver
           hangs. */
        sp->timer.expires = RUN_AT((24*HZ)/10);                 /* 2.4 sec. */
        sp->timer.data = (unsigned long)dev;
        sp->timer.function = &speedo_timer;                     /* timer handler */
        add_timer(&sp->timer);

        /* No need to wait for the command unit to accept here. */
        if ((sp->phy[0] & 0x8000) == 0)
                mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);

        return 0;
}

/* Start the chip hardware after a full reset. */
static void speedo_resume(struct net_device *dev)
{
        struct speedo_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->regs;

        /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
        sp->tx_threshold = 0x01208000;

        /* Set the segment registers to '0'. */
        if (wait_for_cmd_done(dev, sp) != 0) {
                iowrite32(PortPartialReset, ioaddr + SCBPort);
                udelay(10);
        }

        iowrite32(0, ioaddr + SCBPointer);
        ioread32(ioaddr + SCBPointer);          /* Flush to PCI. */
        udelay(10);                     /* Bogus, but it avoids the bug. */

        /* Note: these next two operations can take a while. */
        do_slow_command(dev, sp, RxAddrLoad);
        do_slow_command(dev, sp, CUCmdBase);

        /* Load the statistics block and rx ring addresses. */
        iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
        ioread32(ioaddr + SCBPointer);          /* Flush to PCI */

        iowrite8(CUStatsAddr, ioaddr + SCBCmd);
        sp->lstats->done_marker = 0;
        wait_for_cmd_done(dev, sp);

        if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
                if (netif_msg_rx_err(sp))
                        printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
                                        dev->name);
        } else {
                iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
                         ioaddr + SCBPointer);
                ioread32(ioaddr + SCBPointer);          /* Flush to PCI */
        }

        /* Note: RxStart should complete instantly. */
        do_slow_command(dev, sp, RxStart);
        do_slow_command(dev, sp, CUDumpStats);

        /* Fill the first command with our physical address. */
        {
                struct descriptor *ias_cmd;

                ias_cmd =
                        (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
                /* Avoid a bug(?!) here by marking the command already completed. */
                ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
                ias_cmd->link =
                        cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
                memcpy(ias_cmd->params, dev->dev_addr, 6);
                if (sp->last_cmd)
                        clear_suspend(sp->last_cmd);
                sp->last_cmd = ias_cmd;
        }

        /* Start the chip's Tx process and unmask interrupts. */
        iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
                 ioaddr + SCBPointer);
        /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
           remain masked --Dragan */
        iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
}

/*
 * Sometimes the receiver stops making progress.  This routine knows how to
 * get it going again, without losing packets or being otherwise nasty like
 * a chip reset would be.  Previously the driver had a whole sequence
 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
 * do another, etc.  But those things don't really matter.  Separate logic
 * in the ISR provides for allocating buffers--the other half of operation
 * is just making sure the receiver is active.  speedo_rx_soft_reset does that.
 * The problem with the old, more involved algorithm showed up under
 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
 */
static void
speedo_rx_soft_reset(struct net_device *dev)
{
        struct speedo_private *sp = netdev_priv(dev);
        struct RxFD *rfd;
        void __iomem *ioaddr;

        ioaddr = sp->regs;
        if (wait_for_cmd_done(dev, sp) != 0) {
                printk("%s: previous command stalled\n", dev->name);
                return;
        }
        /*
         * Put the hardware into a known state.
         */
        iowrite8(RxAbort, ioaddr + SCBCmd);

        rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];

        rfd->rx_buf_addr = cpu_to_le32(0xffffffff);

        if (wait_for_cmd_done(dev, sp) != 0) {
                printk("%s: RxAbort command stalled\n", dev->name);
                return;
        }
        iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
                ioaddr + SCBPointer);
        iowrite8(RxStart, ioaddr + SCBCmd);
}


/* Media monitoring and control. */
static void speedo_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct speedo_private *sp = netdev_priv(dev);
        void __iomem *ioaddr = sp->regs;
        int phy_num = sp->phy[0] & 0x1f;

        /* We have MII and lost link beat. */
        if ((sp->phy[0] & 0x8000) == 0) {
                int partner = mdio_read(dev, phy_num, MII_LPA);
                if (partner != sp->partner) {
                        int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
                        if (netif_msg_link(sp)) {
                                printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
                                printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
                                           dev->name, sp->partner, partner, sp->mii_if.advertising);
                        }
                        sp->partner = partner;
                        if (flow_ctrl != sp->flow_ctrl) {
                                sp->flow_ctrl = flow_ctrl;
                                sp->rx_mode = -1;       /* Trigger a reload. */
                        }
                }
        }
        mii_check_link(&sp->mii_if);
        if (netif_msg_timer(sp)) {
                printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
                           dev->name, ioread16(ioaddr + SCBStatus));
        }
        if (sp->rx_mode < 0  ||
                (sp->rx_bug  && jiffies - sp->last_rx_time > 2*HZ)) {
                /* We haven't received a packet in a Long Time.  We might have been
                   bitten by the receiver hang bug.  This can be cleared by sending
                   a set multicast list command. */
                if (netif_msg_timer(sp))
                        printk(KERN_DEBUG "%s: Sending a multicast list set command"
                                   " from a timer routine,"
                                   " m=%d, j=%ld, l=%ld.\n",
                                   dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
                set_rx_mode(dev);
        }
        /* We must continue to monitor the media. */
        sp->timer.expires = RUN_AT(2*HZ);                       /* 2.0 sec. */
        add_timer(&sp->timer);
}

static void speedo_show_state(struct net_device *dev)
{
        struct speedo_private *sp = netdev_priv(dev);
        int i;

        if (netif_msg_pktdata(sp)) {
                printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n",
                    dev->name, sp->cur_tx, sp->dirty_tx);
                for (i = 0; i < TX_RING_SIZE; i++)
                        printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", dev->name,
                            i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
                            i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
                            i, sp->tx_ring[i].status);

                printk(KERN_DEBUG "%s: Printing Rx ring"
                    " (next to receive into %u, dirty index %u).\n",
                    dev->name, sp->cur_rx, sp->dirty_rx);
                for (i = 0; i < RX_RING_SIZE; i++)
                        printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
                            sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
                            i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
                            i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
                            i, (sp->rx_ringp[i] != NULL) ?
                            (unsigned)sp->rx_ringp[i]->status : 0);
        }

#if 0
        {
                void __iomem *ioaddr = sp->regs;
                int phy_num = sp->phy[0] & 0x1f;
                for (i = 0; i < 16; i++) {
                        /* FIXME: what does it mean?  --SAW */
                        if (i == 6) i = 21;
                        printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
                                   dev->name, phy_num, i, mdio_read(dev, phy_num, i));
                }
        }
#endif

}
1235
1236/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1237static void
1238speedo_init_rx_ring(struct net_device *dev)
1239{
1240        struct speedo_private *sp = netdev_priv(dev);
1241        struct RxFD *rxf, *last_rxf = NULL;
1242        dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1243        int i;
1244
1245        sp->cur_rx = 0;
1246
1247        for (i = 0; i < RX_RING_SIZE; i++) {
1248                struct sk_buff *skb;
1249                skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1250                if (skb)
1251                        rx_align(skb);        /* Align IP on 16 byte boundary */
1252                sp->rx_skbuff[i] = skb;
1253                if (skb == NULL)
1254                        break;                  /* OK.  Just initially short of Rx bufs. */
1255                skb->dev = dev;                 /* Mark as being used by this device. */
1256                rxf = (struct RxFD *)skb->data;
1257                sp->rx_ringp[i] = rxf;
1258                sp->rx_ring_dma[i] =
1259                        pci_map_single(sp->pdev, rxf,
1260                                        PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1261                skb_reserve(skb, sizeof(struct RxFD));
1262                if (last_rxf) {
1263                        last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1264                        pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1265                                                                                   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1266                }
1267                last_rxf = rxf;
1268                last_rxf_dma = sp->rx_ring_dma[i];
1269                rxf->status = cpu_to_le32(0x00000001);  /* '1' is flag value only. */
1270                rxf->link = 0;                                          /* None yet. */
1271                /* This field unused by i82557. */
1272                rxf->rx_buf_addr = cpu_to_le32(0xffffffff);
1273                rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1274                pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1275                                                                           sizeof(struct RxFD), PCI_DMA_TODEVICE);
1276        }
1277        sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1278        /* Mark the last entry as end-of-list. */
1279        last_rxf->status = cpu_to_le32(0xC0000002);     /* '2' is flag value only. */
1280        pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1281                                                                   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1282        sp->last_rxf = last_rxf;
1283        sp->last_rxf_dma = last_rxf_dma;
1284}
1285
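/* A stand-alone sketch of the buffer layout built in speedo_init_rx_ring():
   the RxFD descriptor is placed at the head of each receive buffer and the
   packet data begins right behind it, which is what the skb_reserve() call
   above accomplishes.  Types and sizes here are illustrative only. */
#include <stdlib.h>
#include <string.h>

struct sketch_rxfd {
        unsigned int status;
        unsigned int link;
        unsigned int rx_buf_addr;
        unsigned int count;
};

int main(void)
{
        const size_t pkt_buf_sz = 1536;
        unsigned char *buf = malloc(pkt_buf_sz + sizeof(struct sketch_rxfd));
        struct sketch_rxfd *rxf;
        unsigned char *data;

        if (!buf)
                return 1;
        rxf = (struct sketch_rxfd *)buf;        /* descriptor first ... */
        data = buf + sizeof(*rxf);              /* ... packet bytes follow */
        memset(rxf, 0, sizeof(*rxf));
        rxf->count = (unsigned int)pkt_buf_sz << 16;    /* size in high half */
        (void)data;
        free(buf);
        return 0;
}
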
1286static void speedo_purge_tx(struct net_device *dev)
1287{
1288        struct speedo_private *sp = netdev_priv(dev);
1289        int entry;
1290
1291        while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1292                entry = sp->dirty_tx % TX_RING_SIZE;
1293                if (sp->tx_skbuff[entry]) {
1294                        sp->stats.tx_errors++;
1295                        pci_unmap_single(sp->pdev,
1296                                        le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1297                                        sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1298                        dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1299                        sp->tx_skbuff[entry] = NULL;
1300                }
1301                sp->dirty_tx++;
1302        }
1303        while (sp->mc_setup_head != NULL) {
1304                struct speedo_mc_block *t;
1305                if (netif_msg_tx_err(sp))
1306                        printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1307                pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1308                                sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1309                t = sp->mc_setup_head->next;
1310                kfree(sp->mc_setup_head);
1311                sp->mc_setup_head = t;
1312        }
1313        sp->mc_setup_tail = NULL;
1314        sp->tx_full = 0;
1315        netif_wake_queue(dev);
1316}
1317
1318static void reset_mii(struct net_device *dev)
1319{
1320        struct speedo_private *sp = netdev_priv(dev);
1321
1322        /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1323        if ((sp->phy[0] & 0x8000) == 0) {
1324                int phy_addr = sp->phy[0] & 0x1f;
1325                int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1326                int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1327                mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1328                mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1329                mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1330                mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1331#ifdef honor_default_port
1332                mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1333#else
1334                mdio_read(dev, phy_addr, MII_BMCR);
1335                mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1336                mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1337#endif
1338        }
1339}
1340
1341static void speedo_tx_timeout(struct net_device *dev)
1342{
1343        struct speedo_private *sp = netdev_priv(dev);
1344        void __iomem *ioaddr = sp->regs;
1345        int status = ioread16(ioaddr + SCBStatus);
1346        unsigned long flags;
1347
1348        if (netif_msg_tx_err(sp)) {
1349                printk(KERN_WARNING "%s: Transmit timed out: status %4.4x"
1350                   " %4.4x at %d/%d command %8.8x.\n",
1351                   dev->name, status, ioread16(ioaddr + SCBCmd),
1352                   sp->dirty_tx, sp->cur_tx,
1353                   sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1354
1355        }
1356        speedo_show_state(dev);
1357#if 0
1358        if ((status & 0x00C0) != 0x0080
1359                &&  (status & 0x003C) == 0x0010) {
1360                /* Only the command unit has stopped. */
1361                printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1362                           dev->name);
1363                iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1364                         ioaddr + SCBPointer);
1365                iowrite16(CUStart, ioaddr + SCBCmd);
1366                reset_mii(dev);
1367        } else {
1368#else
1369        {
1370#endif
1371                del_timer_sync(&sp->timer);
1372                /* Reset the Tx and Rx units. */
1373                iowrite32(PortReset, ioaddr + SCBPort);
1374                /* We may get spurious interrupts here.  But I don't think they
1375                   can do much harm.  1999/12/09 SAW */
1376                udelay(10);
1377                /* Disable interrupts. */
1378                iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1379                synchronize_irq(dev->irq);
1380                speedo_tx_buffer_gc(dev);
1381                /* Free as much as possible.
1382                   It helps to recover from a hang because of out-of-memory.
1383                   It also simplifies speedo_resume() in case TX ring is full or
1384                   close-to-be full. */
1385                speedo_purge_tx(dev);
1386                speedo_refill_rx_buffers(dev, 1);
1387                spin_lock_irqsave(&sp->lock, flags);
1388                speedo_resume(dev);
1389                sp->rx_mode = -1;
1390                dev->trans_start = jiffies;
1391                spin_unlock_irqrestore(&sp->lock, flags);
1392                set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1393                /* Reset MII transceiver.  Do it before starting the timer to serialize
1394                   mdio_xxx operations.  Yes, it's paranoia :-)  2000/05/09 SAW */
1395                reset_mii(dev);
1396                sp->timer.expires = RUN_AT(2*HZ);
1397                add_timer(&sp->timer);
1398        }
1399        return;
1400}
1401
1402static int
1403speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1404{
1405        struct speedo_private *sp = netdev_priv(dev);
1406        void __iomem *ioaddr = sp->regs;
1407        int entry;
1408
1409        /* Prevent interrupts from changing the Tx ring from underneath us. */
1410        unsigned long flags;
1411
1412        spin_lock_irqsave(&sp->lock, flags);
1413
1414        /* Check if there is enough space. */
1415        if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1416                printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1417                netif_stop_queue(dev);
1418                sp->tx_full = 1;
1419                spin_unlock_irqrestore(&sp->lock, flags);
1420                return 1;
1421        }
1422
1423        /* Calculate the Tx descriptor entry. */
1424        entry = sp->cur_tx++ % TX_RING_SIZE;
1425
1426        sp->tx_skbuff[entry] = skb;
1427        sp->tx_ring[entry].status =
1428                cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1429        if (!(entry & ((TX_RING_SIZE>>2)-1)))
1430                sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1431        sp->tx_ring[entry].link =
1432                cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1433        sp->tx_ring[entry].tx_desc_addr =
1434                cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1435        /* The data region is always in one buffer descriptor. */
1436        sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1437        sp->tx_ring[entry].tx_buf_addr0 =
1438                cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1439                                           skb->len, PCI_DMA_TODEVICE));
1440        sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1441
1442        /* Workaround for a hardware bug on 10 Mbit half duplex. */
1443
1444        if ((sp->partner == 0) && (sp->chip_id == 1)) {
1445                wait_for_cmd_done(dev, sp);
1446                iowrite8(0, ioaddr + SCBCmd);
1447                udelay(1);
1448        }
1449
1450        /* Trigger the command unit resume. */
1451        wait_for_cmd_done(dev, sp);
1452        clear_suspend(sp->last_cmd);
1453        /* We want the time window between clearing suspend flag on the previous
1454           command and resuming CU to be as small as possible.
1455           Interrupts in between are very undesired.  --SAW */
1456        iowrite8(CUResume, ioaddr + SCBCmd);
1457        sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1458
1459        /* Leave room for set_rx_mode().  If no more space remains than is
1460           reserved for the multicast filter, mark the ring as full. */
1461        if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1462                netif_stop_queue(dev);
1463                sp->tx_full = 1;
1464        }
1465
1466        spin_unlock_irqrestore(&sp->lock, flags);
1467
1468        dev->trans_start = jiffies;
1469
1470        return 0;
1471}
1472
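/* A stand-alone sketch of the suspend/resume chaining performed above:
   every command descriptor is queued with its suspend bit set, and new
   work is appended by clearing that bit on the previous tail and issuing
   a CU-resume.  The SKETCH_* names are illustrative; the real driver
   kicks the hardware with iowrite8(CUResume, ...). */
#include <stddef.h>
#include <stdint.h>
#include <assert.h>

#define SKETCH_SUSPEND 0x40000000u

struct sketch_cmd {
        uint32_t status;
        struct sketch_cmd *next;
};

static void sketch_append(struct sketch_cmd **last, struct sketch_cmd *cmd)
{
        cmd->status |= SKETCH_SUSPEND;          /* new tail parks the CU */
        cmd->next = NULL;
        (*last)->next = cmd;
        (*last)->status &= ~SKETCH_SUSPEND;     /* old tail no longer stops it */
        /* <-- here the real driver writes the CU-resume command */
        *last = cmd;
}

int main(void)
{
        struct sketch_cmd a = { SKETCH_SUSPEND, NULL }, b = { 0, NULL };
        struct sketch_cmd *last = &a;

        sketch_append(&last, &b);
        assert(!(a.status & SKETCH_SUSPEND) && (b.status & SKETCH_SUSPEND));
        assert(last == &b && a.next == &b);
        return 0;
}
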
1473static void speedo_tx_buffer_gc(struct net_device *dev)
1474{
1475        unsigned int dirty_tx;
1476        struct speedo_private *sp = netdev_priv(dev);
1477
1478        dirty_tx = sp->dirty_tx;
1479        while ((int)(sp->cur_tx - dirty_tx) > 0) {
1480                int entry = dirty_tx % TX_RING_SIZE;
1481                int status = le32_to_cpu(sp->tx_ring[entry].status);
1482
1483                if (netif_msg_tx_done(sp))
1484                        printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1485                                   entry, status);
1486                if ((status & StatusComplete) == 0)
1487                        break;                  /* It still hasn't been processed. */
1488                if ((status & TxUnderrun)
1489                    && sp->tx_threshold < 0x01e08000) {
1490                        if (netif_msg_tx_err(sp))
1491                                printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1492                                           dev->name);
1493                        sp->tx_threshold += 0x00040000;
1494                }
1495                /* Free the original skb. */
1496                if (sp->tx_skbuff[entry]) {
1497                        sp->stats.tx_packets++; /* Count only user packets. */
1498                        sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1499                        pci_unmap_single(sp->pdev,
1500                                        le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1501                                        sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1502                        dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1503                        sp->tx_skbuff[entry] = NULL;
1504                }
1505                dirty_tx++;
1506        }
1507
1508        if ((int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1509                if (netif_msg_tx_err(sp))
1510                        printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1511                                   " full=%d.\n",
1512                                   dirty_tx, sp->cur_tx, sp->tx_full);
1513                dirty_tx += TX_RING_SIZE;
1514        }
1514
1515        while (sp->mc_setup_head != NULL
1516                   && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1517                struct speedo_mc_block *t;
1518                if (netif_msg_tx_err(sp))
1519                        printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1520                pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1521                                sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1522                t = sp->mc_setup_head->next;
1523                kfree(sp->mc_setup_head);
1524                sp->mc_setup_head = t;
1525        }
1526        if (sp->mc_setup_head == NULL)
1527                sp->mc_setup_tail = NULL;
1528
1529        sp->dirty_tx = dirty_tx;
1530}
1531
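/* A stand-alone sketch of the index bookkeeping used throughout this
   driver: cur_tx and dirty_tx are free-running counters, "% TX_RING_SIZE"
   maps them onto ring slots, and "(int)(cur - dirty)" gives the in-flight
   count correctly even across counter wraparound.  SKETCH_RING is an
   illustrative stand-in for TX_RING_SIZE. */
#include <assert.h>

#define SKETCH_RING 64u

int main(void)
{
        unsigned int cur = 0xfffffffeu;         /* counters about to wrap */
        unsigned int dirty = 0xfffffffau;

        assert((int)(cur - dirty) == 4);        /* 4 entries still queued */
        assert(cur % SKETCH_RING == 62);        /* slot inside the ring */
        cur += 4;                               /* wraps past zero safely */
        assert((int)(cur - dirty) == 8);
        return 0;
}
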
1532/* The interrupt handler does all of the Rx thread work and cleans up
1533   after the Tx thread. */
1534static irqreturn_t speedo_interrupt(int irq, void *dev_instance)
1535{
1536        struct net_device *dev = (struct net_device *)dev_instance;
1537        struct speedo_private *sp;
1538        void __iomem *ioaddr;
1539        long boguscnt = max_interrupt_work;
1540        unsigned short status;
1541        unsigned int handled = 0;
1542
1543        sp = netdev_priv(dev);
1544        ioaddr = sp->regs;
1545
1546#ifndef final_version
1547        /* A lock to prevent simultaneous entry on SMP machines. */
1548        if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1549                printk(KERN_ERR "%s: SMP simultaneous entry of an interrupt handler.\n",
1550                           dev->name);
1551                sp->in_interrupt = 0;   /* Avoid halting machine. */
1552                return IRQ_NONE;
1553        }
1554#endif
1555
1556        do {
1557                status = ioread16(ioaddr + SCBStatus);
1558                /* Acknowledge all of the current interrupt sources ASAP. */
1559                /* Will change from 0xfc00 to 0xff00 when we start handling
1560                   FCP and ER interrupts --Dragan */
1561                iowrite16(status & 0xfc00, ioaddr + SCBStatus);
1562
1563                if (netif_msg_intr(sp))
1564                        printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
1565                                   dev->name, status);
1566
1567                if ((status & 0xfc00) == 0)
1568                        break;
1569                handled = 1;
1570
1571
1572                if ((status & 0x5000) ||        /* Packet received, or Rx error. */
1573                        (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1574                                                                        /* Need to gather the postponed packet. */
1575                        speedo_rx(dev);
1576
1577                /* Always check if all rx buffers are allocated.  --SAW */
1578                speedo_refill_rx_buffers(dev, 0);
1579
1580                spin_lock(&sp->lock);
1581                /*
1582                 * The chip may have suspended reception for various reasons.
1583                 * Check for that, and re-prime it should this be the case.
1584                 */
1585                switch ((status >> 2) & 0xf) {
1586                case 0: /* Idle */
1587                        break;
1588                case 1: /* Suspended */
1589                case 2: /* No resources (RxFDs) */
1590                case 9: /* Suspended with no more RBDs */
1591                case 10: /* No resources due to no RBDs */
1592                case 12: /* Ready with no RBDs */
1593                        speedo_rx_soft_reset(dev);
1594                        break;
1595                case 3:  case 5:  case 6:  case 7:  case 8:
1596                case 11:  case 13:  case 14:  case 15:
1597                        /* these are all reserved values */
1598                        break;
1599                }
1600
1601
1602                /* User interrupt, Command/Tx unit interrupt or CU not active. */
1603                if (status & 0xA400) {
1604                        speedo_tx_buffer_gc(dev);
1605                        if (sp->tx_full
1606                                && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1607                                /* The ring is no longer full. */
1608                                sp->tx_full = 0;
1609                                netif_wake_queue(dev); /* Attention: under a spinlock.  --SAW */
1610                        }
1611                }
1612
1613                spin_unlock(&sp->lock);
1614
1615                if (--boguscnt < 0) {
1616                        printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1617                                   dev->name, status);
1618                        /* Clear all interrupt sources. */
1619                        /* Will change from 0xfc00 to 0xff00 when we start handling
1620                           FCP and ER interrupts --Dragan */
1621                        iowrite16(0xfc00, ioaddr + SCBStatus);
1622                        break;
1623                }
1624        } while (1);
1625
1626        if (netif_msg_intr(sp))
1627                printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1628                           dev->name, ioread16(ioaddr + SCBStatus));
1629
1630        clear_bit(0, (void*)&sp->in_interrupt);
1631        return IRQ_RETVAL(handled);
1632}
1633
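/* A stand-alone sketch of the receive-unit state decode in the interrupt
   handler above: bits 2..5 of the SCB status word hold the RU state, and
   only the suspended / out-of-resources states need a software kick (the
   driver's speedo_rx_soft_reset()).  The function name is illustrative. */
#include <assert.h>

static int sketch_ru_needs_kick(unsigned short scb_status)
{
        switch ((scb_status >> 2) & 0xf) {
        case 1:         /* suspended */
        case 2:         /* no resources (RxFDs) */
        case 9:         /* suspended with no more RBDs */
        case 10:        /* no resources due to no RBDs */
        case 12:        /* ready with no RBDs */
                return 1;
        default:        /* idle, ready, or reserved values */
                return 0;
        }
}

int main(void)
{
        assert(sketch_ru_needs_kick(1 << 2));   /* suspended -> kick */
        assert(!sketch_ru_needs_kick(0));       /* idle -> leave alone */
        return 0;
}
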
1634static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1635{
1636        struct speedo_private *sp = netdev_priv(dev);
1637        struct RxFD *rxf;
1638        struct sk_buff *skb;
1639        /* Get a fresh skbuff to replace the consumed one. */
1640        skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1641        if (skb)
1642                rx_align(skb);          /* Align IP on 16 byte boundary */
1643        sp->rx_skbuff[entry] = skb;
1644        if (skb == NULL) {
1645                sp->rx_ringp[entry] = NULL;
1646                return NULL;
1647        }
1648        rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
1649        sp->rx_ring_dma[entry] =
1650                pci_map_single(sp->pdev, rxf,
1651                                           PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1652        skb->dev = dev;
1653        skb_reserve(skb, sizeof(struct RxFD));
1654        rxf->rx_buf_addr = cpu_to_le32(0xffffffff);
1655        pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1656                                                                   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1657        return rxf;
1658}
1659
1660static inline void speedo_rx_link(struct net_device *dev, int entry,
1661                                                                  struct RxFD *rxf, dma_addr_t rxf_dma)
1662{
1663        struct speedo_private *sp = netdev_priv(dev);
1664        rxf->status = cpu_to_le32(0xC0000001);  /* '1' for driver use only. */
1665        rxf->link = 0;                  /* None yet. */
1666        rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1667        sp->last_rxf->link = cpu_to_le32(rxf_dma);
1668        sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1669        pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1670                                                                   sizeof(struct RxFD), PCI_DMA_TODEVICE);
1671        sp->last_rxf = rxf;
1672        sp->last_rxf_dma = rxf_dma;
1673}
1674
1675static int speedo_refill_rx_buf(struct net_device *dev, int force)
1676{
1677        struct speedo_private *sp = netdev_priv(dev);
1678        int entry;
1679        struct RxFD *rxf;
1680
1681        entry = sp->dirty_rx % RX_RING_SIZE;
1682        if (sp->rx_skbuff[entry] == NULL) {
1683                rxf = speedo_rx_alloc(dev, entry);
1684                if (rxf == NULL) {
1685                        unsigned int forw;
1686                        int forw_entry;
1687                        if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1688                                printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1689                                                dev->name, force);
1690                                sp->rx_ring_state |= RrOOMReported;
1691                        }
1692                        speedo_show_state(dev);
1693                        if (!force)
1694                                return -1;      /* Better luck next time!  */
1695                        /* Borrow an skb from one of next entries. */
1696                        for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1697                                if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1698                                        break;
1699                        if (forw == sp->cur_rx)
1700                                return -1;
1701                        forw_entry = forw % RX_RING_SIZE;
1702                        sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1703                        sp->rx_skbuff[forw_entry] = NULL;
1704                        rxf = sp->rx_ringp[forw_entry];
1705                        sp->rx_ringp[forw_entry] = NULL;
1706                        sp->rx_ringp[entry] = rxf;
1707                }
1708        } else {
1709                rxf = sp->rx_ringp[entry];
1710        }
1711        speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1712        sp->dirty_rx++;
1713        sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1714        return 0;
1715}
1716
1717static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1718{
1719        struct speedo_private *sp = netdev_priv(dev);
1720
1721        /* Refill the RX ring. */
1722        while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1723                        speedo_refill_rx_buf(dev, force) != -1);
1724}
1725
1726static int
1727speedo_rx(struct net_device *dev)
1728{
1729        struct speedo_private *sp = netdev_priv(dev);
1730        int entry = sp->cur_rx % RX_RING_SIZE;
1731        int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1732        int alloc_ok = 1;
1733        int npkts = 0;
1734
1735        if (netif_msg_intr(sp))
1736                printk(KERN_DEBUG " In speedo_rx().\n");
1737        /* If we own the next entry, it's a new packet. Send it up. */
1738        while (sp->rx_ringp[entry] != NULL) {
1739                int status;
1740                int pkt_len;
1741
1742                pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1743                                                                        sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1744                status = le32_to_cpu(sp->rx_ringp[entry]->status);
1745                pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1746
1747                if (!(status & RxComplete))
1748                        break;
1749
1750                if (--rx_work_limit < 0)
1751                        break;
1752
1753                /* Check for a rare out-of-memory case: the current buffer is
1754                   the last buffer allocated in the RX ring.  --SAW */
1755                if (sp->last_rxf == sp->rx_ringp[entry]) {
1756                        /* Postpone the packet.  It'll be reaped at an interrupt when this
1757                           packet is no longer the last packet in the ring. */
1758                        if (netif_msg_rx_err(sp))
1759                                printk(KERN_DEBUG "%s: RX packet postponed!\n",
1760                                           dev->name);
1761                        sp->rx_ring_state |= RrPostponed;
1762                        break;
1763                }
1764
1765                if (netif_msg_rx_status(sp))
1766                        printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
1767                                   pkt_len);
1768                if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1769                        if (status & RxErrTooBig)
1770                                printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1771                                           "status %8.8x!\n", dev->name, status);
1772                        else if (! (status & RxOK)) {
1773                                /* There was a fatal error.  This *should* be impossible. */
1774                                sp->stats.rx_errors++;
1775                                printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1776                                           "status %8.8x.\n",
1777                                           dev->name, status);
1778                        }
1779                } else {
1780                        struct sk_buff *skb;
1781
1782                        /* Check if the packet is long enough to just accept without
1783                           copying to a properly sized skbuff. */
1784                        if (pkt_len < rx_copybreak
1785                                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1786                                skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
1787                                /* 'skb_put()' points to the start of sk_buff data area. */
1788                                pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1789                                                                                        sizeof(struct RxFD) + pkt_len,
1790                                                                                        PCI_DMA_FROMDEVICE);
1791
1792#if 1 || USE_IP_CSUM
1793                                /* Packet is in one chunk -- we can copy + cksum. */
1794                                skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
1795                                skb_put(skb, pkt_len);
1796#else
1797                                skb_copy_from_linear_data(sp->rx_skbuff[entry],
1798                                                          skb_put(skb, pkt_len),
1799                                                          pkt_len);
1800#endif
1801                                pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1802                                                                                           sizeof(struct RxFD) + pkt_len,
1803                                                                                           PCI_DMA_FROMDEVICE);
1804                                npkts++;
1805                        } else {
1806                                /* Pass up the already-filled skbuff. */
1807                                skb = sp->rx_skbuff[entry];
1808                                if (skb == NULL) {
1809                                        printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1810                                                   dev->name);
1811                                        break;
1812                                }
1813                                sp->rx_skbuff[entry] = NULL;
1814                                skb_put(skb, pkt_len);
1815                                npkts++;
1816                                sp->rx_ringp[entry] = NULL;
1817                                pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1818                                                                 PKT_BUF_SZ + sizeof(struct RxFD),
1819                                                                 PCI_DMA_FROMDEVICE);
1820                        }
1821                        skb->protocol = eth_type_trans(skb, dev);
1822                        netif_rx(skb);
1823                        dev->last_rx = jiffies;
1824                        sp->stats.rx_packets++;
1825                        sp->stats.rx_bytes += pkt_len;
1826                }
1827                entry = (++sp->cur_rx) % RX_RING_SIZE;
1828                sp->rx_ring_state &= ~RrPostponed;
1829                /* Refill the recently taken buffers.
1830                   Do it one-by-one to handle traffic bursts better. */
1831                if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1832                        alloc_ok = 0;
1833        }
1834
1835        /* Try hard to refill the recently taken buffers. */
1836        speedo_refill_rx_buffers(dev, 1);
1837
1838        if (npkts)
1839                sp->last_rx_time = jiffies;
1840
1841        return 0;
1842}
1843
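/* A stand-alone sketch of the rx_copybreak decision in speedo_rx(): short
   frames are copied into a right-sized buffer so the large ring buffer can
   be reused immediately, while long frames hand the ring buffer itself up
   the stack and the ring slot is refilled with a fresh one.  All names
   here are illustrative. */
#include <stdlib.h>
#include <string.h>

#define SKETCH_COPYBREAK 200

/* Returns the buffer to pass up; *ring_buf is NULLed when it was handed
   up (consumed) rather than copied. */
static unsigned char *sketch_receive(unsigned char **ring_buf, size_t len)
{
        unsigned char *big;

        if (len < SKETCH_COPYBREAK) {
                unsigned char *copy = malloc(len);
                if (copy)
                        memcpy(copy, *ring_buf, len);   /* ring buffer reused */
                return copy;
        }
        big = *ring_buf;
        *ring_buf = NULL;       /* slot must be refilled by the caller */
        return big;
}

int main(void)
{
        unsigned char *slot = malloc(1536);
        unsigned char *pkt;

        if (!slot)
                return 1;
        pkt = sketch_receive(&slot, 64);        /* copy path taken */
        free(pkt);
        free(slot);             /* still owned by the ring on this path */
        return 0;
}
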
1844static int
1845speedo_close(struct net_device *dev)
1846{
1847        struct speedo_private *sp = netdev_priv(dev);
1848        void __iomem *ioaddr = sp->regs;
1849        int i;
1850
1851        netdevice_stop(dev);
1852        netif_stop_queue(dev);
1853
1854        if (netif_msg_ifdown(sp))
1855                printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1856                           dev->name, ioread16(ioaddr + SCBStatus));
1857
1858        /* Shut off the media monitoring timer. */
1859        del_timer_sync(&sp->timer);
1860
1861        iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1862
1863        /* Shutting down the chip nicely fails to disable flow control. So.. */
1864        iowrite32(PortPartialReset, ioaddr + SCBPort);
1865        ioread32(ioaddr + SCBPort); /* flush posted write */
1866        /*
1867         * The chip requires a 10 microsecond quiet period.  Wait here!
1868         */
1869        udelay(10);
1870
1871        free_irq(dev->irq, dev);
1872        speedo_show_state(dev);
1873
1874        /* Free all the skbuffs in the Rx and Tx queues. */
1875        for (i = 0; i < RX_RING_SIZE; i++) {
1876                struct sk_buff *skb = sp->rx_skbuff[i];
1877                sp->rx_skbuff[i] = NULL;
1878                /* Clear the Rx descriptors. */
1879                if (skb) {
1880                        pci_unmap_single(sp->pdev,
1881                                         sp->rx_ring_dma[i],
1882                                         PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1883                        dev_kfree_skb(skb);
1884                }
1885        }
1886
1887        for (i = 0; i < TX_RING_SIZE; i++) {
1888                struct sk_buff *skb = sp->tx_skbuff[i];
1889                sp->tx_skbuff[i] = NULL;
1890                /* Clear the Tx descriptors. */
1891                if (skb) {
1892                        pci_unmap_single(sp->pdev,
1893                                         le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1894                                         skb->len, PCI_DMA_TODEVICE);
1895                        dev_kfree_skb(skb);
1896                }
1897        }
1898
1899        /* Free multicast setting blocks. */
1900        for (i = 0; sp->mc_setup_head != NULL; i++) {
1901                struct speedo_mc_block *t;
1902                t = sp->mc_setup_head->next;
1903                kfree(sp->mc_setup_head);
1904                sp->mc_setup_head = t;
1905        }
1906        sp->mc_setup_tail = NULL;
1907        if (netif_msg_ifdown(sp))
1908                printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1909
1910        pci_set_power_state(sp->pdev, PCI_D2);
1911
1912        return 0;
1913}
1914
1915/* The Speedo-3 has an especially awkward and unusable method of getting
1916   statistics out of the chip.  It takes an unpredictable length of time
1917   for the dump-stats command to complete.  To avoid a busy-wait loop we
1918   update the stats with the previous dump results, and then trigger a
1919   new dump.
1920
1921   Oh, and incoming frames are dropped while executing dump-stats!
1922   */
1923static struct net_device_stats *
1924speedo_get_stats(struct net_device *dev)
1925{
1926        struct speedo_private *sp = netdev_priv(dev);
1927        void __iomem *ioaddr = sp->regs;
1928
1929        /* Update only if the previous dump finished. */
1930        if (sp->lstats->done_marker == cpu_to_le32(0xA007)) {
1931                sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1932                sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1933                sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1934                sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1935                /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1936                sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1937                sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1938                sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1939                sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1940                sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1941                sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1942                sp->lstats->done_marker = 0x0000;
1943                if (netif_running(dev)) {
1944                        unsigned long flags;
1945                        /* Take a spinlock to make wait_for_cmd_done and sending the
1946                           command atomic.  --SAW */
1947                        spin_lock_irqsave(&sp->lock, flags);
1948                        wait_for_cmd_done(dev, sp);
1949                        iowrite8(CUDumpStats, ioaddr + SCBCmd);
1950                        spin_unlock_irqrestore(&sp->lock, flags);
1951                }
1952        }
1953        return &sp->stats;
1954}
1955
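/* A stand-alone sketch of the deferred-statistics pattern used by
   speedo_get_stats(): the chip dumps counters into a shared buffer and
   sets a completion marker; the driver folds the counters in on a later
   call and re-arms the dump instead of busy-waiting.  The marker value
   0xA007 matches the code above; everything else is illustrative. */
#include <assert.h>

#define SKETCH_DONE_MARKER 0xA007u

struct sketch_lstats {
        unsigned int tx_underruns;
        unsigned int done_marker;       /* written by the "device" */
};

static void sketch_fold_stats(struct sketch_lstats *ls,
                              unsigned long *tx_fifo_errors)
{
        if (ls->done_marker != SKETCH_DONE_MARKER)
                return;                 /* previous dump not finished yet */
        *tx_fifo_errors += ls->tx_underruns;
        ls->done_marker = 0;            /* re-arm: the device sets it again */
}

int main(void)
{
        struct sketch_lstats ls = { 3, SKETCH_DONE_MARKER };
        unsigned long tx_fifo_errors = 0;

        sketch_fold_stats(&ls, &tx_fifo_errors);
        assert(tx_fifo_errors == 3 && ls.done_marker == 0);
        sketch_fold_stats(&ls, &tx_fifo_errors);        /* no-op: not done */
        assert(tx_fifo_errors == 3);
        return 0;
}
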
1956static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1957{
1958        struct speedo_private *sp = netdev_priv(dev);
1959        strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
1960        strncpy(info->version, version, sizeof(info->version)-1);
1961        if (sp->pdev)
1962                strcpy(info->bus_info, pci_name(sp->pdev));
1963}
1964
1965static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1966{
1967        struct speedo_private *sp = netdev_priv(dev);
1968        spin_lock_irq(&sp->lock);
1969        mii_ethtool_gset(&sp->mii_if, ecmd);
1970        spin_unlock_irq(&sp->lock);
1971        return 0;
1972}
1973
1974static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1975{
1976        struct speedo_private *sp = netdev_priv(dev);
1977        int res;
1978        spin_lock_irq(&sp->lock);
1979        res = mii_ethtool_sset(&sp->mii_if, ecmd);
1980        spin_unlock_irq(&sp->lock);
1981        return res;
1982}
1983
1984static int speedo_nway_reset(struct net_device *dev)
1985{
1986        struct speedo_private *sp = netdev_priv(dev);
1987        return mii_nway_restart(&sp->mii_if);
1988}
1989
1990static u32 speedo_get_link(struct net_device *dev)
1991{
1992        struct speedo_private *sp = netdev_priv(dev);
1993        return mii_link_ok(&sp->mii_if);
1994}
1995
1996static u32 speedo_get_msglevel(struct net_device *dev)
1997{
1998        struct speedo_private *sp = netdev_priv(dev);
1999        return sp->msg_enable;
2000}
2001
2002static void speedo_set_msglevel(struct net_device *dev, u32 v)
2003{
2004        struct speedo_private *sp = netdev_priv(dev);
2005        sp->msg_enable = v;
2006}
2007
2008static const struct ethtool_ops ethtool_ops = {
2009        .get_drvinfo = speedo_get_drvinfo,
2010        .get_settings = speedo_get_settings,
2011        .set_settings = speedo_set_settings,
2012        .nway_reset = speedo_nway_reset,
2013        .get_link = speedo_get_link,
2014        .get_msglevel = speedo_get_msglevel,
2015        .set_msglevel = speedo_set_msglevel,
2016};
2017
2018static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2019{
2020        struct speedo_private *sp = netdev_priv(dev);
2021        struct mii_ioctl_data *data = if_mii(rq);
2022        int phy = sp->phy[0] & 0x1f;
2023        int saved_acpi;
2024        int t;
2025
2026        switch (cmd) {
2027        case SIOCGMIIPHY:               /* Get address of MII PHY in use. */
2028                data->phy_id = phy;
2029                /* Fall through to read the selected register. */
2030        case SIOCGMIIREG:               /* Read MII PHY register. */
2031                /* FIXME: these operations need to be serialized with MDIO
2032                   access from the timeout handler.
2033                   They are currently serialized only with MDIO access from the
2034                   timer routine.  2000/05/09 SAW */
2035                saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2036                t = del_timer_sync(&sp->timer);
2037                data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2038                if (t)
2039                        add_timer(&sp->timer); /* may be set to the past  --SAW */
2040                pci_set_power_state(sp->pdev, saved_acpi);
2041                return 0;
2042
2043        case SIOCSMIIREG:               /* Write MII PHY register. */
2044                if (!capable(CAP_NET_ADMIN))
2045                        return -EPERM;
2046                saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2047                t = del_timer_sync(&sp->timer);
2048                mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2049                if (t)
2050                        add_timer(&sp->timer); /* may be set to the past  --SAW */
2051                pci_set_power_state(sp->pdev, saved_acpi);
2052                return 0;
2053        default:
2054                return -EOPNOTSUPP;
2055        }
2056}
2057
2058/* Set or clear the multicast filter for this adaptor.
2059   This is very ugly with Intel chips -- we usually have to execute an
2060   entire configuration command, plus process a multicast command.
2061   This is complicated.  We must put a large configuration command and
2062   an arbitrarily-sized multicast command in the transmit list.
2063   To minimize the disruption -- the previous command might have already
2064   loaded the link -- we convert the current command block, normally a Tx
2065   command, into a no-op and link it to the new command.
2066*/
2067static void set_rx_mode(struct net_device *dev)
2068{
2069        struct speedo_private *sp = netdev_priv(dev);
2070        void __iomem *ioaddr = sp->regs;
2071        struct descriptor *last_cmd;
2072        char new_rx_mode;
2073        unsigned long flags;
2074        int entry, i;
2075
2076        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
2077                new_rx_mode = 3;
2078        } else if ((dev->flags & IFF_ALLMULTI)  ||
2079                           dev->mc_count > multicast_filter_limit) {
2080                new_rx_mode = 1;
2081        } else
2082                new_rx_mode = 0;
2083
2084        if (netif_msg_rx_status(sp))
2085                printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2086                                sp->rx_mode, new_rx_mode);
2087
2088        if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2089                /* The Tx ring is full -- don't add anything!  Hope the mode
2090                   will be set again later. */
2091                sp->rx_mode = -1;
2092                return;
2093        }
2094
2095        if (new_rx_mode != sp->rx_mode) {
2096                u8 *config_cmd_data;
2097
2098                spin_lock_irqsave(&sp->lock, flags);
2099                entry = sp->cur_tx++ % TX_RING_SIZE;
2100                last_cmd = sp->last_cmd;
2101                sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2102
2103                sp->tx_skbuff[entry] = NULL;                    /* Redundant. */
2104                sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2105                sp->tx_ring[entry].link =
2106                        cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2107                config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2108                /* Construct a full CmdConfig frame. */
2109                memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2110                config_cmd_data[1] = (txfifo << 4) | rxfifo;
2111                config_cmd_data[4] = rxdmacount;
2112                config_cmd_data[5] = txdmacount + 0x80;
2113                config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2114                /* 0x80 doesn't disable FC; 0x84 does.
2115                   Disable Flow control since we are not ACK-ing any FC interrupts
2116                   for now. --Dragan */
2117                config_cmd_data[19] = 0x84;
2118                config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2119                config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2120                if (sp->phy[0] & 0x8000) {                      /* Use the AUI port instead. */
2121                        config_cmd_data[15] |= 0x80;
2122                        config_cmd_data[8] = 0;
2123                }
2124                /* Trigger the command unit resume. */
2125                wait_for_cmd_done(dev, sp);
2126                clear_suspend(last_cmd);
2127                iowrite8(CUResume, ioaddr + SCBCmd);
2128                if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2129                        netif_stop_queue(dev);
2130                        sp->tx_full = 1;
2131                }
2132                spin_unlock_irqrestore(&sp->lock, flags);
2133        }
2134
2135        if (new_rx_mode == 0  &&  dev->mc_count < 4) {
2136                /* The simple case of 0-3 multicast list entries occurs often, and
2137                   fits within one tx_ring[] entry. */
2138                struct dev_mc_list *mclist;
2139                __le16 *setup_params, *eaddrs;
2140
2141                spin_lock_irqsave(&sp->lock, flags);
2142                entry = sp->cur_tx++ % TX_RING_SIZE;
2143                last_cmd = sp->last_cmd;
2144                sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2145
2146                sp->tx_skbuff[entry] = NULL;
2147                sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2148                sp->tx_ring[entry].link =
2149                        cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2150                sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2151                setup_params = (__le16 *)&sp->tx_ring[entry].tx_desc_addr;
2152                *setup_params++ = cpu_to_le16(dev->mc_count*6);
2153                /* Fill in the multicast addresses. */
2154                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2155                         i++, mclist = mclist->next) {
2156                        eaddrs = (__le16 *)mclist->dmi_addr;
2157                        *setup_params++ = *eaddrs++;
2158                        *setup_params++ = *eaddrs++;
2159                        *setup_params++ = *eaddrs++;
2160                }
2161
2162                wait_for_cmd_done(dev, sp);
2163                clear_suspend(last_cmd);
2164                /* Immediately trigger the command unit resume. */
2165                iowrite8(CUResume, ioaddr + SCBCmd);
2166
2167                if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2168                        netif_stop_queue(dev);
2169                        sp->tx_full = 1;
2170                }
2171                spin_unlock_irqrestore(&sp->lock, flags);
2172        } else if (new_rx_mode == 0) {
2173                struct dev_mc_list *mclist;
2174                __le16 *setup_params, *eaddrs;
2175                struct speedo_mc_block *mc_blk;
2176                struct descriptor *mc_setup_frm;
2177                int i;
2178
2179                mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2180                                                 GFP_ATOMIC);
2181                if (mc_blk == NULL) {
2182                        printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2183                                   dev->name);
2184                        sp->rx_mode = -1; /* We failed, try again. */
2185                        return;
2186                }
2187                mc_blk->next = NULL;
2188                mc_blk->len = 2 + multicast_filter_limit*6;
2189                mc_blk->frame_dma =
2190                        pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2191                                        PCI_DMA_TODEVICE);
2192                mc_setup_frm = &mc_blk->frame;
2193
2194                /* Fill the setup frame. */
2195                if (netif_msg_ifup(sp))
2196                        printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2197                                   dev->name, mc_setup_frm);
2198                mc_setup_frm->cmd_status =
2199                        cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2200                /* Link set below. */
2201                setup_params = (__le16 *)&mc_setup_frm->params;
2202                *setup_params++ = cpu_to_le16(dev->mc_count*6);
2203                /* Fill in the multicast addresses. */
2204                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2205                         i++, mclist = mclist->next) {
2206                        eaddrs = (__le16 *)mclist->dmi_addr;
2207                        *setup_params++ = *eaddrs++;
2208                        *setup_params++ = *eaddrs++;
2209                        *setup_params++ = *eaddrs++;
2210                }
2211
2212                /* Disable interrupts while playing with the Tx Cmd list. */
2213                spin_lock_irqsave(&sp->lock, flags);
2214
2215                if (sp->mc_setup_tail)
2216                        sp->mc_setup_tail->next = mc_blk;
2217                else
2218                        sp->mc_setup_head = mc_blk;
2219                sp->mc_setup_tail = mc_blk;
2220                mc_blk->tx = sp->cur_tx;
2221
2222                entry = sp->cur_tx++ % TX_RING_SIZE;
2223                last_cmd = sp->last_cmd;
2224                sp->last_cmd = mc_setup_frm;
2225
2226                /* Change the command to a NoOp, pointing to the CmdMulti command. */
2227                sp->tx_skbuff[entry] = NULL;
2228                sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2229                sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2230
2231                /* Set the link in the setup frame. */
2232                mc_setup_frm->link =
2233                        cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2234
2235                pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2236                                                                           mc_blk->len, PCI_DMA_TODEVICE);
2237
2238                wait_for_cmd_done(dev, sp);
2239                clear_suspend(last_cmd);
2240                /* Immediately trigger the command unit resume. */
2241                iowrite8(CUResume, ioaddr + SCBCmd);
2242
2243                if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2244                        netif_stop_queue(dev);
2245                        sp->tx_full = 1;
2246                }
2247                spin_unlock_irqrestore(&sp->lock, flags);
2248
2249                if (netif_msg_rx_status(sp))
2250                        printk(KERN_DEBUG "%s: CmdMCSetup frame with %d addresses"
2251                                   " in entry %d.\n", dev->name, dev->mc_count, entry);
2252        }
2253
2254        sp->rx_mode = new_rx_mode;
2255}
2256
2257#ifdef CONFIG_PM
2258static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
2259{
2260        struct net_device *dev = pci_get_drvdata (pdev);
2261        struct speedo_private *sp = netdev_priv(dev);
2262        void __iomem *ioaddr = sp->regs;
2263
2264        pci_save_state(pdev);
2265
2266        if (!netif_running(dev))
2267                return 0;
2268
2269        del_timer_sync(&sp->timer);
2270
2271        netif_device_detach(dev);
2272        iowrite32(PortPartialReset, ioaddr + SCBPort);
2273
2274        /* XXX call pci_set_power_state ()? */
2275        pci_disable_device(pdev);
2276        pci_set_power_state (pdev, PCI_D3hot);
2277        return 0;
2278}
2279
2280static int eepro100_resume(struct pci_dev *pdev)
2281{
2282        struct net_device *dev = pci_get_drvdata (pdev);
2283        struct speedo_private *sp = netdev_priv(dev);
2284        void __iomem *ioaddr = sp->regs;
2285        int rc;
2286
2287        pci_set_power_state(pdev, PCI_D0);
2288        pci_restore_state(pdev);
2289
2290        rc = pci_enable_device(pdev);
2291        if (rc)
2292                return rc;
2293
2294        pci_set_master(pdev);
2295
2296        if (!netif_running(dev))
2297                return 0;
2298
2299        /* I'm absolutely uncertain whether this part of the code works.
2300           The problems are:
2301            - correct hardware reinitialization;
2302                - correct driver behavior between different steps of the
2303                  reinitialization;
2304                - serialization with other driver calls.
2305           2000/03/08  SAW */
2306        iowrite16(SCBMaskAll, ioaddr + SCBCmd);
2307        speedo_resume(dev);
2308        netif_device_attach(dev);
2309        sp->rx_mode = -1;
2310        sp->flow_ctrl = sp->partner = 0;
2311        set_rx_mode(dev);
2312        sp->timer.expires = RUN_AT(2*HZ);
2313        add_timer(&sp->timer);
2314        return 0;
2315}
2316#endif /* CONFIG_PM */
2317
2318static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2319{
2320        struct net_device *dev = pci_get_drvdata (pdev);
2321        struct speedo_private *sp = netdev_priv(dev);
2322
2323        unregister_netdev(dev);
2324
2325        release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2326        release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2327
2328        pci_iounmap(pdev, sp->regs);
2329        pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2330                                                                + sizeof(struct speedo_stats),
2331                                                sp->tx_ring, sp->tx_ring_dma);
2332        pci_disable_device(pdev);
2333        free_netdev(dev);
2334}
2335
2336static struct pci_device_id eepro100_pci_tbl[] = {
2337        { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
2338        { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
2339        { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2340        { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2341        { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2342        { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2343        { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2344        { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2345        { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2346        { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2347        { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2348        { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2349        { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2350        { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2351        { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2352        { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2353        { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2354        { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2355        { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2356        { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2357        { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2358        { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2359        { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2360        { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2361        { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2362        { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2363        { 0,}
2364};
2365MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2366
2367static struct pci_driver eepro100_driver = {
2368        .name           = "eepro100",
2369        .id_table       = eepro100_pci_tbl,
2370        .probe          = eepro100_init_one,
2371        .remove         = __devexit_p(eepro100_remove_one),
2372#ifdef CONFIG_PM
2373        .suspend        = eepro100_suspend,
2374        .resume         = eepro100_resume,
2375#endif /* CONFIG_PM */
2376};
2377
2378static int __init eepro100_init_module(void)
2379{
2380#ifdef MODULE
2381        printk(version);
2382#endif
2383        return pci_register_driver(&eepro100_driver);
2384}
2385
2386static void __exit eepro100_cleanup_module(void)
2387{
2388        pci_unregister_driver(&eepro100_driver);
2389}
2390
2391module_init(eepro100_init_module);
2392module_exit(eepro100_cleanup_module);
2393
2394/*
2395 * Local variables:
2396 *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2397 *  c-indent-level: 4
2398 *  c-basic-offset: 4
2399 *  tab-width: 4
2400 * End:
2401 */
2402