linux/drivers/net/sundance.c
/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
/*
	Written 1999-2000 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/sundance.html
	[link no longer provides useful info -jgarzik]
	Archives of the mailing list are still available at
	http://www.beowulf.org/pipermail/netdrivers/

*/

#define DRV_NAME	"sundance"
#define DRV_VERSION	"1.2"
#define DRV_RELDATE	"11-Sep-2006"


/* The user-configurable values.
   These may be modified when a driver module is loaded.*/
static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak;
static int flowctrl = 1;

/* media[] specifies the media type the NIC operates at.
		 autosense	Autosensing active media.
		 10mbps_hd	10Mbps half duplex.
		 10mbps_fd	10Mbps full duplex.
		 100mbps_hd	100Mbps half duplex.
		 100mbps_fd	100Mbps full duplex.
		 0		Autosensing active media.
		 1		10Mbps half duplex.
		 2		10Mbps full duplex.
		 3		100Mbps half duplex.
		 4		100Mbps full duplex.
*/
#define MAX_UNITS 8
static char *media[MAX_UNITS];
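
/* Example with hypothetical values: load the module forcing the first card
 * to 100Mbps full duplex, autosensing the second, with flow control off:
 *
 *	modprobe sundance media=100mbps_fd,autosense flowctrl=0
 *
 * Cards without an entry in media[] default to autosensing.
 */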


/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE	32
#define TX_QUEUE_LEN	(TX_RING_SIZE - 1) /* Limit ring entries actually used.  */
#define RX_RING_SIZE	64
#define RX_BUDGET	32
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct netdev_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct netdev_desc))
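
/* A worked example of the power-of-two claim above: with TX_RING_SIZE = 32,
 * an expression such as
 *
 *	entry = np->cur_tx % TX_RING_SIZE;
 *
 * compiles to a simple mask (cur_tx & 31) rather than a division, which is
 * why the ring indices below can be free-running unsigned counters.
 */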

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */

/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#ifndef _COMPAT_WITH_OLD_KERNEL
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#else
#include "crc32.h"
#include "ethtool.h"
#include "mii.h"
#include "compat.h"
#endif

/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
	KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE
	" Written by Donald Becker\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(media, charp, NULL, 0);
module_param(flowctrl, int, 0);
MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that the IP header at offset 14 in an
ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag.  It sets the tbusy flag whenever it's queuing a Tx packet.  If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark.  Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.

IV. Notes

IVb. References

The Sundance ST201 datasheet, preliminary version.
The Kendin KS8723 datasheet, preliminary version.
The ICplus IP100 datasheet, preliminary version.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

*/

/* Work-around for Kendin chip bugs. */
#ifndef CONFIG_SUNDANCE_MMIO
#define USE_IO_OPS 1
#endif

static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
	{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
	{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
	{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
	{ 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 },
	{ 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ }
};
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

enum {
	netdev_io_size = 128
};

struct pci_id_info {
	const char *name;
};
static const struct pci_id_info pci_id_tbl[] __devinitdata = {
	{"D-Link DFE-550TX FAST Ethernet Adapter"},
	{"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
	{"D-Link DFE-580TX 4 port Server Adapter"},
	{"D-Link DFE-530TXS FAST Ethernet Adapter"},
	{"D-Link DL10050-based FAST Ethernet Adapter"},
	{"Sundance Technology Alta"},
	{"IC Plus Corporation IP100A FAST Ethernet Adapter"},
	{ }	/* terminate list. */
};

/* This driver was written to use PCI memory space, however x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.  The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum alta_offsets {
	DMACtrl = 0x00,
	TxListPtr = 0x04,
	TxDMABurstThresh = 0x08,
	TxDMAUrgentThresh = 0x09,
	TxDMAPollPeriod = 0x0a,
	RxDMAStatus = 0x0c,
	RxListPtr = 0x10,
	DebugCtrl0 = 0x1a,
	DebugCtrl1 = 0x1c,
	RxDMABurstThresh = 0x14,
	RxDMAUrgentThresh = 0x15,
	RxDMAPollPeriod = 0x16,
	LEDCtrl = 0x1a,
	ASICCtrl = 0x30,
	EEData = 0x34,
	EECtrl = 0x36,
	FlashAddr = 0x40,
	FlashData = 0x44,
	TxStatus = 0x46,
	TxFrameId = 0x47,
	DownCounter = 0x18,
	IntrClear = 0x4a,
	IntrEnable = 0x4c,
	IntrStatus = 0x4e,
	MACCtrl0 = 0x50,
	MACCtrl1 = 0x52,
	StationAddr = 0x54,
	MaxFrameSize = 0x5A,
	RxMode = 0x5c,
	MIICtrl = 0x5e,
	MulticastFilter0 = 0x60,
	MulticastFilter1 = 0x64,
	RxOctetsLow = 0x68,
	RxOctetsHigh = 0x6a,
	TxOctetsLow = 0x6c,
	TxOctetsHigh = 0x6e,
	TxFramesOK = 0x70,
	RxFramesOK = 0x72,
	StatsCarrierError = 0x74,
	StatsLateColl = 0x75,
	StatsMultiColl = 0x76,
	StatsOneColl = 0x77,
	StatsTxDefer = 0x78,
	RxMissed = 0x79,
	StatsTxXSDefer = 0x7a,
	StatsTxAbort = 0x7b,
	StatsBcastTx = 0x7c,
	StatsBcastRx = 0x7d,
	StatsMcastTx = 0x7e,
	StatsMcastRx = 0x7f,
	/* Aliased and bogus values! */
	RxStatus = 0x0c,
};
enum ASICCtrl_HiWord_bit {
	GlobalReset = 0x0001,
	RxReset = 0x0002,
	TxReset = 0x0004,
	DMAReset = 0x0008,
	FIFOReset = 0x0010,
	NetworkReset = 0x0020,
	HostReset = 0x0040,
	ResetBusy = 0x0400,
};

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
	IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
	IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
	IntrDrvRqst=0x0040,
	StatsMax=0x0080, LinkChange=0x0100,
	IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
};

/* Bits in the RxMode register. */
enum rx_mode_bits {
	AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
	AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
};
/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
	EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
	EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
};
enum mac_ctrl1_bits {
	StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
	TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
	RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
};

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 status;
	struct desc_frag { __le32 addr, length; } frag[1];
};

/* Bits in netdev_desc.status */
enum desc_status_bits {
	DescOwn=0x8000,
	DescEndPacket=0x4000,
	DescEndRing=0x2000,
	LastFrag=0x80000000,
	DescIntrOnTx=0x8000,
	DescIntrOnDMADone=0x80000000,
	DisableAlign = 0x00000001,
};
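
/* A minimal sketch (assuming the layout above, and mirroring init_ring()
 * below) of how the driver links the statically sized descriptor array into
 * the ring described in the Theory of Operation: each next_desc holds the
 * little-endian bus address of its successor, and the final entry points
 * back at the first.
 *
 *	for (i = 0; i < RX_RING_SIZE; i++)
 *		ring[i].next_desc = cpu_to_le32(ring_dma +
 *			((i + 1) % RX_RING_SIZE) * sizeof(struct netdev_desc));
 */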

#define PRIV_ALIGN	15	/* Required alignment mask */
/* Use  __attribute__((aligned (L1_CACHE_BYTES)))  to maintain alignment
   within the structure. */
#define MII_CNT		4
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;
	struct timer_list timer;		/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	spinlock_t rx_lock;			/* Group with Tx control cache line. */
	int msg_enable;
	int chip_id;
	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */
	struct netdev_desc *last_tx;		/* Last Tx descriptor used. */
	unsigned int cur_tx, dirty_tx;
	/* These values keep track of the transceiver/media in use. */
	unsigned int flowctrl:1;
	unsigned int default_port:4;		/* Last dev->if_port value. */
	unsigned int an_enable:1;
	unsigned int speed;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int budget;
	int cur_task;
	/* Multicast and receive mode. */
	spinlock_t mcastlock;			/* SMP lock multicast updates. */
	u16 mcast_filter[4];
	/* MII transceiver section. */
	struct mii_if_info mii_if;
	int mii_preamble_required;
	unsigned char phys[MII_CNT];		/* MII device addresses, only first one used. */
	struct pci_dev *pci_dev;
	void __iomem *base;
};

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET	0x10
#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
			IntrDrvRqst | IntrTxDone | StatsMax | \
			LinkChange)
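/* For reference, the mask above works out to 0x05c6: IntrRxDMADone (0x0400)
 * | LinkChange (0x0100) | StatsMax (0x0080) | IntrDrvRqst (0x0040) |
 * IntrTxDone (0x0004) | IntrPCIErr (0x0002).
 */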

static int  change_mtu(struct net_device *dev, int new_mtu);
static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  mdio_wait_link(struct net_device *dev, int wait);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void rx_poll(unsigned long data);
static void tx_poll(unsigned long data);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static int __set_mac_addr(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;

static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base + ASICCtrl;
	int countdown;

	/* ST201 documentation states ASICCtrl is a 32bit register */
	iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
	/* ST201 documentation states reset can take up to 1 ms */
	countdown = 10 + 1;
	while (ioread32 (ioaddr) & (ResetBusy << 16)) {
		if (--countdown == 0) {
			printk(KERN_WARNING "%s: reset not completed!\n", dev->name);
			break;
		}
		udelay(100);
	}
}
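
/* Example (mirroring the probe and reset_tx() paths below): clear only the
 * Tx machinery, leaving Rx state untouched.  The reset bits live in the
 * high word of ASICCtrl, hence the << 16.
 *
 *	sundance_reset(dev, (NetworkReset | FIFOReset | DMAReset | TxReset) << 16);
 */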

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_change_mtu		= change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit sundance_probe1 (struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int irq;
	int i;
	void __iomem *ioaddr;
	u16 mii_ctl;
	void *ring_space;
	dma_addr_t ring_dma;
#ifdef USE_IO_OPS
	int bar = 0;
#else
	int bar = 1;
#endif
	int phy, phy_end, phy_idx = 0;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	if (pci_enable_device(pdev))
		return -EIO;
	pci_set_master(pdev);

	irq = pdev->irq;

	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, bar, netdev_io_size);
	if (!ioaddr)
		goto err_out_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] =
			cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = ioaddr;
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->msg_enable = (1 << debug) - 1;
	spin_lock_init(&np->lock);
	tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
	tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);

	ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_cleardev;
	np->tx_ring = (struct netdev_desc *)ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = (struct netdev_desc *)ring_space;
	np->rx_ring_dma = ring_dma;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);
	dev->watchdog_timeo = TX_TIMEOUT;

	pci_set_drvdata(pdev, dev);

	i = register_netdev(dev);
	if (i)
		goto err_out_unmap_rx;

	printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
	       dev->name, pci_id_tbl[chip_idx].name, ioaddr,
	       dev->dev_addr, irq);

	np->phys[0] = 1;		/* Default setting */
	np->mii_preamble_required++;
	/*
	 * Some PHYs seem not to deal well with address 0 being accessed
	 * first, so on most chips address 0 is probed last; the 0x0200
	 * (IP100) parts scan 0..31 in order.
	 */
	if (sundance_pci_tbl[np->chip_id].device == 0x0200) {
		phy = 0;
		phy_end = 31;
	} else {
		phy = 1;
		phy_end = 32;	/* wraps to zero, due to 'phy & 0x1f' */
	}
	for (; phy <= phy_end && phy_idx < MII_CNT; phy++) {
		int phyx = phy & 0x1f;
		int mii_status = mdio_read(dev, phyx, MII_BMSR);
		if (mii_status != 0xffff  &&  mii_status != 0x0000) {
			np->phys[phy_idx++] = phyx;
			np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
			if ((mii_status & 0x0040) == 0)
				np->mii_preamble_required++;
			printk(KERN_INFO "%s: MII PHY found at address %d, status "
				   "0x%4.4x advertising %4.4x.\n",
				   dev->name, phyx, mii_status, np->mii_if.advertising);
		}
	}
	np->mii_preamble_required--;

	if (phy_idx == 0) {
		printk(KERN_INFO "%s: No MII transceiver found, aborting.  ASIC status %x\n",
			   dev->name, ioread32(ioaddr + ASICCtrl));
		goto err_out_unregister;
	}

	np->mii_if.phy_id = np->phys[0];

	/* Parse override configuration */
	np->an_enable = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->mii_if.full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->mii_if.full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (flowctrl == 1)
			np->flowctrl = 1;
	}

	/* Fibre PHY? */
	if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
		/* Default 100Mbps Full */
		if (np->an_enable) {
			np->speed = 100;
			np->mii_if.full_duplex = 1;
			np->an_enable = 0;
		}
	}
	/* Reset PHY */
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
	mdelay (300);
	/* If flow control enabled, we need to advertise it. */
	if (np->flowctrl)
		mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
	mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
	/* Force media type */
	if (!np->an_enable) {
		mii_ctl = 0;
		mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
		mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
		mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
		printk (KERN_INFO "Override speed=%d, %s duplex\n",
			np->speed, np->mii_if.full_duplex ? "Full" : "Half");

	}

	/* Perhaps move the reset here? */
	/* Reset the chip to erase previous misconfiguration. */
	if (netif_msg_hw(np))
		printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
	sundance_reset(dev, 0x00ff << 16);
	if (netif_msg_hw(np))
		printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));

	card_idx++;
	return 0;

err_out_unregister:
	unregister_netdev(dev);
err_out_unmap_rx:
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ioaddr);
err_out_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev (dev);
	return -ENODEV;
}

static int change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
		return -EINVAL;
	if (netif_running(dev))
		return -EBUSY;
	dev->mtu = new_mtu;
	return 0;
}

#define eeprom_delay(ee_addr)	ioread32(ee_addr)
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int __devinit eeprom_read(void __iomem *ioaddr, int location)
{
	int boguscnt = 10000;		/* Typical 1900 ticks. */
	iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
	do {
		eeprom_delay(ioaddr + EECtrl);
		if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
			return ioread16(ioaddr + EEData);
		}
	} while (--boguscnt > 0);
	return 0;
}

/*  MII transceiver control section.
	Read and write the MII registers using software-generated serial
	MDIO protocol.  See the MII specifications or DP83840A data sheet
	for details.

	The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
	met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() ioread8(mdio_addr)

enum mii_reg_bits {
	MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
};
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

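/* Sketch of the IEEE 802.3 clause 22 read frame that mdio_read() below
 * bit-bangs out, assuming a standards-conforming transceiver:
 *
 *	<preamble: 32 ones>  01  10  <phy_id:5>  <location:5>  <TA>  <data:16>
 *	    (mdio_sync)      ST  OP(read)
 *
 * The (0xf6 << 10) constant packs two trailing preamble bits plus the start
 * (01) and read-opcode (10) fields into the 16 command bits shifted out MSB
 * first; the turnaround and data bits are then clocked in.
 */
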
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite8(MDIO_WRITE1, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Read the two transition bits, 16 data bits, and the wire-idle bit. */
	for (i = 19; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	return (retval>>1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
	int i;

	if (np->mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite8(dataval, mdio_addr);
		mdio_delay();
		iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite8(MDIO_EnbIn, mdio_addr);
		mdio_delay();
		iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay();
	}
}

static int mdio_wait_link(struct net_device *dev, int wait)
{
	int bmsr;
	int phy_id;
	struct netdev_private *np;

	np = netdev_priv(dev);
	phy_id = np->phys[0];

	do {
		bmsr = mdio_read(dev, phy_id, MII_BMSR);
		if (bmsr & BMSR_LSTATUS)	/* 0x0004: link is up */
			return 0;
		mdelay(1);
	} while (--wait > 0);
	return -1;
}

static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flags;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);
	init_ring(dev);

	iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	/* Initialize other registers. */
	__set_mac_addr(dev);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
#else
	iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
#endif
	if (dev->mtu > 2047)
		iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);

	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	spin_lock_init(&np->mcastlock);

	set_rx_mode(dev);
	iowrite16(0, ioaddr + IntrEnable);
	iowrite16(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	iowrite8(100, ioaddr + RxDMAPollPeriod);
	iowrite8(127, ioaddr + TxDMAPollPeriod);
	/* Fix DFE-580TX packet drop issue */
	if (np->pci_dev->revision >= 0x14)
		iowrite8(0x01, ioaddr + DebugCtrl1);
	netif_start_queue(dev);

	spin_lock_irqsave(&np->lock, flags);
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (netif_msg_ifup(np))
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
			   ioread32(ioaddr + MACCtrl0),
			   ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;				/* timer handler */
	add_timer(&np->timer);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);

	return 0;
}

static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
	int negotiated = mii_lpa & np->mii_if.advertising;
	int duplex;

	/* Force media */
	if (!np->an_enable || mii_lpa == 0xffff) {
		if (np->mii_if.full_duplex)
			iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
				ioaddr + MACCtrl0);
		return;
	}

	/* Autonegotiation: full duplex if 100FULL (0x0100) was negotiated,
	   or if 10FULL (0x0040) was and neither 100Mbps mode was. */
	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
	if (np->mii_if.full_duplex != duplex) {
		np->mii_if.full_duplex = duplex;
		if (netif_msg_link(np))
			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
				   "negotiated capability %4.4x.\n", dev->name,
				   duplex ? "full" : "half", np->phys[0], negotiated);
		iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
	}
}

static void netdev_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int next_tick = 10*HZ;

	if (netif_msg_timer(np)) {
		printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
			   "Tx %x Rx %x.\n",
			   dev->name, ioread16(ioaddr + IntrEnable),
			   ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
	}
	check_duplex(dev);
	np->timer.expires = jiffies + next_tick;
	add_timer(&np->timer);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	unsigned long flag;

	netif_stop_queue(dev);
	tasklet_disable(&np->tx_tasklet);
	iowrite16(0, ioaddr + IntrEnable);
	printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
		   "TxFrameId %2.2x,"
		   " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
		   ioread8(ioaddr + TxFrameId));

	{
		int i;
		for (i=0; i<TX_RING_SIZE; i++) {
			printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
				(unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
				le32_to_cpu(np->tx_ring[i].next_desc),
				le32_to_cpu(np->tx_ring[i].status),
				(le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				le32_to_cpu(np->tx_ring[i].frag[0].length));
		}
		printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
			ioread32(np->base + TxListPtr),
			netif_queue_stopped(dev));
		printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
			np->cur_tx, np->cur_tx % TX_RING_SIZE,
			np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
		printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
		printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
	}
	spin_lock_irqsave(&np->lock, flag);

	/* Stop and restart the chip's Tx processes. */
	reset_tx(dev);
	spin_unlock_irqrestore(&np->lock, flag);

	dev->if_port = 0;

	dev->trans_start = jiffies; /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
		netif_wake_queue(dev);
	}
	iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
	tasklet_enable(&np->tx_tasklet);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = 0;
	np->dirty_rx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
			((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
		np->rx_ring[i].status = 0;
		np->rx_ring[i].frag[0].length = 0;
		np->rx_skbuff[i] = NULL;
	}

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = dev;		/* Mark as being used by this device. */
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		np->rx_ring[i].frag[0].addr = cpu_to_le32(
			pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
				PCI_DMA_FROMDEVICE));
		np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
	}
	/* If allocation stopped early, this leaves dirty_rx lagging cur_rx by
	   the number of unfilled slots, so refill_rx() can top the ring up. */
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
}

static void tx_poll (unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netdev_private *np = netdev_priv(dev);
	unsigned head = np->cur_task % TX_RING_SIZE;
	struct netdev_desc *txdesc =
		&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];

	/* Chain the next pointer */
	for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
		int entry = np->cur_task % TX_RING_SIZE;
		txdesc = &np->tx_ring[entry];
		if (np->last_tx) {
			np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
				entry*sizeof(struct netdev_desc));
		}
		np->last_tx = txdesc;
	}
	/* Indicate the latest descriptor of tx ring */
	txdesc->status |= cpu_to_le32(DescIntrOnTx);

	if (ioread32 (np->base + TxListPtr) == 0)
		iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
			np->base + TxListPtr);
}

static netdev_tx_t
start_tx (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	struct netdev_desc *txdesc;
	unsigned entry;

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

	txdesc->next_desc = 0;
	txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
	txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);

	/* Increment cur_tx before tasklet_schedule() */
	np->cur_tx++;
	mb();
	/* Schedule a tx_poll() task */
	tasklet_schedule(&np->tx_tasklet);

	/* On some architectures: explicitly flush cache lines here. */
	/* Stop the queue once the ring is nearly full. */
	if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
		netif_stop_queue (dev);
	if (netif_msg_tx_queued(np)) {
		printk (KERN_DEBUG
			"%s: Transmit frame #%d queued in slot %d.\n",
			dev->name, np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}
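
/* A note on the hand-off above, as this driver appears to intend it:
 * start_tx() is the only writer of cur_tx, while tx_poll() advances cur_task
 * and the interrupt handler advances dirty_tx, so the free-running unsigned
 * counters, together with the mb() before tasklet_schedule(), are what keep
 * producer and consumers consistent without taking np->lock on this fast
 * path.
 */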

/* Reset the hardware Tx logic and free all queued Tx buffers */
static int
reset_tx (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct sk_buff *skb;
	int i;
	int irq = in_interrupt();

	/* Reset tx logic, TxListPtr will be cleaned */
	iowrite16 (TxDisable, ioaddr + MACCtrl1);
	sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);

	/* free all tx skbuff */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_ring[i].next_desc = 0;

		skb = np->tx_skbuff[i];
		if (skb) {
			pci_unmap_single(np->pci_dev,
				le32_to_cpu(np->tx_ring[i].frag[0].addr),
				skb->len, PCI_DMA_TODEVICE);
			if (irq)
				dev_kfree_skb_irq (skb);
			else
				dev_kfree_skb (skb);
			np->tx_skbuff[i] = NULL;
			dev->stats.tx_dropped++;
		}
	}
	np->cur_tx = np->dirty_tx = 0;
	np->cur_task = 0;

	np->last_tx = NULL;
	iowrite8(127, ioaddr + TxDMAPollPeriod);

	iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
	return 0;
}

/* The interrupt handler cleans up after the Tx thread
   and schedules the Rx tasklet to handle received frames. */
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int hw_frame_id;
	int tx_cnt;
	int tx_status;
	int handled = 0;
	int i;


	do {
		int intr_status = ioread16(ioaddr + IntrStatus);
		iowrite16(intr_status, ioaddr + IntrStatus);

		if (netif_msg_intr(np))
			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
				   dev->name, intr_status);

		if (!(intr_status & DEFAULT_INTR))
			break;

		handled = 1;

		if (intr_status & (IntrRxDMADone)) {
			iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
					ioaddr + IntrEnable);
			if (np->budget < 0)
				np->budget = RX_BUDGET;
			tasklet_schedule(&np->rx_tasklet);
		}
		if (intr_status & (IntrTxDone | IntrDrvRqst)) {
			tx_status = ioread16 (ioaddr + TxStatus);
			for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
				if (netif_msg_tx_done(np))
					printk
					    ("%s: Transmit status is %2.2x.\n",
					dev->name, tx_status);
				if (tx_status & 0x1e) {
					if (netif_msg_tx_err(np))
						printk("%s: Transmit error status %4.4x.\n",
							   dev->name, tx_status);
					dev->stats.tx_errors++;
					if (tx_status & 0x10)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x08)
						dev->stats.collisions++;
					if (tx_status & 0x04)
						dev->stats.tx_fifo_errors++;
					if (tx_status & 0x02)
						dev->stats.tx_window_errors++;

					/*
					** This reset has been verified on
					** DFE-580TX boards ! phdm@macqel.be.
					*/
					if (tx_status & 0x10) {	/* TxUnderrun */
						/* Restart Tx FIFO and transmitter */
						sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
						/* No need to reset the Tx pointer here */
					}
					/* Restart the Tx. Need to make sure tx enabled */
					i = 10;
					do {
						iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1);
						if (ioread16(ioaddr + MACCtrl1) & TxEnabled)
							break;
						mdelay(1);
					} while (--i);
				}
				/* Yup, this is a documentation bug.  It cost me *hours*. */
				iowrite16 (0, ioaddr + TxStatus);
				if (tx_cnt < 0) {
					iowrite32(5000, ioaddr + DownCounter);
					break;
				}
				tx_status = ioread16 (ioaddr + TxStatus);
			}
			hw_frame_id = (tx_status >> 8) & 0xff;
		} else {
			hw_frame_id = ioread8(ioaddr + TxFrameId);
		}

		if (np->pci_dev->revision >= 0x14) {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				int sw_frame_id;
				sw_frame_id = (le32_to_cpu(
					np->tx_ring[entry].status) >> 2) & 0xff;
				if (sw_frame_id == hw_frame_id &&
					!(le32_to_cpu(np->tx_ring[entry].status)
					& 0x00010000))
						break;
				if (sw_frame_id == (hw_frame_id + 1) %
					TX_RING_SIZE)
						break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		} else {
			spin_lock(&np->lock);
			for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
				int entry = np->dirty_tx % TX_RING_SIZE;
				struct sk_buff *skb;
				if (!(le32_to_cpu(np->tx_ring[entry].status)
							& 0x00010000))
					break;
				skb = np->tx_skbuff[entry];
				/* Free the original skb. */
				pci_unmap_single(np->pci_dev,
					le32_to_cpu(np->tx_ring[entry].frag[0].addr),
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq (np->tx_skbuff[entry]);
				np->tx_skbuff[entry] = NULL;
				np->tx_ring[entry].frag[0].addr = 0;
				np->tx_ring[entry].frag[0].length = 0;
			}
			spin_unlock(&np->lock);
		}

		if (netif_queue_stopped(dev) &&
			np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
			/* The ring is no longer full, clear busy flag. */
			netif_wake_queue (dev);
		}
		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
			netdev_error(dev, intr_status);
	} while (0);
	if (netif_msg_intr(np))
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
			   dev->name, ioread16(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}
1280
1281static void rx_poll(unsigned long data)
1282{
1283        struct net_device *dev = (struct net_device *)data;
1284        struct netdev_private *np = netdev_priv(dev);
1285        int entry = np->cur_rx % RX_RING_SIZE;
1286        int boguscnt = np->budget;
1287        void __iomem *ioaddr = np->base;
1288        int received = 0;
1289
1290        /* If EOP is set on the next entry, it's a new packet. Send it up. */
1291        while (1) {
1292                struct netdev_desc *desc = &(np->rx_ring[entry]);
1293                u32 frame_status = le32_to_cpu(desc->status);
1294                int pkt_len;
1295
1296                if (--boguscnt < 0) {
1297                        goto not_done;
1298                }
1299                if (!(frame_status & DescOwn))
1300                        break;
1301                pkt_len = frame_status & 0x1fff;        /* Chip omits the CRC. */
1302                if (netif_msg_rx_status(np))
1303                        printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
1304                                   frame_status);
1305                if (frame_status & 0x001f4000) {
1306                        /* There was a error. */
1307                        if (netif_msg_rx_err(np))
1308                                printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
1309                                           frame_status);
1310                        dev->stats.rx_errors++;
1311                        if (frame_status & 0x00100000) {
1312                                dev->stats.rx_length_errors++;
1313                                printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1314                                           " status %8.8x.\n",
1315                                           dev->name, frame_status);
1316                        }
1317                        if (frame_status & 0x00010000)
1318                                dev->stats.rx_fifo_errors++;
1319                        if (frame_status & 0x00060000)
1320                                dev->stats.rx_frame_errors++;
1321                        if (frame_status & 0x00080000)
1322                                dev->stats.rx_crc_errors++;
1324                } else {
1325                        struct sk_buff *skb;
1326#ifndef final_version
1327                        if (netif_msg_rx_status(np))
1328                                printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1329                                           ", bogus_cnt %d.\n",
1330                                           pkt_len, boguscnt);
1331#endif
1332                        /* Check if the packet is long enough to accept without copying
1333                           to a minimally-sized skbuff. */
1334                        if (pkt_len < rx_copybreak &&
1335                            (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1336                                skb_reserve(skb, 2);    /* 2 + 14-byte Ethernet header = 16-byte aligned IP header */
1337                                pci_dma_sync_single_for_cpu(np->pci_dev,
1338                                                            le32_to_cpu(desc->frag[0].addr),
1339                                                            np->rx_buf_sz,
1340                                                            PCI_DMA_FROMDEVICE);
1341
1342                                skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1343                                pci_dma_sync_single_for_device(np->pci_dev,
1344                                                               le32_to_cpu(desc->frag[0].addr),
1345                                                               np->rx_buf_sz,
1346                                                               PCI_DMA_FROMDEVICE);
1347                                skb_put(skb, pkt_len);
1348                        } else {
1349                                pci_unmap_single(np->pci_dev,
1350                                        le32_to_cpu(desc->frag[0].addr),
1351                                        np->rx_buf_sz,
1352                                        PCI_DMA_FROMDEVICE);
1353                                skb = np->rx_skbuff[entry];
                                    skb_put(skb, pkt_len);
1354                                np->rx_skbuff[entry] = NULL;
1355                        }
1356                        skb->protocol = eth_type_trans(skb, dev);
1357                        /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1358                        netif_rx(skb);
1359                }
1360                entry = (entry + 1) % RX_RING_SIZE;
1361                received++;
1362        }
1363        np->cur_rx = entry;
1364        refill_rx(dev);
1365        np->budget -= received;
1366        iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1367        return;
1368
1369not_done:
1370        np->cur_rx = entry;
1371        refill_rx(dev);
1372        if (!received)
1373                received = 1;
1374        np->budget -= received;
1375        if (np->budget <= 0)
1376                np->budget = RX_BUDGET;
1377        tasklet_schedule(&np->rx_tasklet);
1378}
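    /*
     * rx_poll() is a pre-NAPI software-polling scheme: the interrupt
     * handler masks Rx interrupts and schedules this tasklet, which
     * handles at most np->budget frames and then either re-enables Rx
     * interrupts (caught up) or reschedules itself (budget spent).  A
     * minimal sketch of that control flow; every name below is a
     * hypothetical stand-in:
     */
    #if 0   /* illustrative only */
    static void poll_sketch(struct poll_state *ps)
    {
            int budget = ps->budget;

            while (budget-- > 0 && rx_frame_ready(ps))
                    deliver_one_frame(ps);

            if (budget < 0)
                    reschedule_tasklet(ps); /* more frames pending */
            else
                    reenable_rx_irq(ps);    /* done for now */
    }
    #endif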
1379
1380static void refill_rx (struct net_device *dev)
1381{
1382        struct netdev_private *np = netdev_priv(dev);
1383        int entry;
1385
1386        /* Refill the Rx ring buffers. */
1387        for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1388                np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1389                struct sk_buff *skb;
1390                entry = np->dirty_rx % RX_RING_SIZE;
1391                if (np->rx_skbuff[entry] == NULL) {
1392                        skb = dev_alloc_skb(np->rx_buf_sz);
1393                        np->rx_skbuff[entry] = skb;
1394                        if (skb == NULL)
1395                                break;          /* Better luck next round. */
1396                        skb->dev = dev;         /* Mark as being used by this device. */
1397                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
1398                        np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1399                                pci_map_single(np->pci_dev, skb->data,
1400                                        np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1401                }
1402                /* Perhaps we need not reset this field. */
1403                np->rx_ring[entry].frag[0].length =
1404                        cpu_to_le32(np->rx_buf_sz | LastFrag);
1405                np->rx_ring[entry].status = 0;
1407        }
1408}
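    /*
     * Worked example of the refill arithmetic above: with
     * RX_RING_SIZE = 64, cur_rx = 3 and dirty_rx = 61,
     * (3 - 61 + 64) % 64 = 6, so six consumed slots still need a fresh
     * buffer before dirty_rx catches up with cur_rx.
     */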

1409static void netdev_error(struct net_device *dev, int intr_status)
1410{
1411        struct netdev_private *np = netdev_priv(dev);
1412        void __iomem *ioaddr = np->base;
1413        u16 mii_ctl, mii_advertise, mii_lpa;
1414        int speed;
1415
1416        if (intr_status & LinkChange) {
1417                if (mdio_wait_link(dev, 10) == 0) {
1418                        printk(KERN_INFO "%s: Link up\n", dev->name);
1419                        if (np->an_enable) {
1420                                mii_advertise = mdio_read(dev, np->phys[0],
1421                                                           MII_ADVERTISE);
1422                                mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
1423                                mii_advertise &= mii_lpa;
1424                                printk(KERN_INFO "%s: Link changed: ",
1425                                        dev->name);
1426                                if (mii_advertise & ADVERTISE_100FULL) {
1427                                        np->speed = 100;
1428                                        printk("100Mbps, full duplex\n");
1429                                } else if (mii_advertise & ADVERTISE_100HALF) {
1430                                        np->speed = 100;
1431                                        printk("100Mbps, half duplex\n");
1432                                } else if (mii_advertise & ADVERTISE_10FULL) {
1433                                        np->speed = 10;
1434                                        printk("10Mbps, full duplex\n");
1435                                } else if (mii_advertise & ADVERTISE_10HALF) {
1436                                        np->speed = 10;
1437                                        printk("10Mbps, half duplex\n");
1438                                } else
1439                                        printk("\n");
1440
1441                        } else {
1442                                mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR);
1443                                speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1444                                np->speed = speed;
1445                                printk(KERN_INFO "%s: Link changed: %dMbps, ",
1446                                        dev->name, speed);
1447                                printk("%s duplex.\n",
1448                                        (mii_ctl & BMCR_FULLDPLX) ?
1449                                                "full" : "half");
1450                        }
1451                        check_duplex(dev);
1452                        if (np->flowctrl && np->mii_if.full_duplex) {
1453                                iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1454                                        ioaddr + MulticastFilter1+2);
1455                                iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1456                                        ioaddr + MACCtrl0);
1457                        }
1458                        netif_carrier_on(dev);
1459                } else {
1460                        printk(KERN_INFO "%s: Link down\n", dev->name);
1461                        netif_carrier_off(dev);
1462                }
1463        }
1464        if (intr_status & StatsMax) {
1465                get_stats(dev);
1466        }
1467        if (intr_status & IntrPCIErr) {
1468                printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1469                           dev->name, intr_status);
1470                /* We must do a global reset of DMA to continue. */
1471        }
1472}
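    /*
     * Autonegotiation resolves to the best mode advertised by both link
     * partners, which is why the code above ANDs MII_ADVERTISE with
     * MII_LPA and tests the result from fastest to slowest.  A minimal
     * sketch of that resolution; mii_read() is a hypothetical stand-in
     * for mdio_read():
     */
    #if 0   /* illustrative only */
    static int resolve_speed_sketch(struct net_device *dev, int phy_id)
    {
            u16 common = mii_read(dev, phy_id, MII_ADVERTISE) &
                         mii_read(dev, phy_id, MII_LPA);

            if (common & (ADVERTISE_100FULL | ADVERTISE_100HALF))
                    return 100;
            if (common & (ADVERTISE_10FULL | ADVERTISE_10HALF))
                    return 10;
            return 0;       /* no common mode resolved */
    }
    #endif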
1473
1474static struct net_device_stats *get_stats(struct net_device *dev)
1475{
1476        struct netdev_private *np = netdev_priv(dev);
1477        void __iomem *ioaddr = np->base;
1478        int i;
1479
1480        /* We should lock this segment of code for SMP eventually, although
1481           the vulnerability window is very small and statistics are
1482           non-critical. */
1483        /* The chip only needs to report frames silently dropped. */
1484        dev->stats.rx_missed_errors     += ioread8(ioaddr + RxMissed);
1485        dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1486        dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1487        dev->stats.collisions += ioread8(ioaddr + StatsLateColl);
1488        dev->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1489        dev->stats.collisions += ioread8(ioaddr + StatsOneColl);
1490        dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1491        /* The remaining statistics registers are clear-on-read; drain
                them so the hardware counters cannot saturate. */
1492        for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1493                ioread8(ioaddr + i);
1494        dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1495        dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1496        dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1497        dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1498
1499        return &dev->stats;
1500}
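    /*
     * The octet counters above are split across two 16-bit registers and
     * reassembled as low | (high << 16); e.g. RxOctetsHigh = 0x0001 and
     * RxOctetsLow = 0x86a0 combine to 0x000186a0 = 100000 bytes.
     */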
1501
1502static void set_rx_mode(struct net_device *dev)
1503{
1504        struct netdev_private *np = netdev_priv(dev);
1505        void __iomem *ioaddr = np->base;
1506        u16 mc_filter[4];                       /* Multicast hash filter */
1507        u32 rx_mode;
1508        int i;
1509
1510        if (dev->flags & IFF_PROMISC) {                 /* Set promiscuous. */
1511                memset(mc_filter, 0xff, sizeof(mc_filter));
1512                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1513        } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1514                   (dev->flags & IFF_ALLMULTI)) {
1515                /* Too many to match, or accept all multicasts. */
1516                memset(mc_filter, 0xff, sizeof(mc_filter));
1517                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1518        } else if (!netdev_mc_empty(dev)) {
1519                struct netdev_hw_addr *ha;
1520                int bit;
1521                int index;
1522                int crc;
1523                memset(mc_filter, 0, sizeof(mc_filter));
1524                netdev_for_each_mc_addr(ha, dev) {
1525                        crc = ether_crc_le(ETH_ALEN, ha->addr);
1526                        for (index = 0, bit = 0; bit < 6; bit++, crc <<= 1)
1527                                if (crc & 0x80000000)
                                            index |= 1 << bit;
1528                        mc_filter[index/16] |= (1 << (index % 16));
1529                }
1530                rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1531        } else {
1532                iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1533                return;
1534        }
1535        if (np->mii_if.full_duplex && np->flowctrl)
1536                mc_filter[3] |= 0x0200;
1537
1538        for (i = 0; i < 4; i++)
1539                iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1540        iowrite8(rx_mode, ioaddr + RxMode);
1541}
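    /*
     * The multicast filter above hashes each address to one of 64 bits:
     * the top six bits of the little-endian CRC are peeled off MSB-first
     * to form an index, and bit (index % 16) of mc_filter[index / 16] is
     * set.  The index computation in isolation (sketch only):
     */
    #if 0   /* illustrative only */
    static int mc_hash_index_sketch(const u8 *addr)
    {
            int crc = ether_crc_le(ETH_ALEN, addr);
            int index = 0, bit;

            for (bit = 0; bit < 6; bit++, crc <<= 1)
                    if (crc & 0x80000000)
                            index |= 1 << bit;
            return index;   /* 0..63 */
    }
    #endif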
1542
1543static int __set_mac_addr(struct net_device *dev)
1544{
1545        struct netdev_private *np = netdev_priv(dev);
1546        u16 addr16;
1547
1548        addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1549        iowrite16(addr16, np->base + StationAddr);
1550        addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1551        iowrite16(addr16, np->base + StationAddr+2);
1552        addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1553        iowrite16(addr16, np->base + StationAddr+4);
1554        return 0;
1555}
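    /*
     * Worked example of the packing above: for MAC 00:0e:0c:12:34:56 the
     * three 16-bit writes are 0x0e00 -> StationAddr, 0x120c ->
     * StationAddr+2 and 0x5634 -> StationAddr+4 (low byte first in each
     * register pair).
     */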
1556
1557static int check_if_running(struct net_device *dev)
1558{
1559        if (!netif_running(dev))
1560                return -EINVAL;
1561        return 0;
1562}
1563
1564static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1565{
1566        struct netdev_private *np = netdev_priv(dev);
1567        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1568        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1569        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
1570}
1571
1572static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1573{
1574        struct netdev_private *np = netdev_priv(dev);
1575        spin_lock_irq(&np->lock);
1576        mii_ethtool_gset(&np->mii_if, ecmd);
1577        spin_unlock_irq(&np->lock);
1578        return 0;
1579}
1580
1581static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1582{
1583        struct netdev_private *np = netdev_priv(dev);
1584        int res;
1585        spin_lock_irq(&np->lock);
1586        res = mii_ethtool_sset(&np->mii_if, ecmd);
1587        spin_unlock_irq(&np->lock);
1588        return res;
1589}
1590
1591static int nway_reset(struct net_device *dev)
1592{
1593        struct netdev_private *np = netdev_priv(dev);
1594        return mii_nway_restart(&np->mii_if);
1595}
1596
1597static u32 get_link(struct net_device *dev)
1598{
1599        struct netdev_private *np = netdev_priv(dev);
1600        return mii_link_ok(&np->mii_if);
1601}
1602
1603static u32 get_msglevel(struct net_device *dev)
1604{
1605        struct netdev_private *np = netdev_priv(dev);
1606        return np->msg_enable;
1607}
1608
1609static void set_msglevel(struct net_device *dev, u32 val)
1610{
1611        struct netdev_private *np = netdev_priv(dev);
1612        np->msg_enable = val;
1613}
1614
1615static const struct ethtool_ops ethtool_ops = {
1616        .begin = check_if_running,
1617        .get_drvinfo = get_drvinfo,
1618        .get_settings = get_settings,
1619        .set_settings = set_settings,
1620        .nway_reset = nway_reset,
1621        .get_link = get_link,
1622        .get_msglevel = get_msglevel,
1623        .set_msglevel = set_msglevel,
1624};
1625
1626static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1627{
1628        struct netdev_private *np = netdev_priv(dev);
1629        int rc;
1630
1631        if (!netif_running(dev))
1632                return -EINVAL;
1633
1634        spin_lock_irq(&np->lock);
1635        rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1636        spin_unlock_irq(&np->lock);
1637
1638        return rc;
1639}
1640
1641static int netdev_close(struct net_device *dev)
1642{
1643        struct netdev_private *np = netdev_priv(dev);
1644        void __iomem *ioaddr = np->base;
1645        struct sk_buff *skb;
1646        int i;
1647
1648        /* Wait for any running tasklets to finish, then kill them */
1649        tasklet_kill(&np->rx_tasklet);
1650        tasklet_kill(&np->tx_tasklet);
1651        np->cur_tx = 0;
1652        np->dirty_tx = 0;
1653        np->cur_task = 0;
1654        np->last_tx = NULL;
1655
1656        netif_stop_queue(dev);
1657
1658        if (netif_msg_ifdown(np)) {
1659                printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1660                           "Rx %4.4x Int %2.2x.\n",
1661                           dev->name, ioread8(ioaddr + TxStatus),
1662                           ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1663                printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1664                           dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1665        }
1666
1667        /* Disable interrupts by clearing the interrupt mask. */
1668        iowrite16(0x0000, ioaddr + IntrEnable);
1669
1670        /* Disable Rx and Tx DMA so that resources can be released safely */
1671        iowrite32(0x500, ioaddr + DMACtrl);
1672
1673        /* Stop the chip's Tx and Rx processes. */
1674        iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1675
1676        for (i = 2000; i > 0; i--) {
1677                if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0)
1678                        break;
1679                mdelay(1);
1680        }
1681
1682        iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1683                        ioaddr + ASICCtrl + 2);
1684
1685        for (i = 2000; i > 0; i--) {
1686                if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1687                        break;
1688                mdelay(1);
1689        }
1690
1691#ifdef __i386__
1692        if (netif_msg_hw(np)) {
1693                printk(KERN_DEBUG "  Tx ring at %8.8x:\n",
1694                           (int)(np->tx_ring_dma));
1695                for (i = 0; i < TX_RING_SIZE; i++)
1696                        printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
1697                                   i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1698                                   np->tx_ring[i].frag[0].length);
1699                printk(KERN_DEBUG "  Rx ring %8.8x:\n",
1700                           (int)(np->rx_ring_dma));
1701                for (i = 0; i < 4 /* first entries of RX_RING_SIZE */; i++) {
1702                        printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1703                                   i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1704                                   np->rx_ring[i].frag[0].length);
1705                }
1706        }
1707#endif /* __i386__ debugging only */
1708
1709        free_irq(dev->irq, dev);
1710
1711        del_timer_sync(&np->timer);
1712
1713        /* Free all the skbuffs in the Rx queue. */
1714        for (i = 0; i < RX_RING_SIZE; i++) {
1715                np->rx_ring[i].status = 0;
1716                skb = np->rx_skbuff[i];
1717                if (skb) {
1718                        pci_unmap_single(np->pci_dev,
1719                                le32_to_cpu(np->rx_ring[i].frag[0].addr),
1720                                np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1721                        dev_kfree_skb(skb);
1722                        np->rx_skbuff[i] = NULL;
1723                }
1724                np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */
1725        }
1726        for (i = 0; i < TX_RING_SIZE; i++) {
1727                np->tx_ring[i].next_desc = 0;
1728                skb = np->tx_skbuff[i];
1729                if (skb) {
1730                        pci_unmap_single(np->pci_dev,
1731                                le32_to_cpu(np->tx_ring[i].frag[0].addr),
1732                                skb->len, PCI_DMA_TODEVICE);
1733                        dev_kfree_skb(skb);
1734                        np->tx_skbuff[i] = NULL;
1735                }
1736        }
1737
1738        return 0;
1739}
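    /*
     * Note the teardown order above: interrupts are masked and both DMA
     * engines stopped before the global reset, and only afterwards are
     * the IRQ, the timer and the ring buffers released, so the hardware
     * can no longer touch memory that is about to be freed.
     */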
1740
1741static void __devexit sundance_remove1 (struct pci_dev *pdev)
1742{
1743        struct net_device *dev = pci_get_drvdata(pdev);
1744
1745        if (dev) {
1746                struct netdev_private *np = netdev_priv(dev);
1747
1748                unregister_netdev(dev);
1749                pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1750                        np->rx_ring_dma);
1751                pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1752                        np->tx_ring_dma);
1753                pci_iounmap(pdev, np->base);
1754                pci_release_regions(pdev);
1755                free_netdev(dev);
1756                pci_set_drvdata(pdev, NULL);
1757        }
1758}
1759
1760static struct pci_driver sundance_driver = {
1761        .name           = DRV_NAME,
1762        .id_table       = sundance_pci_tbl,
1763        .probe          = sundance_probe1,
1764        .remove         = __devexit_p(sundance_remove1),
1765};
1766
1767static int __init sundance_init(void)
1768{
1769/* When built as a module, this is printed whether or not devices are found in probe. */
1770#ifdef MODULE
1771        printk(version);
1772#endif
1773        return pci_register_driver(&sundance_driver);
1774}
1775
1776static void __exit sundance_exit(void)
1777{
1778        pci_unregister_driver(&sundance_driver);
1779}
1780
1781module_init(sundance_init);
1782module_exit(sundance_exit);
1783