linux/drivers/net/sky2.c
   1/*
   2 * New driver for Marvell Yukon 2 chipset.
   3 * Based on earlier sk98lin, and skge driver.
   4 *
   5 * This driver intentionally does not support all the features
   6 * of the original driver such as link fail-over and link management because
   7 * those should be done at higher levels.
   8 *
   9 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2 of the License.
  14 *
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18 * GNU General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  23 */
  24
  25#include <linux/crc32.h>
  26#include <linux/kernel.h>
  27#include <linux/module.h>
  28#include <linux/netdevice.h>
  29#include <linux/dma-mapping.h>
  30#include <linux/etherdevice.h>
  31#include <linux/ethtool.h>
  32#include <linux/pci.h>
  33#include <linux/ip.h>
  34#include <net/ip.h>
  35#include <linux/tcp.h>
  36#include <linux/in.h>
  37#include <linux/delay.h>
  38#include <linux/workqueue.h>
  39#include <linux/if_vlan.h>
  40#include <linux/prefetch.h>
  41#include <linux/debugfs.h>
  42#include <linux/mii.h>
  43
  44#include <asm/irq.h>
  45
  46#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  47#define SKY2_VLAN_TAG_USED 1
  48#endif
  49
  50#include "sky2.h"
  51
  52#define DRV_NAME                "sky2"
  53#define DRV_VERSION             "1.22"
  54#define PFX                     DRV_NAME " "
  55
  56/*
  57 * The Yukon II chipset takes 64 bit command blocks (called list elements)
  58 * that are organized into three different rings (receive, transmit and
  59 * status), similar to Tigon3.
  60 */
  61
  62#define RX_LE_SIZE              1024
  63#define RX_LE_BYTES             (RX_LE_SIZE*sizeof(struct sky2_rx_le))
  64#define RX_MAX_PENDING          (RX_LE_SIZE/6 - 2)
  65#define RX_DEF_PENDING          RX_MAX_PENDING
  66
  67#define TX_RING_SIZE            512
  68#define TX_DEF_PENDING          (TX_RING_SIZE - 1)
  69#define TX_MIN_PENDING          64
  70#define MAX_SKB_TX_LE           (4 + (sizeof(dma_addr_t)/sizeof(u32))*MAX_SKB_FRAGS)
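/*
 * Editorial note (not in the original source): a sketch of the arithmetic
 * behind MAX_SKB_TX_LE.  Each page fragment needs one data list element,
 * plus one extra OP_ADDR64 element when dma_addr_t is 64 bits wide
 * (sizeof(dma_addr_t)/sizeof(u32) is then 2, otherwise 1); the constant 4
 * leaves room for the leading address, TSO/checksum and command elements
 * of the linear part.  On a typical 4K-page, 64-bit system with
 * MAX_SKB_FRAGS == 18 this bounds one skb at 4 + 2*18 = 40 elements,
 * well under TX_RING_SIZE.
 */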
  71
  72#define STATUS_RING_SIZE        2048    /* 2 ports * (TX + 2*RX) */
  73#define STATUS_LE_BYTES         (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
  74#define TX_WATCHDOG             (5 * HZ)
  75#define NAPI_WEIGHT             64
  76#define PHY_RETRIES             1000
  77
  78#define SKY2_EEPROM_MAGIC       0x9955aabb
  79
  80
  81#define RING_NEXT(x,s)  (((x)+1) & ((s)-1))
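/*
 * Editorial note (not in the original source): RING_NEXT depends on the
 * ring sizes above being powers of two, so masking with (size - 1) wraps
 * the index without a modulo or branch, e.g.:
 *
 *      RING_NEXT(510, TX_RING_SIZE) == 511
 *      RING_NEXT(511, TX_RING_SIZE) == 0
 *      RING_NEXT(1023, RX_LE_SIZE)  == 0
 */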
  82
  83static const u32 default_msg =
  84    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
  85    | NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
  86    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
  87
  88static int debug = -1;          /* defaults above */
  89module_param(debug, int, 0);
  90MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  91
  92static int copybreak __read_mostly = 128;
  93module_param(copybreak, int, 0);
  94MODULE_PARM_DESC(copybreak, "Receive copy threshold");
  95
  96static int disable_msi = 0;
  97module_param(disable_msi, int, 0);
  98MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  99
 100static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
 101        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
 102        { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
 103        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },    /* DGE-560T */
 104        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) },    /* DGE-550SX */
 105        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) },    /* DGE-560SX */
 106        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B03) },    /* DGE-550T */
 107        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, /* 88E8021 */
 108        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, /* 88E8022 */
 109        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, /* 88E8061 */
 110        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) }, /* 88E8062 */
 111        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) }, /* 88E8021 */
 112        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) }, /* 88E8022 */
 113        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) }, /* 88E8061 */
 114        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) }, /* 88E8062 */
 115        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) }, /* 88E8035 */
 116        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) }, /* 88E8036 */
 117        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) }, /* 88E8038 */
 118        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) }, /* 88E8039 */
 119        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4354) }, /* 88E8040 */
 120        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4355) }, /* 88E8040T */
 121        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4356) }, /* 88EC033 */
 122        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4357) }, /* 88E8042 */
 123        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x435A) }, /* 88E8048 */
 124        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) }, /* 88E8052 */
 125        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, /* 88E8050 */
 126        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, /* 88E8053 */
 127        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, /* 88E8055 */
 128        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) }, /* 88E8056 */
 129        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) }, /* 88E8070 */
 130        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) }, /* 88EC036 */
 131        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) }, /* 88EC032 */
 132        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
 133        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
 134        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
 135        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
 136        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
 137        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
 138        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
 139        { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
 140        { 0 }
 141};
 142
 143MODULE_DEVICE_TABLE(pci, sky2_id_table);
 144
 145/* Avoid conditionals by using per-port arrays */
 146static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
 147static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
 148static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 };
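/*
 * Editorial sketch (not part of the driver): the arrays above are indexed
 * by port so per-port code needs no if/else on which port it serves.  A
 * hypothetical helper showing the idiom used throughout this file:
 */
#if 0
static void example_quiesce_port_rx(struct sky2_hw *hw, unsigned port)
{
        unsigned rxq = rxqaddr[port];                   /* Q_R1 or Q_R2 */
        u32 imask = sky2_read32(hw, B0_IMSK);

        imask &= ~portirq_msk[port];                    /* mask this port */
        sky2_write32(hw, B0_IMSK, imask);
        sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
}
#endif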
 149
 150static void sky2_set_multicast(struct net_device *dev);
 151
 152/* Access to PHY via serial interconnect */
 153static int gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
 154{
 155        int i;
 156
 157        gma_write16(hw, port, GM_SMI_DATA, val);
 158        gma_write16(hw, port, GM_SMI_CTRL,
 159                    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
 160
 161        for (i = 0; i < PHY_RETRIES; i++) {
 162                u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
 163                if (ctrl == 0xffff)
 164                        goto io_error;
 165
 166                if (!(ctrl & GM_SMI_CT_BUSY))
 167                        return 0;
 168
 169                udelay(10);
 170        }
 171
 172        dev_warn(&hw->pdev->dev, "%s: phy write timeout\n", hw->dev[port]->name);
 173        return -ETIMEDOUT;
 174
 175io_error:
 176        dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
 177        return -EIO;
 178}
 179
 180static int __gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg, u16 *val)
 181{
 182        int i;
 183
 184        gma_write16(hw, port, GM_SMI_CTRL, GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
 185                    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
 186
 187        for (i = 0; i < PHY_RETRIES; i++) {
 188                u16 ctrl = gma_read16(hw, port, GM_SMI_CTRL);
 189                if (ctrl == 0xffff)
 190                        goto io_error;
 191
 192                if (ctrl & GM_SMI_CT_RD_VAL) {
 193                        *val = gma_read16(hw, port, GM_SMI_DATA);
 194                        return 0;
 195                }
 196
 197                udelay(10);
 198        }
 199
 200        dev_warn(&hw->pdev->dev, "%s: phy read timeout\n", hw->dev[port]->name);
 201        return -ETIMEDOUT;
 202io_error:
 203        dev_err(&hw->pdev->dev, "%s: phy I/O error\n", hw->dev[port]->name);
 204        return -EIO;
 205}
 206
 207static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 208{
 209        u16 v;
 210        __gm_phy_read(hw, port, reg, &v);
 211        return v;
 212}
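/*
 * Editorial sketch (not part of the driver): a typical read-modify-write
 * of a PHY register through the serial interface above.  Callers in this
 * file hold sky2->phy_lock around such sequences; the register and bit
 * below are chosen only for illustration.
 */
#if 0
static void example_phy_rmw(struct sky2_port *sky2)
{
        struct sky2_hw *hw = sky2->hw;
        u16 ctrl;

        spin_lock_bh(&sky2->phy_lock);
        ctrl = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_CTRL);
        ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);  /* auto crossover */
        gm_phy_write(hw, sky2->port, PHY_MARV_PHY_CTRL, ctrl);
        spin_unlock_bh(&sky2->phy_lock);
}
#endif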
 213
 214
 215static void sky2_power_on(struct sky2_hw *hw)
 216{
 217        /* switch power to VCC (WA for VAUX problem) */
 218        sky2_write8(hw, B0_POWER_CTRL,
 219                    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
 220
 221        /* disable Core Clock Division */
 222        sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
 223
 224        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
 225                /* enable bits are inverted */
 226                sky2_write8(hw, B2_Y2_CLK_GATE,
 227                            Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 228                            Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 229                            Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
 230        else
 231                sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 232
 233        if (hw->flags & SKY2_HW_ADV_POWER_CTL) {
 234                u32 reg;
 235
 236                sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 237
 238                reg = sky2_pci_read32(hw, PCI_DEV_REG4);
 239                /* set all bits to 0 except bits 15..12 and 8 */
 240                reg &= P_ASPM_CONTROL_MSK;
 241                sky2_pci_write32(hw, PCI_DEV_REG4, reg);
 242
 243                reg = sky2_pci_read32(hw, PCI_DEV_REG5);
 244                /* set all bits to 0 except bits 28 & 27 */
 245                reg &= P_CTL_TIM_VMAIN_AV_MSK;
 246                sky2_pci_write32(hw, PCI_DEV_REG5, reg);
 247
 248                sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
 249
 250                /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
 251                reg = sky2_read32(hw, B2_GP_IO);
 252                reg |= GLB_GPIO_STAT_RACE_DIS;
 253                sky2_write32(hw, B2_GP_IO, reg);
 254
 255                sky2_read32(hw, B2_GP_IO);
 256        }
 257}
 258
 259static void sky2_power_aux(struct sky2_hw *hw)
 260{
 261        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
 262                sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 263        else
 264                /* enable bits are inverted */
 265                sky2_write8(hw, B2_Y2_CLK_GATE,
 266                            Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
 267                            Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
 268                            Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
 269
 270        /* switch power to VAUX */
 271        if (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL)
 272                sky2_write8(hw, B0_POWER_CTRL,
 273                            (PC_VAUX_ENA | PC_VCC_ENA |
 274                             PC_VAUX_ON | PC_VCC_OFF));
 275}
 276
 277static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
 278{
 279        u16 reg;
 280
 281        /* disable all GMAC IRQ's */
 282        sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
 283
 284        gma_write16(hw, port, GM_MC_ADDR_H1, 0);        /* clear MC hash */
 285        gma_write16(hw, port, GM_MC_ADDR_H2, 0);
 286        gma_write16(hw, port, GM_MC_ADDR_H3, 0);
 287        gma_write16(hw, port, GM_MC_ADDR_H4, 0);
 288
 289        reg = gma_read16(hw, port, GM_RX_CTRL);
 290        reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
 291        gma_write16(hw, port, GM_RX_CTRL, reg);
 292}
 293
 294/* flow control to advertise bits */
 295static const u16 copper_fc_adv[] = {
 296        [FC_NONE]       = 0,
 297        [FC_TX]         = PHY_M_AN_ASP,
 298        [FC_RX]         = PHY_M_AN_PC,
 299        [FC_BOTH]       = PHY_M_AN_PC | PHY_M_AN_ASP,
 300};
 301
 302/* flow control to advertise bits when using 1000BaseX */
 303static const u16 fiber_fc_adv[] = {
 304        [FC_NONE] = PHY_M_P_NO_PAUSE_X,
 305        [FC_TX]   = PHY_M_P_ASYM_MD_X,
 306        [FC_RX]   = PHY_M_P_SYM_MD_X,
 307        [FC_BOTH] = PHY_M_P_BOTH_MD_X,
 308};
 309
 310/* flow control to GMA disable bits */
 311static const u16 gm_fc_disable[] = {
 312        [FC_NONE] = GM_GPCR_FC_RX_DIS | GM_GPCR_FC_TX_DIS,
 313        [FC_TX]   = GM_GPCR_FC_RX_DIS,
 314        [FC_RX]   = GM_GPCR_FC_TX_DIS,
 315        [FC_BOTH] = 0,
 316};
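/*
 * Editorial note (not in the original source): all three tables are
 * indexed by the same flow_control value.  FC_BOTH, for example,
 * advertises both pause bits on copper (PHY_M_AN_PC | PHY_M_AN_ASP) and
 * clears both GMA disable bits, while FC_NONE does the reverse.  They are
 * used by direct indexing, as in sky2_phy_init() below:
 *
 *      adv |= copper_fc_adv[sky2->flow_mode];
 *      reg |= gm_fc_disable[sky2->flow_mode];
 */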
 317
 318
 319static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 320{
 321        struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
 322        u16 ctrl, ct1000, adv, pg, ledctrl, ledover, reg;
 323
 324        if (sky2->autoneg == AUTONEG_ENABLE &&
 325            !(hw->flags & SKY2_HW_NEWER_PHY)) {
 326                u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 327
 328                ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
 329                           PHY_M_EC_MAC_S_MSK);
 330                ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
 331
 332                /* on PHY 88E1040 Rev.D0 (and newer) downshift control changed */
 333                if (hw->chip_id == CHIP_ID_YUKON_EC)
 334                        /* set downshift counter to 3x and enable downshift */
 335                        ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
 336                else
 337                        /* set master & slave downshift counter to 1x */
 338                        ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
 339
 340                gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
 341        }
 342
 343        ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 344        if (sky2_is_copper(hw)) {
 345                if (!(hw->flags & SKY2_HW_GIGABIT)) {
 346                        /* enable automatic crossover */
 347                        ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
 348
 349                        if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
 350                            hw->chip_rev == CHIP_REV_YU_FE2_A0) {
 351                                u16 spec;
 352
 353                                /* Enable Class A driver for FE+ A0 */
 354                                spec = gm_phy_read(hw, port, PHY_MARV_FE_SPEC_2);
 355                                spec |= PHY_M_FESC_SEL_CL_A;
 356                                gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
 357                        }
 358                } else {
 359                        /* disable energy detect */
 360                        ctrl &= ~PHY_M_PC_EN_DET_MSK;
 361
 362                        /* enable automatic crossover */
 363                        ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
 364
 365                        /* downshift on PHY 88E1112 and 88E1149 is changed */
 366                        if (sky2->autoneg == AUTONEG_ENABLE
 367                            && (hw->flags & SKY2_HW_NEWER_PHY)) {
 368                                /* set downshift counter to 3x and enable downshift */
 369                                ctrl &= ~PHY_M_PC_DSC_MSK;
 370                                ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
 371                        }
 372                }
 373        } else {
 374                /* workaround for deviation #4.88 (CRC errors) */
 375                /* disable Automatic Crossover */
 376
 377                ctrl &= ~PHY_M_PC_MDIX_MSK;
 378        }
 379
 380        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 381
 382        /* special setup for PHY 88E1112 Fiber */
 383        if (hw->chip_id == CHIP_ID_YUKON_XL && (hw->flags & SKY2_HW_FIBRE_PHY)) {
 384                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 385
 386                /* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
 387                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
 388                ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 389                ctrl &= ~PHY_M_MAC_MD_MSK;
 390                ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
 391                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 392
 393                if (hw->pmd_type == 'P') {
 394                        /* select page 1 to access Fiber registers */
 395                        gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
 396
 397                        /* for SFP-module set SIGDET polarity to low */
 398                        ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 399                        ctrl |= PHY_M_FIB_SIGD_POL;
 400                        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 401                }
 402
 403                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
 404        }
 405
 406        ctrl = PHY_CT_RESET;
 407        ct1000 = 0;
 408        adv = PHY_AN_CSMA;
 409        reg = 0;
 410
 411        if (sky2->autoneg == AUTONEG_ENABLE) {
 412                if (sky2_is_copper(hw)) {
 413                        if (sky2->advertising & ADVERTISED_1000baseT_Full)
 414                                ct1000 |= PHY_M_1000C_AFD;
 415                        if (sky2->advertising & ADVERTISED_1000baseT_Half)
 416                                ct1000 |= PHY_M_1000C_AHD;
 417                        if (sky2->advertising & ADVERTISED_100baseT_Full)
 418                                adv |= PHY_M_AN_100_FD;
 419                        if (sky2->advertising & ADVERTISED_100baseT_Half)
 420                                adv |= PHY_M_AN_100_HD;
 421                        if (sky2->advertising & ADVERTISED_10baseT_Full)
 422                                adv |= PHY_M_AN_10_FD;
 423                        if (sky2->advertising & ADVERTISED_10baseT_Half)
 424                                adv |= PHY_M_AN_10_HD;
 425
 426                        adv |= copper_fc_adv[sky2->flow_mode];
 427                } else {        /* special defines for FIBER (88E1040S only) */
 428                        if (sky2->advertising & ADVERTISED_1000baseT_Full)
 429                                adv |= PHY_M_AN_1000X_AFD;
 430                        if (sky2->advertising & ADVERTISED_1000baseT_Half)
 431                                adv |= PHY_M_AN_1000X_AHD;
 432
 433                        adv |= fiber_fc_adv[sky2->flow_mode];
 434                }
 435
 436                /* Restart Auto-negotiation */
 437                ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
 438        } else {
 439                /* forced speed/duplex settings */
 440                ct1000 = PHY_M_1000C_MSE;
 441
 442                /* Disable auto-update for duplex, flow control and speed */
 443                reg |= GM_GPCR_AU_ALL_DIS;
 444
 445                switch (sky2->speed) {
 446                case SPEED_1000:
 447                        ctrl |= PHY_CT_SP1000;
 448                        reg |= GM_GPCR_SPEED_1000;
 449                        break;
 450                case SPEED_100:
 451                        ctrl |= PHY_CT_SP100;
 452                        reg |= GM_GPCR_SPEED_100;
 453                        break;
 454                }
 455
 456                if (sky2->duplex == DUPLEX_FULL) {
 457                        reg |= GM_GPCR_DUP_FULL;
 458                        ctrl |= PHY_CT_DUP_MD;
 459                } else if (sky2->speed < SPEED_1000)
 460                        sky2->flow_mode = FC_NONE;
 461
 462
 463                reg |= gm_fc_disable[sky2->flow_mode];
 464
 465                /* Forward pause packets to GMAC? */
 466                if (sky2->flow_mode & FC_RX)
 467                        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
 468                else
 469                        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
 470        }
 471
 472        gma_write16(hw, port, GM_GP_CTRL, reg);
 473
 474        if (hw->flags & SKY2_HW_GIGABIT)
 475                gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
 476
 477        gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
 478        gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
 479
 480        /* Set up PHY LEDs */
 481        ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
 482        ledover = 0;
 483
 484        switch (hw->chip_id) {
 485        case CHIP_ID_YUKON_FE:
 486                /* on 88E3082 these bits are at 11..9 (shifted left) */
 487                ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
 488
 489                ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
 490
 491                /* delete ACT LED control bits */
 492                ctrl &= ~PHY_M_FELP_LED1_MSK;
 493                /* change ACT LED control to blink mode */
 494                ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
 495                gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
 496                break;
 497
 498        case CHIP_ID_YUKON_FE_P:
 499                /* Enable Link Partner Next Page */
 500                ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 501                ctrl |= PHY_M_PC_ENA_LIP_NP;
 502
 503                /* disable Energy Detect and enable scrambler */
 504                ctrl &= ~(PHY_M_PC_ENA_ENE_DT | PHY_M_PC_DIS_SCRAMB);
 505                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 506
 507                /* set LED2 -> ACT, LED1 -> LINK, LED0 -> SPEED */
 508                ctrl = PHY_M_FELP_LED2_CTRL(LED_PAR_CTRL_ACT_BL) |
 509                        PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_LINK) |
 510                        PHY_M_FELP_LED0_CTRL(LED_PAR_CTRL_SPEED);
 511
 512                gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
 513                break;
 514
 515        case CHIP_ID_YUKON_XL:
 516                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 517
 518                /* select page 3 to access LED control register */
 519                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
 520
 521                /* set LED Function Control register */
 522                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
 523                             (PHY_M_LEDC_LOS_CTRL(1) |  /* LINK/ACT */
 524                              PHY_M_LEDC_INIT_CTRL(7) | /* 10 Mbps */
 525                              PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
 526                              PHY_M_LEDC_STA0_CTRL(7)));        /* 1000 Mbps */
 527
 528                /* set Polarity Control register */
 529                gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
 530                             (PHY_M_POLC_LS1_P_MIX(4) |
 531                              PHY_M_POLC_IS0_P_MIX(4) |
 532                              PHY_M_POLC_LOS_CTRL(2) |
 533                              PHY_M_POLC_INIT_CTRL(2) |
 534                              PHY_M_POLC_STA1_CTRL(2) |
 535                              PHY_M_POLC_STA0_CTRL(2)));
 536
 537                /* restore page register */
 538                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
 539                break;
 540
 541        case CHIP_ID_YUKON_EC_U:
 542        case CHIP_ID_YUKON_EX:
 543        case CHIP_ID_YUKON_SUPR:
 544                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
 545
 546                /* select page 3 to access LED control register */
 547                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
 548
 549                /* set LED Function Control register */
 550                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
 551                             (PHY_M_LEDC_LOS_CTRL(1) |  /* LINK/ACT */
 552                              PHY_M_LEDC_INIT_CTRL(8) | /* 10 Mbps */
 553                              PHY_M_LEDC_STA1_CTRL(7) | /* 100 Mbps */
 554                              PHY_M_LEDC_STA0_CTRL(7)));/* 1000 Mbps */
 555
 556                /* set Blink Rate in LED Timer Control Register */
 557                gm_phy_write(hw, port, PHY_MARV_INT_MASK,
 558                             ledctrl | PHY_M_LED_BLINK_RT(BLINK_84MS));
 559                /* restore page register */
 560                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
 561                break;
 562
 563        default:
 564                /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
 565                ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
 566
 567                /* turn off the Rx LED (LED_RX) */
 568                ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
 569        }
 570
 571        if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_UL_2) {
 572                /* apply fixes in PHY AFE */
 573                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);
 574
 575                /* increase differential signal amplitude in 10BASE-T */
 576                gm_phy_write(hw, port, 0x18, 0xaa99);
 577                gm_phy_write(hw, port, 0x17, 0x2011);
 578
 579                if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
 580                        /* fix for IEEE A/B Symmetry failure in 1000BASE-T */
 581                        gm_phy_write(hw, port, 0x18, 0xa204);
 582                        gm_phy_write(hw, port, 0x17, 0x2002);
 583                }
 584
 585                /* set page register to 0 */
 586                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 587        } else if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
 588                   hw->chip_rev == CHIP_REV_YU_FE2_A0) {
 589                /* apply workaround for integrated resistors calibration */
 590                gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
 591                gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
 592        } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
 593                   hw->chip_id < CHIP_ID_YUKON_SUPR) {
 594                /* no effect on Yukon-XL */
 595                gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
 596
 597                if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
 598                        /* turn on 100 Mbps LED (LED_LINK100) */
 599                        ledover |= PHY_M_LED_MO_100(MO_LED_ON);
 600                }
 601
 602                if (ledover)
 603                        gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
 604
 605        }
 606
 607        /* Enable phy interrupt on auto-negotiation complete (or link up) */
 608        if (sky2->autoneg == AUTONEG_ENABLE)
 609                gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
 610        else
 611                gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
 612}
 613
 614static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
 615static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
 616
 617static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
 618{
 619        u32 reg1;
 620
 621        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 622        reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
 623        reg1 &= ~phy_power[port];
 624
 625        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
 626                reg1 |= coma_mode[port];
 627
 628        sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
 629        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 630        sky2_pci_read32(hw, PCI_DEV_REG1);
 631
 632        if (hw->chip_id == CHIP_ID_YUKON_FE)
 633                gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE);
 634        else if (hw->flags & SKY2_HW_ADV_POWER_CTL)
 635                sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
 636}
 637
 638static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port)
 639{
 640        u32 reg1;
 641        u16 ctrl;
 642
 643        /* release GPHY Control reset */
 644        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
 645
 646        /* release GMAC reset */
 647        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
 648
 649        if (hw->flags & SKY2_HW_NEWER_PHY) {
 650                /* select page 2 to access MAC control register */
 651                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
 652
 653                ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 654                /* allow GMII Power Down */
 655                ctrl &= ~PHY_M_MAC_GMIF_PUP;
 656                gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 657
 658                /* set page register back to 0 */
 659                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 660        }
 661
 662        /* setup General Purpose Control Register */
 663        gma_write16(hw, port, GM_GP_CTRL,
 664                    GM_GPCR_FL_PASS | GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
 665
 666        if (hw->chip_id != CHIP_ID_YUKON_EC) {
 667                if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
 668                        /* select page 2 to access MAC control register */
 669                        gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
 670
 671                        ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
 672                        /* enable Power Down */
 673                        ctrl |= PHY_M_PC_POW_D_ENA;
 674                        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
 675
 676                        /* set page register back to 0 */
 677                        gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
 678                }
 679
 680                /* set IEEE compatible Power Down Mode (dev. #4.99) */
 681                gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
 682        }
 683
 684        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 685        reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
 686        reg1 |= phy_power[port];                /* set PHY to PowerDown/COMA Mode */
 687        sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
 688        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 689}
 690
 691/* Force a renegotiation */
 692static void sky2_phy_reinit(struct sky2_port *sky2)
 693{
 694        spin_lock_bh(&sky2->phy_lock);
 695        sky2_phy_init(sky2->hw, sky2->port);
 696        spin_unlock_bh(&sky2->phy_lock);
 697}
 698
 699/* Put device in state to listen for Wake On Lan */
 700static void sky2_wol_init(struct sky2_port *sky2)
 701{
 702        struct sky2_hw *hw = sky2->hw;
 703        unsigned port = sky2->port;
 704        enum flow_control save_mode;
 705        u16 ctrl;
 706        u32 reg1;
 707
 708        /* Bring hardware out of reset */
 709        sky2_write16(hw, B0_CTST, CS_RST_CLR);
 710        sky2_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
 711
 712        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
 713        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
 714
 715        /* Force to 10/100
 716         * sky2_reset will re-enable on resume
 717         */
 718        save_mode = sky2->flow_mode;
 719        ctrl = sky2->advertising;
 720
 721        sky2->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
 722        sky2->flow_mode = FC_NONE;
 723
 724        spin_lock_bh(&sky2->phy_lock);
 725        sky2_phy_power_up(hw, port);
 726        sky2_phy_init(hw, port);
 727        spin_unlock_bh(&sky2->phy_lock);
 728
 729        sky2->flow_mode = save_mode;
 730        sky2->advertising = ctrl;
 731
 732        /* Set GMAC to no flow control and auto update for speed/duplex */
 733        gma_write16(hw, port, GM_GP_CTRL,
 734                    GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA|
 735                    GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS);
 736
 737        /* Set WOL address */
 738        memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR),
 739                    sky2->netdev->dev_addr, ETH_ALEN);
 740
 741        /* Turn on appropriate WOL control bits */
 742        sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT);
 743        ctrl = 0;
 744        if (sky2->wol & WAKE_PHY)
 745                ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT;
 746        else
 747                ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT;
 748
 749        if (sky2->wol & WAKE_MAGIC)
 750                ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT;
 751        else
 752                ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT;
 753
 754        ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
 755        sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
 756
 757        /* Turn on legacy PCI-Express PME mode */
 758        reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
 759        reg1 |= PCI_Y2_PME_LEGACY;
 760        sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
 761
 762        /* block receiver */
 763        sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
 764
 765}
 766
 767static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
 768{
 769        struct net_device *dev = hw->dev[port];
 770
 771        if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
 772              hw->chip_rev != CHIP_REV_YU_EX_A0) ||
 773             hw->chip_id == CHIP_ID_YUKON_FE_P ||
 774             hw->chip_id == CHIP_ID_YUKON_SUPR) {
 775                /* Yukon-Extreme B0 and further Extreme devices */
 776                /* enable Store & Forward mode for TX */
 777
 778                if (dev->mtu <= ETH_DATA_LEN)
 779                        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
 780                                     TX_JUMBO_DIS | TX_STFW_ENA);
 781
 782                else
 783                        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
 784                                     TX_JUMBO_ENA | TX_STFW_ENA);
 785        } else {
 786                if (dev->mtu <= ETH_DATA_LEN)
 787                        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
 788                else {
 789                        /* set Tx GMAC FIFO Almost Empty Threshold */
 790                        sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
 791                                     (ECU_JUMBO_WM << 16) | ECU_AE_THR);
 792
 793                        sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
 794
 795                        /* Can't do offload because of lack of store/forward */
 796                        dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
 797                }
 798        }
 799}
 800
 801static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 802{
 803        struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
 804        u16 reg;
 805        u32 rx_reg;
 806        int i;
 807        const u8 *addr = hw->dev[port]->dev_addr;
 808
 809        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
 810        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
 811
 812        sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
 813
 814        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) {
 815                /* WA DEV_472 -- looks like crossed wires on port 2 */
 816                /* clear GMAC 1 Control reset */
 817                sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
 818                do {
 819                        sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
 820                        sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
 821                } while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
 822                         gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
 823                         gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
 824        }
 825
 826        sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC));
 827
 828        /* Enable Transmit FIFO Underrun */
 829        sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK);
 830
 831        spin_lock_bh(&sky2->phy_lock);
 832        sky2_phy_power_up(hw, port);
 833        sky2_phy_init(hw, port);
 834        spin_unlock_bh(&sky2->phy_lock);
 835
 836        /* MIB clear */
 837        reg = gma_read16(hw, port, GM_PHY_ADDR);
 838        gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
 839
 840        for (i = GM_MIB_CNT_BASE; i <= GM_MIB_CNT_END; i += 4)
 841                gma_read16(hw, port, i);
 842        gma_write16(hw, port, GM_PHY_ADDR, reg);
 843
 844        /* transmit control */
 845        gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
 846
 847        /* receive control reg: unicast + multicast + no FCS  */
 848        gma_write16(hw, port, GM_RX_CTRL,
 849                    GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
 850
 851        /* transmit flow control */
 852        gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
 853
 854        /* transmit parameter */
 855        gma_write16(hw, port, GM_TX_PARAM,
 856                    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
 857                    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
 858                    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
 859                    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
 860
 861        /* serial mode register */
 862        reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
 863                GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
 864
 865        if (hw->dev[port]->mtu > ETH_DATA_LEN)
 866                reg |= GM_SMOD_JUMBO_ENA;
 867
 868        gma_write16(hw, port, GM_SERIAL_MODE, reg);
 869
 870        /* virtual address for data */
 871        gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
 872
 873        /* physical address: used for pause frames */
 874        gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
 875
 876        /* ignore counter overflows */
 877        gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
 878        gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
 879        gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
 880
 881        /* Configure Rx MAC FIFO */
 882        sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
 883        rx_reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
 884        if (hw->chip_id == CHIP_ID_YUKON_EX ||
 885            hw->chip_id == CHIP_ID_YUKON_FE_P)
 886                rx_reg |= GMF_RX_OVER_ON;
 887
 888        sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), rx_reg);
 889
 890        if (hw->chip_id == CHIP_ID_YUKON_XL) {
 891                /* Hardware errata - clear flush mask */
 892                sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), 0);
 893        } else {
 894                /* Flush Rx MAC FIFO on any flow control or error */
 895                sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
 896        }
 897
 898        /* Set threshold to 0xa (64 bytes) + 1 to work around pause bug */
 899        reg = RX_GMF_FL_THR_DEF + 1;
 900        /* Another magic mystery workaround from sk98lin */
 901        if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
 902            hw->chip_rev == CHIP_REV_YU_FE2_A0)
 903                reg = 0x178;
 904        sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), reg);
 905
 906        /* Configure Tx MAC FIFO */
 907        sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
 908        sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 909
 910        /* On chips without a RAM buffer, pause is controlled at the MAC level */
 911        if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
 912                sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
 913                sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 914
 915                sky2_set_tx_stfwd(hw, port);
 916        }
 917
 918        if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
 919            hw->chip_rev == CHIP_REV_YU_FE2_A0) {
 920                /* disable dynamic watermark */
 921                reg = sky2_read16(hw, SK_REG(port, TX_GMF_EA));
 922                reg &= ~TX_DYN_WM_ENA;
 923                sky2_write16(hw, SK_REG(port, TX_GMF_EA), reg);
 924        }
 925}
 926
 927/* Assign Ram Buffer allocation to queue */
 928static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
 929{
 930        u32 end;
 931
 932        /* convert from K bytes to qwords used for hw register */
 933        start *= 1024/8;
 934        space *= 1024/8;
 935        end = start + space - 1;
 936
 937        sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
 938        sky2_write32(hw, RB_ADDR(q, RB_START), start);
 939        sky2_write32(hw, RB_ADDR(q, RB_END), end);
 940        sky2_write32(hw, RB_ADDR(q, RB_WP), start);
 941        sky2_write32(hw, RB_ADDR(q, RB_RP), start);
 942
 943        if (q == Q_R1 || q == Q_R2) {
 944                u32 tp = space - space/4;
 945
 946                /* On receive queues set the thresholds:
 947                 * give receiver priority when > 3/4 full
 948                 * send pause when down to 2K
 949                 */
 950                sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
 951                sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
 952
 953                tp = space - 2048/8;
 954                sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
 955                sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
 956        } else {
 957                /* Enable store & forward on Tx queues because
 958                 * Tx FIFO is only 1K on Yukon
 959                 */
 960                sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
 961        }
 962
 963        sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
 964        sky2_read8(hw, RB_ADDR(q, RB_CTRL));
 965}
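/*
 * Editorial note (not in the original source): start/space arrive in
 * kilobytes while the RAM buffer registers count 8-byte qwords, hence
 * the "* 1024/8" (i.e. * 128) above.  For example a 24K receive queue at
 * offset 0 becomes start = 0, end = 24*128 - 1 = 3071 qwords, with the
 * priority thresholds at 3/4 and 1/2 of the space and the pause
 * thresholds 2K (256 qwords) below the top and at 1/4 of the space.
 */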
 966
 967/* Setup Bus Memory Interface */
 968static void sky2_qset(struct sky2_hw *hw, u16 q)
 969{
 970        sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
 971        sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
 972        sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
 973        sky2_write32(hw, Q_ADDR(q, Q_WM),  BMU_WM_DEFAULT);
 974}
 975
 976/* Setup prefetch unit registers. This is the interface between
 977 * hardware and driver list elements
 978 */
 979static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
 980                                      u64 addr, u32 last)
 981{
 982        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
 983        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
 984        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
 985        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
 986        sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
 987        sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
 988
 989        sky2_read32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL));
 990}
 991
 992static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
 993{
 994        struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
 995
 996        sky2->tx_prod = RING_NEXT(sky2->tx_prod, TX_RING_SIZE);
 997        le->ctrl = 0;
 998        return le;
 999}
1000
1001static void tx_init(struct sky2_port *sky2)
1002{
1003        struct sky2_tx_le *le;
1004
1005        sky2->tx_prod = sky2->tx_cons = 0;
1006        sky2->tx_tcpsum = 0;
1007        sky2->tx_last_mss = 0;
1008
1009        le = get_tx_le(sky2);
1010        le->addr = 0;
1011        le->opcode = OP_ADDR64 | HW_OWNER;
1012}
1013
1014static inline struct tx_ring_info *tx_le_re(struct sky2_port *sky2,
1015                                            struct sky2_tx_le *le)
1016{
1017        return sky2->tx_ring + (le - sky2->tx_le);
1018}
1019
1020/* Update chip's next pointer */
1021static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
1022{
1023        /* Make sure writes to descriptors are complete before we tell hardware */
1024        wmb();
1025        sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
1026
1027        /* Synchronize I/O since the next processor may write to the tail */
1028        mmiowb();
1029}
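/*
 * Editorial sketch (not part of the driver): the producer pattern the
 * barriers above protect.  Descriptor fields are written to normal
 * memory first; wmb() in sky2_put_idx() then orders those stores before
 * the MMIO write of the put index that hands the elements to the chip.
 * Hypothetical caller:
 */
#if 0
static void example_post_one_tx(struct sky2_hw *hw, struct sky2_port *sky2,
                                dma_addr_t map, unsigned len)
{
        struct sky2_tx_le *le = get_tx_le(sky2);

        le->addr = cpu_to_le32((u32) map);      /* 1: fill the descriptor */
        le->length = cpu_to_le16(len);
        le->opcode = OP_PACKET | HW_OWNER;

        /* 2: barrier + tell the prefetch unit about the new producer index */
        sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
}
#endif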
1030
1031
1032static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
1033{
1034        struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
1035        sky2->rx_put = RING_NEXT(sky2->rx_put, RX_LE_SIZE);
1036        le->ctrl = 0;
1037        return le;
1038}
1039
1040/* Build a hardware descriptor for one receive segment */
1041static void sky2_rx_add(struct sky2_port *sky2,  u8 op,
1042                        dma_addr_t map, unsigned len)
1043{
1044        struct sky2_rx_le *le;
1045
1046        if (sizeof(dma_addr_t) > sizeof(u32)) {
1047                le = sky2_next_rx(sky2);
1048                le->addr = cpu_to_le32(upper_32_bits(map));
1049                le->opcode = OP_ADDR64 | HW_OWNER;
1050        }
1051
1052        le = sky2_next_rx(sky2);
1053        le->addr = cpu_to_le32((u32) map);
1054        le->length = cpu_to_le16(len);
1055        le->opcode = op | HW_OWNER;
1056}
1057
1058/* Build hardware descriptors for one possibly fragmented skb */
1059static void sky2_rx_submit(struct sky2_port *sky2,
1060                           const struct rx_ring_info *re)
1061{
1062        int i;
1063
1064        sky2_rx_add(sky2, OP_PACKET, re->data_addr, sky2->rx_data_size);
1065
1066        for (i = 0; i < skb_shinfo(re->skb)->nr_frags; i++)
1067                sky2_rx_add(sky2, OP_BUFFER, re->frag_addr[i], PAGE_SIZE);
1068}
1069
1070
1071static void sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
1072                            unsigned size)
1073{
1074        struct sk_buff *skb = re->skb;
1075        int i;
1076
1077        re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1078        pci_unmap_len_set(re, data_size, size);
1079
1080        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1081                re->frag_addr[i] = pci_map_page(pdev,
1082                                                skb_shinfo(skb)->frags[i].page,
1083                                                skb_shinfo(skb)->frags[i].page_offset,
1084                                                skb_shinfo(skb)->frags[i].size,
1085                                                PCI_DMA_FROMDEVICE);
1086}
1087
1088static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
1089{
1090        struct sk_buff *skb = re->skb;
1091        int i;
1092
1093        pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
1094                         PCI_DMA_FROMDEVICE);
1095
1096        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1097                pci_unmap_page(pdev, re->frag_addr[i],
1098                               skb_shinfo(skb)->frags[i].size,
1099                               PCI_DMA_FROMDEVICE);
1100}
1101
1102/* Tell chip where to start receive checksum.
1103 * The hardware actually has two checksum start offsets, but both are set
1104 * the same to avoid possible byte order problems.
1105 */
1106static void rx_set_checksum(struct sky2_port *sky2)
1107{
1108        struct sky2_rx_le *le = sky2_next_rx(sky2);
1109
1110        le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
1111        le->ctrl = 0;
1112        le->opcode = OP_TCPSTART | HW_OWNER;
1113
1114        sky2_write32(sky2->hw,
1115                     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1116                     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1117}
1118
1119/*
1120 * The RX Stop command will not work for Yukon-2 if the BMU does not
1121 * reach the end of packet and since we can't make sure that we have
1122 * incoming data, we must reset the BMU while it is not doing a DMA
1123 * transfer. Since it is possible that the RX path is still active,
1124 * the RX RAM buffer will be stopped first, so any possible incoming
1125 * data will not trigger a DMA. After the RAM buffer is stopped, the
1126 * BMU is polled until any DMA in progress is ended and only then it
1127 * BMU is polled until any DMA in progress has ended, and only then is it
1128 * reset.
1129static void sky2_rx_stop(struct sky2_port *sky2)
1130{
1131        struct sky2_hw *hw = sky2->hw;
1132        unsigned rxq = rxqaddr[sky2->port];
1133        int i;
1134
1135        /* disable the RAM Buffer receive queue */
1136        sky2_write8(hw, RB_ADDR(rxq, RB_CTRL), RB_DIS_OP_MD);
1137
1138        for (i = 0; i < 0xffff; i++)
1139                if (sky2_read8(hw, RB_ADDR(rxq, Q_RSL))
1140                    == sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
1141                        goto stopped;
1142
1143        printk(KERN_WARNING PFX "%s: receiver stop failed\n",
1144               sky2->netdev->name);
1145stopped:
1146        sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
1147
1148        /* reset the Rx prefetch unit */
1149        sky2_write32(hw, Y2_QADDR(rxq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
1150        mmiowb();
1151}
1152
1153/* Clean out receive buffer area, assumes receiver hardware stopped */
1154static void sky2_rx_clean(struct sky2_port *sky2)
1155{
1156        unsigned i;
1157
1158        memset(sky2->rx_le, 0, RX_LE_BYTES);
1159        for (i = 0; i < sky2->rx_pending; i++) {
1160                struct rx_ring_info *re = sky2->rx_ring + i;
1161
1162                if (re->skb) {
1163                        sky2_rx_unmap_skb(sky2->hw->pdev, re);
1164                        kfree_skb(re->skb);
1165                        re->skb = NULL;
1166                }
1167        }
1168}
1169
1170/* Basic MII support */
1171static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1172{
1173        struct mii_ioctl_data *data = if_mii(ifr);
1174        struct sky2_port *sky2 = netdev_priv(dev);
1175        struct sky2_hw *hw = sky2->hw;
1176        int err = -EOPNOTSUPP;
1177
1178        if (!netif_running(dev))
1179                return -ENODEV; /* Phy still in reset */
1180
1181        switch (cmd) {
1182        case SIOCGMIIPHY:
1183                data->phy_id = PHY_ADDR_MARV;
1184
1185                /* fallthru */
1186        case SIOCGMIIREG: {
1187                u16 val = 0;
1188
1189                spin_lock_bh(&sky2->phy_lock);
1190                err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val);
1191                spin_unlock_bh(&sky2->phy_lock);
1192
1193                data->val_out = val;
1194                break;
1195        }
1196
1197        case SIOCSMIIREG:
1198                if (!capable(CAP_NET_ADMIN))
1199                        return -EPERM;
1200
1201                spin_lock_bh(&sky2->phy_lock);
1202                err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f,
1203                                   data->val_in);
1204                spin_unlock_bh(&sky2->phy_lock);
1205                break;
1206        }
1207        return err;
1208}
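/*
 * Editorial sketch (not part of the driver): how user space reaches the
 * ioctl handler above.  A minimal, hypothetical reader of the PHY status
 * register (error handling omitted); the interface name is assumed.
 */
#if 0
/* user space program, not kernel code */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        ioctl(fd, SIOCGMIIPHY, &ifr);           /* fills mii->phy_id */
        mii->reg_num = MII_BMSR;
        ioctl(fd, SIOCGMIIREG, &ifr);           /* ends up in sky2_ioctl() */
        printf("BMSR = 0x%04x\n", mii->val_out);

        close(fd);
        return 0;
}
#endif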
1209
1210#ifdef SKY2_VLAN_TAG_USED
1211static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
1212{
1213        if (onoff) {
1214                sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1215                             RX_VLAN_STRIP_ON);
1216                sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1217                             TX_VLAN_TAG_ON);
1218        } else {
1219                sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1220                             RX_VLAN_STRIP_OFF);
1221                sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1222                             TX_VLAN_TAG_OFF);
1223        }
1224}
1225
1226static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1227{
1228        struct sky2_port *sky2 = netdev_priv(dev);
1229        struct sky2_hw *hw = sky2->hw;
1230        u16 port = sky2->port;
1231
1232        netif_tx_lock_bh(dev);
1233        napi_disable(&hw->napi);
1234
1235        sky2->vlgrp = grp;
1236        sky2_set_vlan_mode(hw, port, grp != NULL);
1237
1238        sky2_read32(hw, B0_Y2_SP_LISR);
1239        napi_enable(&hw->napi);
1240        netif_tx_unlock_bh(dev);
1241}
1242#endif
1243
1244/*
1245 * Allocate an skb for receiving. If the MTU is large enough
1246 * make the skb non-linear with a fragment list of pages.
1247 */
1248static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
1249{
1250        struct sk_buff *skb;
1251        int i;
1252
1253        if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
1254                unsigned char *start;
1255                /*
1256                 * Workaround for a FIFO bug that causes a hang if the
1257                 * receive buffer is not 64 byte aligned.  The buffer
1258                 * returned from netdev_alloc_skb is aligned, except when
1259                 * slab debugging is enabled.
1260                 */
1261                skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + 8);
1262                if (!skb)
1263                        goto nomem;
1264                start = PTR_ALIGN(skb->data, 8);
1265                skb_reserve(skb, start - skb->data);
1266        } else {
1267                skb = netdev_alloc_skb(sky2->netdev,
1268                                       sky2->rx_data_size + NET_IP_ALIGN);
1269                if (!skb)
1270                        goto nomem;
1271                skb_reserve(skb, NET_IP_ALIGN);
1272        }
1273
1274        for (i = 0; i < sky2->rx_nfrags; i++) {
1275                struct page *page = alloc_page(GFP_ATOMIC);
1276
1277                if (!page)
1278                        goto free_partial;
1279                skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
1280        }
1281
1282        return skb;
1283free_partial:
1284        kfree_skb(skb);
1285nomem:
1286        return NULL;
1287}
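/*
 * Editorial note (not in the original source): the alignment fix above in
 * numbers.  If netdev_alloc_skb() returns data at an address ending in
 * ...0x1234, PTR_ALIGN(ptr, 8) rounds it up to ...0x1238 and skb_reserve()
 * of the 4-byte difference moves skb->data there; the extra 8 bytes in the
 * allocation provide the slack for that shift.
 */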
1288
1289static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
1290{
1291        sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
1292}
1293
1294/*
1295 * Allocate and setup receiver buffer pool.
1296 * In the normal case this ends up creating one list element per skb
1297 * in the receive ring. In the worst case, with a large MTU and each
1298 * allocation falling in a different 64 bit region, that results
1299 * in 6 list elements per ring entry.
1300 * One element is used for checksum enable/disable, and one
1301 * extra to avoid wrap.
1302 */
1303static int sky2_rx_start(struct sky2_port *sky2)
1304{
1305        struct sky2_hw *hw = sky2->hw;
1306        struct rx_ring_info *re;
1307        unsigned rxq = rxqaddr[sky2->port];
1308        unsigned i, size, thresh;
1309
1310        sky2->rx_put = sky2->rx_next = 0;
1311        sky2_qset(hw, rxq);
1312
1313        /* On PCI express lowering the watermark gives better performance */
1314        if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
1315                sky2_write32(hw, Q_ADDR(rxq, Q_WM), BMU_WM_PEX);
1316
1317        /* These chips have no RAM buffer;
1318         * MAC Rx RAM read is controlled by hardware */
1319        if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1320            (hw->chip_rev == CHIP_REV_YU_EC_U_A1
1321             || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
1322                sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1323
1324        sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
1325
1326        if (!(hw->flags & SKY2_HW_NEW_LE))
1327                rx_set_checksum(sky2);
1328
1329        /* Space needed for frame data + headers rounded up */
1330        size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
1331
1332        /* Stopping point for hardware truncation */
1333        thresh = (size - 8) / sizeof(u32);
1334
1335        sky2->rx_nfrags = size >> PAGE_SHIFT;
1336        BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
1337
1338        /* Compute residue after pages */
1339        size -= sky2->rx_nfrags << PAGE_SHIFT;
1340
1341        /* Optimize to handle small packets and headers */
1342        if (size < copybreak)
1343                size = copybreak;
1344        if (size < ETH_HLEN)
1345                size = ETH_HLEN;
1346
1347        sky2->rx_data_size = size;
1348
1349        /* Fill Rx ring */
1350        for (i = 0; i < sky2->rx_pending; i++) {
1351                re = sky2->rx_ring + i;
1352
1353                re->skb = sky2_rx_alloc(sky2);
1354                if (!re->skb)
1355                        goto nomem;
1356
1357                sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size);
1358                sky2_rx_submit(sky2, re);
1359        }
1360
1361        /*
1362         * The receiver hangs if it receives frames larger than the
1363         * packet buffer. As a workaround, truncate oversize frames, but
1364         * the register is limited to 9 bits, so for frames larger than
1365         * 2052 bytes the MTU must be set correctly.
1366         */
1367        if (thresh > 0x1ff)
1368                sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
1369        else {
1370                sky2_write16(hw, SK_REG(sky2->port, RX_GMF_TR_THR), thresh);
1371                sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_ON);
1372        }
1373
1374        /* Tell chip about available buffers */
1375        sky2_rx_update(sky2, rxq);
1376        return 0;
1377nomem:
1378        sky2_rx_clean(sky2);
1379        return -ENOMEM;
1380}
1381
1382/* Bring up network interface. */
1383static int sky2_up(struct net_device *dev)
1384{
1385        struct sky2_port *sky2 = netdev_priv(dev);
1386        struct sky2_hw *hw = sky2->hw;
1387        unsigned port = sky2->port;
1388        u32 imask, ramsize;
1389        int cap, err = -ENOMEM;
1390        struct net_device *otherdev = hw->dev[sky2->port^1];
1391
1392        /*
1393         * On dual port PCI-X cards, there is a problem where status
1394         * can be received out of order due to split transactions
1395         */
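            /* Clearing PCI_X_CMD_MAX_SPLIT (i.e. setting it to 0) limits the
             * adapter to one outstanding split transaction, which should
             * avoid the reordering.
             */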
1396        if (otherdev && netif_running(otherdev) &&
1397            (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
1398                u16 cmd;
1399
1400                cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
1401                cmd &= ~PCI_X_CMD_MAX_SPLIT;
1402                sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
1403
1404        }
1405
1406        if (netif_msg_ifup(sky2))
1407                printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
1408
1409        netif_carrier_off(dev);
1410
1411        /* must be power of 2 */
1412        sky2->tx_le = pci_alloc_consistent(hw->pdev,
1413                                           TX_RING_SIZE *
1414                                           sizeof(struct sky2_tx_le),
1415                                           &sky2->tx_le_map);
1416        if (!sky2->tx_le)
1417                goto err_out;
1418
1419        sky2->tx_ring = kcalloc(TX_RING_SIZE, sizeof(struct tx_ring_info),
1420                                GFP_KERNEL);
1421        if (!sky2->tx_ring)
1422                goto err_out;
1423
1424        tx_init(sky2);
1425
1426        sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
1427                                           &sky2->rx_le_map);
1428        if (!sky2->rx_le)
1429                goto err_out;
1430        memset(sky2->rx_le, 0, RX_LE_BYTES);
1431
1432        sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
1433                                GFP_KERNEL);
1434        if (!sky2->rx_ring)
1435                goto err_out;
1436
1437        sky2_mac_init(hw, port);
1438
1439        /* Register is number of 4K blocks on internal RAM buffer. */
1440        ramsize = sky2_read8(hw, B2_E_0) * 4;
1441        if (ramsize > 0) {
1442                u32 rxspace;
1443
1444                hw->flags |= SKY2_HW_RAM_BUFFER;
1445                pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
1446                if (ramsize < 16)
1447                        rxspace = ramsize / 2;
1448                else
1449                        rxspace = 8 + (2*(ramsize - 16))/3;
1450
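                    /* e.g. a 48K RAM buffer splits as rxspace = 8 + (2*32)/3 = 29K
                     * for receive and 19K for transmit; buffers under 16K split evenly.
                     */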
1451                sky2_ramset(hw, rxqaddr[port], 0, rxspace);
1452                sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
1453
1454                /* Make sure SyncQ is disabled */
1455                sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
1456                            RB_RST_SET);
1457        }
1458
1459        sky2_qset(hw, txqaddr[port]);
1460
1461        /* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
1462        if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
1463                sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
1464
1465        /* Set almost empty threshold */
1466        if (hw->chip_id == CHIP_ID_YUKON_EC_U
1467            && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
1468                sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), ECU_TXFF_LEV);
1469
1470        sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1471                           TX_RING_SIZE - 1);
1472
1473#ifdef SKY2_VLAN_TAG_USED
1474        sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1475#endif
1476
1477        err = sky2_rx_start(sky2);
1478        if (err)
1479                goto err_out;
1480
1481        /* Enable interrupts from phy/mac for port */
1482        imask = sky2_read32(hw, B0_IMSK);
1483        imask |= portirq_msk[port];
1484        sky2_write32(hw, B0_IMSK, imask);
1485
1486        sky2_set_multicast(dev);
1487        return 0;
1488
1489err_out:
1490        if (sky2->rx_le) {
1491                pci_free_consistent(hw->pdev, RX_LE_BYTES,
1492                                    sky2->rx_le, sky2->rx_le_map);
1493                sky2->rx_le = NULL;
1494        }
1495        if (sky2->tx_le) {
1496                pci_free_consistent(hw->pdev,
1497                                    TX_RING_SIZE * sizeof(struct sky2_tx_le),
1498                                    sky2->tx_le, sky2->tx_le_map);
1499                sky2->tx_le = NULL;
1500        }
1501        kfree(sky2->tx_ring);
1502        kfree(sky2->rx_ring);
1503
1504        sky2->tx_ring = NULL;
1505        sky2->rx_ring = NULL;
1506        return err;
1507}
1508
1509/* Modular subtraction in ring */
1510static inline int tx_dist(unsigned tail, unsigned head)
1511{
1512        return (head - tail) & (TX_RING_SIZE - 1);
1513}
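    /* e.g. with TX_RING_SIZE 512, tx_dist(510, 2) = (2 - 510) & 511 = 4 */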
1514
1515/* Number of list elements available for next tx */
1516static inline int tx_avail(const struct sky2_port *sky2)
1517{
1518        return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
1519}
1520
1521/* Estimate of number of transmit list elements required */
1522static unsigned tx_le_req(const struct sk_buff *skb)
1523{
1524        unsigned count;
1525
1526        count = sizeof(dma_addr_t) / sizeof(u32);
1527        count += skb_shinfo(skb)->nr_frags * count;
1528
1529        if (skb_is_gso(skb))
1530                ++count;
1531
1532        if (skb->ip_summed == CHECKSUM_PARTIAL)
1533                ++count;
1534
1535        return count;
1536}
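    /* e.g. with a 64 bit dma_addr_t, a TSO packet with 3 fragments and
     * CHECKSUM_PARTIAL is estimated at 2 + 3*2 + 1 + 1 = 10 list elements.
     */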
1537
1538/*
1539 * Put one packet in ring for transmit.
1540 * A single packet can generate multiple list elements, and
1541 * the number of list elements actually used will probably be less
1542 * than the worst-case estimate from tx_le_req().
1543 */
1544static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1545{
1546        struct sky2_port *sky2 = netdev_priv(dev);
1547        struct sky2_hw *hw = sky2->hw;
1548        struct sky2_tx_le *le = NULL;
1549        struct tx_ring_info *re;
1550        unsigned i, len;
1551        dma_addr_t mapping;
1552        u16 mss;
1553        u8 ctrl;
1554
1555        if (unlikely(tx_avail(sky2) < tx_le_req(skb)))
1556                return NETDEV_TX_BUSY;
1557
1558        if (unlikely(netif_msg_tx_queued(sky2)))
1559                printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
1560                       dev->name, sky2->tx_prod, skb->len);
1561
1562        len = skb_headlen(skb);
1563        mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
1564
1565        /* Send high bits if needed */
1566        if (sizeof(dma_addr_t) > sizeof(u32)) {
1567                le = get_tx_le(sky2);
1568                le->addr = cpu_to_le32(upper_32_bits(mapping));
1569                le->opcode = OP_ADDR64 | HW_OWNER;
1570        }
1571
1572        /* Check for TCP Segmentation Offload */
1573        mss = skb_shinfo(skb)->gso_size;
1574        if (mss != 0) {
1575
1576                if (!(hw->flags & SKY2_HW_NEW_LE))
1577                        mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1578
1579                if (mss != sky2->tx_last_mss) {
1580                        le = get_tx_le(sky2);
1581                        le->addr = cpu_to_le32(mss);
1582
1583                        if (hw->flags & SKY2_HW_NEW_LE)
1584                                le->opcode = OP_MSS | HW_OWNER;
1585                        else
1586                                le->opcode = OP_LRGLEN | HW_OWNER;
1587                        sky2->tx_last_mss = mss;
1588                }
1589        }
1590
1591        ctrl = 0;
1592#ifdef SKY2_VLAN_TAG_USED
1593        /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1594        if (sky2->vlgrp && vlan_tx_tag_present(skb)) {
1595                if (!le) {
1596                        le = get_tx_le(sky2);
1597                        le->addr = 0;
1598                        le->opcode = OP_VLAN|HW_OWNER;
1599                } else
1600                        le->opcode |= OP_VLAN;
1601                le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1602                ctrl |= INS_VLAN;
1603        }
1604#endif
1605
1606        /* Handle TCP checksum offload */
1607        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1608                /* On some Yukon EX versions the checksum encoding changed. */
1609                if (hw->flags & SKY2_HW_AUTO_TX_SUM)
1610                        ctrl |= CALSUM; /* auto checksum */
1611                else {
1612                        const unsigned offset = skb_transport_offset(skb);
1613                        u32 tcpsum;
1614
1615                        tcpsum = offset << 16;                  /* sum start */
1616                        tcpsum |= offset + skb->csum_offset;    /* sum write */
1617
1618                        ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1619                        if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1620                                ctrl |= UDPTCP;
1621
1622                        if (tcpsum != sky2->tx_tcpsum) {
1623                                sky2->tx_tcpsum = tcpsum;
1624
1625                                le = get_tx_le(sky2);
1626                                le->addr = cpu_to_le32(tcpsum);
1627                                le->length = 0; /* initial checksum value */
1628                                le->ctrl = 1;   /* one packet */
1629                                le->opcode = OP_TCPLISW | HW_OWNER;
1630                        }
1631                }
1632        }
1633
1634        le = get_tx_le(sky2);
1635        le->addr = cpu_to_le32((u32) mapping);
1636        le->length = cpu_to_le16(len);
1637        le->ctrl = ctrl;
1638        le->opcode = mss ? (OP_LARGESEND | HW_OWNER) : (OP_PACKET | HW_OWNER);
1639
1640        re = tx_le_re(sky2, le);
1641        re->skb = skb;
1642        pci_unmap_addr_set(re, mapaddr, mapping);
1643        pci_unmap_len_set(re, maplen, len);
1644
1645        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1646                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1647
1648                mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
1649                                       frag->size, PCI_DMA_TODEVICE);
1650
1651                if (sizeof(dma_addr_t) > sizeof(u32)) {
1652                        le = get_tx_le(sky2);
1653                        le->addr = cpu_to_le32(upper_32_bits(mapping));
1654                        le->ctrl = 0;
1655                        le->opcode = OP_ADDR64 | HW_OWNER;
1656                }
1657
1658                le = get_tx_le(sky2);
1659                le->addr = cpu_to_le32((u32) mapping);
1660                le->length = cpu_to_le16(frag->size);
1661                le->ctrl = ctrl;
1662                le->opcode = OP_BUFFER | HW_OWNER;
1663
1664                re = tx_le_re(sky2, le);
1665                re->skb = skb;
1666                pci_unmap_addr_set(re, mapaddr, mapping);
1667                pci_unmap_len_set(re, maplen, frag->size);
1668        }
1669
1670        le->ctrl |= EOP;
1671
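            /* Stop the queue while there may not be room for another
             * worst-case packet; sky2_tx_complete() wakes it again once
             * enough list elements have been freed.
             */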
1672        if (tx_avail(sky2) <= MAX_SKB_TX_LE)
1673                netif_stop_queue(dev);
1674
1675        sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
1676
1677        dev->trans_start = jiffies;
1678        return NETDEV_TX_OK;
1679}
1680
1681/*
1682 * Free ring elements starting at tx_cons until "done"
1683 *
1684 * NB: the hardware will tell us about partial completion of multi-part
1685 *     buffers, so make sure not to free the skb too early.
1686 */
1687static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1688{
1689        struct net_device *dev = sky2->netdev;
1690        struct pci_dev *pdev = sky2->hw->pdev;
1691        unsigned idx;
1692
1693        BUG_ON(done >= TX_RING_SIZE);
1694
1695        for (idx = sky2->tx_cons; idx != done;
1696             idx = RING_NEXT(idx, TX_RING_SIZE)) {
1697                struct sky2_tx_le *le = sky2->tx_le + idx;
1698                struct tx_ring_info *re = sky2->tx_ring + idx;
1699
1700                switch(le->opcode & ~HW_OWNER) {
1701                case OP_LARGESEND:
1702                case OP_PACKET:
1703                        pci_unmap_single(pdev,
1704                                         pci_unmap_addr(re, mapaddr),
1705                                         pci_unmap_len(re, maplen),
1706                                         PCI_DMA_TODEVICE);
1707                        break;
1708                case OP_BUFFER:
1709                        pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
1710                                       pci_unmap_len(re, maplen),
1711                                       PCI_DMA_TODEVICE);
1712                        break;
1713                }
1714
1715                if (le->ctrl & EOP) {
1716                        if (unlikely(netif_msg_tx_done(sky2)))
1717                                printk(KERN_DEBUG "%s: tx done %u\n",
1718                                       dev->name, idx);
1719
1720                        dev->stats.tx_packets++;
1721                        dev->stats.tx_bytes += re->skb->len;
1722
1723                        dev_kfree_skb_any(re->skb);
1724                        sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE);
1725                }
1726        }
1727
1728        sky2->tx_cons = idx;
1729        smp_mb();
1730
1731        if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
1732                netif_wake_queue(dev);
1733}
1734
1735/* Clean up all untransmitted buffers, assuming the transmitter is not running */
1736static void sky2_tx_clean(struct net_device *dev)
1737{
1738        struct sky2_port *sky2 = netdev_priv(dev);
1739
1740        netif_tx_lock_bh(dev);
1741        sky2_tx_complete(sky2, sky2->tx_prod);
1742        netif_tx_unlock_bh(dev);
1743}
1744
1745/* Network shutdown */
1746static int sky2_down(struct net_device *dev)
1747{
1748        struct sky2_port *sky2 = netdev_priv(dev);
1749        struct sky2_hw *hw = sky2->hw;
1750        unsigned port = sky2->port;
1751        u16 ctrl;
1752        u32 imask;
1753
1754        /* Never really got started! */
1755        if (!sky2->tx_le)
1756                return 0;
1757
1758        if (netif_msg_ifdown(sky2))
1759                printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
1760
1761        /* Disable port IRQ */
1762        imask = sky2_read32(hw, B0_IMSK);
1763        imask &= ~portirq_msk[port];
1764        sky2_write32(hw, B0_IMSK, imask);
1765
1766        synchronize_irq(hw->pdev->irq);
1767
1768        sky2_gmac_reset(hw, port);
1769
1770        /* Stop transmitter */
1771        sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
1772        sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
1773
1774        sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
1775                     RB_RST_SET | RB_DIS_OP_MD);
1776
1777        ctrl = gma_read16(hw, port, GM_GP_CTRL);
1778        ctrl &= ~(GM_GPCR_TX_ENA | GM_GPCR_RX_ENA);
1779        gma_write16(hw, port, GM_GP_CTRL, ctrl);
1780
1781        /* Make sure no packets are pending */
1782        napi_synchronize(&hw->napi);
1783
1784        sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1785
1786        /* Workaround shared GMAC reset */
1787        if (!(hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
1788              && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
1789                sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1790
1791        /* Disable Force Sync bit and Enable Alloc bit */
1792        sky2_write8(hw, SK_REG(port, TXA_CTRL),
1793                    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1794
1795        /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1796        sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
1797        sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
1798
1799        /* Reset the PCI FIFO of the async Tx queue */
1800        sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR),
1801                     BMU_RST_SET | BMU_FIFO_RST);
1802
1803        /* Reset the Tx prefetch units */
1804        sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
1805                     PREF_UNIT_RST_SET);
1806
1807        sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
1808
1809        sky2_rx_stop(sky2);
1810
1811        sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
1812        sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
1813
1814        sky2_phy_power_down(hw, port);
1815
1816        /* turn off LEDs */
1817        sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
1818
1819        sky2_tx_clean(dev);
1820        sky2_rx_clean(sky2);
1821
1822        pci_free_consistent(hw->pdev, RX_LE_BYTES,
1823                            sky2->rx_le, sky2->rx_le_map);
1824        kfree(sky2->rx_ring);
1825
1826        pci_free_consistent(hw->pdev,
1827                            TX_RING_SIZE * sizeof(struct sky2_tx_le),
1828                            sky2->tx_le, sky2->tx_le_map);
1829        kfree(sky2->tx_ring);
1830
1831        sky2->tx_le = NULL;
1832        sky2->rx_le = NULL;
1833
1834        sky2->rx_ring = NULL;
1835        sky2->tx_ring = NULL;
1836
1837        return 0;
1838}
1839
1840static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
1841{
1842        if (hw->flags & SKY2_HW_FIBRE_PHY)
1843                return SPEED_1000;
1844
1845        if (!(hw->flags & SKY2_HW_GIGABIT)) {
1846                if (aux & PHY_M_PS_SPEED_100)
1847                        return SPEED_100;
1848                else
1849                        return SPEED_10;
1850        }
1851
1852        switch (aux & PHY_M_PS_SPEED_MSK) {
1853        case PHY_M_PS_SPEED_1000:
1854                return SPEED_1000;
1855        case PHY_M_PS_SPEED_100:
1856                return SPEED_100;
1857        default:
1858                return SPEED_10;
1859        }
1860}
1861
1862static void sky2_link_up(struct sky2_port *sky2)
1863{
1864        struct sky2_hw *hw = sky2->hw;
1865        unsigned port = sky2->port;
1866        u16 reg;
1867        static const char *fc_name[] = {
1868                [FC_NONE]       = "none",
1869                [FC_TX]         = "tx",
1870                [FC_RX]         = "rx",
1871                [FC_BOTH]       = "both",
1872        };
1873
1874        /* enable Rx/Tx */
1875        reg = gma_read16(hw, port, GM_GP_CTRL);
1876        reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1877        gma_write16(hw, port, GM_GP_CTRL, reg);
1878
1879        gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1880
1881        netif_carrier_on(sky2->netdev);
1882
1883        mod_timer(&hw->watchdog_timer, jiffies + 1);
1884
1885        /* Turn on link LED */
1886        sky2_write8(hw, SK_REG(port, LNK_LED_REG),
1887                    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
1888
1889        if (netif_msg_link(sky2))
1890                printk(KERN_INFO PFX
1891                       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
1892                       sky2->netdev->name, sky2->speed,
1893                       sky2->duplex == DUPLEX_FULL ? "full" : "half",
1894                       fc_name[sky2->flow_status]);
1895}
1896
1897static void sky2_link_down(struct sky2_port *sky2)
1898{
1899        struct sky2_hw *hw = sky2->hw;
1900        unsigned port = sky2->port;
1901        u16 reg;
1902
1903        gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
1904
1905        reg = gma_read16(hw, port, GM_GP_CTRL);
1906        reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
1907        gma_write16(hw, port, GM_GP_CTRL, reg);
1908
1909        netif_carrier_off(sky2->netdev);
1910
1911        /* Turn off link LED */
1912        sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
1913
1914        if (netif_msg_link(sky2))
1915                printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
1916
1917        sky2_phy_init(hw, port);
1918}
1919
1920static enum flow_control sky2_flow(int rx, int tx)
1921{
1922        if (rx)
1923                return tx ? FC_BOTH : FC_RX;
1924        else
1925                return tx ? FC_TX : FC_NONE;
1926}
1927
1928static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
1929{
1930        struct sky2_hw *hw = sky2->hw;
1931        unsigned port = sky2->port;
1932        u16 advert, lpa;
1933
1934        advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
1935        lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
1936        if (lpa & PHY_M_AN_RF) {
1937                printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
1938                return -1;
1939        }
1940
1941        if (!(aux & PHY_M_PS_SPDUP_RES)) {
1942                printk(KERN_ERR PFX "%s: speed/duplex mismatch",
1943                       sky2->netdev->name);
1944                return -1;
1945        }
1946
1947        sky2->speed = sky2_phy_speed(hw, aux);
1948        sky2->duplex = (aux & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
1949
1950        /* Since the pause result bits seem to be in different positions on
1951         * different chips, look at the advertisement and link partner registers.
1952         */
1953        if (hw->flags & SKY2_HW_FIBRE_PHY) {
1954                /* Shift for bits in fiber PHY */
1955                advert &= ~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM);
1956                lpa &= ~(LPA_PAUSE_CAP|LPA_PAUSE_ASYM);
1957
1958                if (advert & ADVERTISE_1000XPAUSE)
1959                        advert |= ADVERTISE_PAUSE_CAP;
1960                if (advert & ADVERTISE_1000XPSE_ASYM)
1961                        advert |= ADVERTISE_PAUSE_ASYM;
1962                if (lpa & LPA_1000XPAUSE)
1963                        lpa |= LPA_PAUSE_CAP;
1964                if (lpa & LPA_1000XPAUSE_ASYM)
1965                        lpa |= LPA_PAUSE_ASYM;
1966        }
1967
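            /* Resolve flow control from our advertisement and the link
             * partner ability: pause both ways only if both ends advertise
             * symmetric pause, otherwise asymmetric pause selects rx or tx.
             */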
1968        sky2->flow_status = FC_NONE;
1969        if (advert & ADVERTISE_PAUSE_CAP) {
1970                if (lpa & LPA_PAUSE_CAP)
1971                        sky2->flow_status = FC_BOTH;
1972                else if (advert & ADVERTISE_PAUSE_ASYM)
1973                        sky2->flow_status = FC_RX;
1974        } else if (advert & ADVERTISE_PAUSE_ASYM) {
1975                if ((lpa & LPA_PAUSE_CAP) && (lpa & LPA_PAUSE_ASYM))
1976                        sky2->flow_status = FC_TX;
1977        }
1978
1979        if (sky2->duplex == DUPLEX_HALF && sky2->speed < SPEED_1000
1980            && !(hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX))
1981                sky2->flow_status = FC_NONE;
1982
1983        if (sky2->flow_status & FC_TX)
1984                sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
1985        else
1986                sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1987
1988        return 0;
1989}
1990
1991/* Interrupt from PHY */
1992static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
1993{
1994        struct net_device *dev = hw->dev[port];
1995        struct sky2_port *sky2 = netdev_priv(dev);
1996        u16 istatus, phystat;
1997
1998        if (!netif_running(dev))
1999                return;
2000
2001        spin_lock(&sky2->phy_lock);
2002        istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2003        phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2004
2005        if (netif_msg_intr(sky2))
2006                printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
2007                       sky2->netdev->name, istatus, phystat);
2008
2009        if (sky2->autoneg == AUTONEG_ENABLE && (istatus & PHY_M_IS_AN_COMPL)) {
2010                if (sky2_autoneg_done(sky2, phystat) == 0)
2011                        sky2_link_up(sky2);
2012                goto out;
2013        }
2014
2015        if (istatus & PHY_M_IS_LSP_CHANGE)
2016                sky2->speed = sky2_phy_speed(hw, phystat);
2017
2018        if (istatus & PHY_M_IS_DUP_CHANGE)
2019                sky2->duplex =
2020                    (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
2021
2022        if (istatus & PHY_M_IS_LST_CHANGE) {
2023                if (phystat & PHY_M_PS_LINK_UP)
2024                        sky2_link_up(sky2);
2025                else
2026                        sky2_link_down(sky2);
2027        }
2028out:
2029        spin_unlock(&sky2->phy_lock);
2030}
2031
2032/* Transmit timeout is only called if we are running, carrier is up
2033 * and tx queue is full (stopped).
2034 */
2035static void sky2_tx_timeout(struct net_device *dev)
2036{
2037        struct sky2_port *sky2 = netdev_priv(dev);
2038        struct sky2_hw *hw = sky2->hw;
2039
2040        if (netif_msg_timer(sky2))
2041                printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
2042
2043        printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
2044               dev->name, sky2->tx_cons, sky2->tx_prod,
2045               sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
2046               sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
2047
2048        /* can't restart safely under softirq */
2049        schedule_work(&hw->restart_work);
2050}
2051
2052static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2053{
2054        struct sky2_port *sky2 = netdev_priv(dev);
2055        struct sky2_hw *hw = sky2->hw;
2056        unsigned port = sky2->port;
2057        int err;
2058        u16 ctl, mode;
2059        u32 imask;
2060
2061        if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2062                return -EINVAL;
2063
2064        if (new_mtu > ETH_DATA_LEN &&
2065            (hw->chip_id == CHIP_ID_YUKON_FE ||
2066             hw->chip_id == CHIP_ID_YUKON_FE_P))
2067                return -EINVAL;
2068
2069        if (!netif_running(dev)) {
2070                dev->mtu = new_mtu;
2071                return 0;
2072        }
2073
2074        imask = sky2_read32(hw, B0_IMSK);
2075        sky2_write32(hw, B0_IMSK, 0);
2076
2077        dev->trans_start = jiffies;     /* prevent tx timeout */
2078        netif_stop_queue(dev);
2079        napi_disable(&hw->napi);
2080
2081        synchronize_irq(hw->pdev->irq);
2082
2083        if (!(hw->flags & SKY2_HW_RAM_BUFFER))
2084                sky2_set_tx_stfwd(hw, port);
2085
2086        ctl = gma_read16(hw, port, GM_GP_CTRL);
2087        gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
2088        sky2_rx_stop(sky2);
2089        sky2_rx_clean(sky2);
2090
2091        dev->mtu = new_mtu;
2092
2093        mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
2094                GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
2095
2096        if (dev->mtu > ETH_DATA_LEN)
2097                mode |= GM_SMOD_JUMBO_ENA;
2098
2099        gma_write16(hw, port, GM_SERIAL_MODE, mode);
2100
2101        sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
2102
2103        err = sky2_rx_start(sky2);
2104        sky2_write32(hw, B0_IMSK, imask);
2105
2106        sky2_read32(hw, B0_Y2_SP_LISR);
2107        napi_enable(&hw->napi);
2108
2109        if (err)
2110                dev_close(dev);
2111        else {
2112                gma_write16(hw, port, GM_GP_CTRL, ctl);
2113
2114                netif_wake_queue(dev);
2115        }
2116
2117        return err;
2118}
2119
2120/* For small packets, copy into a new skb and reuse the existing skb for the next receive */
2121static struct sk_buff *receive_copy(struct sky2_port *sky2,
2122                                    const struct rx_ring_info *re,
2123                                    unsigned length)
2124{
2125        struct sk_buff *skb;
2126
2127        skb = netdev_alloc_skb(sky2->netdev, length + 2);
2128        if (likely(skb)) {
2129                skb_reserve(skb, 2);
2130                pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
2131                                            length, PCI_DMA_FROMDEVICE);
2132                skb_copy_from_linear_data(re->skb, skb->data, length);
2133                skb->ip_summed = re->skb->ip_summed;
2134                skb->csum = re->skb->csum;
2135                pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
2136                                               length, PCI_DMA_FROMDEVICE);
2137                re->skb->ip_summed = CHECKSUM_NONE;
2138                skb_put(skb, length);
2139        }
2140        return skb;
2141}
2142
2143/* Adjust length of skb with fragments to match received data */
2144static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
2145                          unsigned int length)
2146{
2147        int i, num_frags;
2148        unsigned int size;
2149
2150        /* put header into skb */
2151        size = min(length, hdr_space);
2152        skb->tail += size;
2153        skb->len += size;
2154        length -= size;
2155
2156        num_frags = skb_shinfo(skb)->nr_frags;
2157        for (i = 0; i < num_frags; i++) {
2158                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2159
2160                if (length == 0) {
2161                        /* don't need this page */
2162                        __free_page(frag->page);
2163                        --skb_shinfo(skb)->nr_frags;
2164                } else {
2165                        size = min(length, (unsigned) PAGE_SIZE);
2166
2167                        frag->size = size;
2168                        skb->data_len += size;
2169                        skb->truesize += size;
2170                        skb->len += size;
2171                        length -= size;
2172                }
2173        }
2174}
2175
2176/* Normal packet - take the skb from the ring element and replace it with a new one */
2177static struct sk_buff *receive_new(struct sky2_port *sky2,
2178                                   struct rx_ring_info *re,
2179                                   unsigned int length)
2180{
2181        struct sk_buff *skb, *nskb;
2182        unsigned hdr_space = sky2->rx_data_size;
2183
2184        /* Don't be tricky about reusing pages (yet) */
2185        nskb = sky2_rx_alloc(sky2);
2186        if (unlikely(!nskb))
2187                return NULL;
2188
2189        skb = re->skb;
2190        sky2_rx_unmap_skb(sky2->hw->pdev, re);
2191
2192        prefetch(skb->data);
2193        re->skb = nskb;
2194        sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space);
2195
2196        if (skb_shinfo(skb)->nr_frags)
2197                skb_put_frags(skb, hdr_space, length);
2198        else
2199                skb_put(skb, length);
2200        return skb;
2201}
2202
2203/*
2204 * Receive one packet.
2205 * For larger packets, get a new buffer.
2206 */
2207static struct sk_buff *sky2_receive(struct net_device *dev,
2208                                    u16 length, u32 status)
2209{
2210        struct sky2_port *sky2 = netdev_priv(dev);
2211        struct rx_ring_info *re = sky2->rx_ring + sky2->rx_next;
2212        struct sk_buff *skb = NULL;
2213        u16 count = (status & GMR_FS_LEN) >> 16;
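            /* The upper 16 bits of the rx status carry the frame length as
             * seen by the hardware; it is compared with the DMA length below
             * to detect truncated frames.
             */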
2214
2215#ifdef SKY2_VLAN_TAG_USED
2216        /* Account for vlan tag */
2217        if (sky2->vlgrp && (status & GMR_FS_VLAN))
2218                count -= VLAN_HLEN;
2219#endif
2220
2221        if (unlikely(netif_msg_rx_status(sky2)))
2222                printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
2223                       dev->name, sky2->rx_next, status, length);
2224
2225        sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
2226        prefetch(sky2->rx_ring + sky2->rx_next);
2227
2228        /* This chip has hardware problems that generate bogus status.
2229         * So do only marginal checking and expect higher level protocols
2230         * to handle crap frames.
2231         */
2232        if (sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
2233            sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0 &&
2234            length != count)
2235                goto okay;
2236
2237        if (status & GMR_FS_ANY_ERR)
2238                goto error;
2239
2240        if (!(status & GMR_FS_RX_OK))
2241                goto resubmit;
2242
2243        /* if length reported by DMA does not match PHY, packet was truncated */
2244        if (length != count)
2245                goto len_error;
2246
2247okay:
2248        if (length < copybreak)
2249                skb = receive_copy(sky2, re, length);
2250        else
2251                skb = receive_new(sky2, re, length);
2252resubmit:
2253        sky2_rx_submit(sky2, re);
2254
2255        return skb;
2256
2257len_error:
2258        /* Truncation of overlength packets
2259           causes PHY length to not match MAC length */
2260        ++dev->stats.rx_length_errors;
2261        if (netif_msg_rx_err(sky2) && net_ratelimit())
2262                pr_info(PFX "%s: rx length error: status %#x length %d\n",
2263                        dev->name, status, length);
2264        goto resubmit;
2265
2266error:
2267        ++dev->stats.rx_errors;
2268        if (status & GMR_FS_RX_FF_OV) {
2269                dev->stats.rx_over_errors++;
2270                goto resubmit;
2271        }
2272
2273        if (netif_msg_rx_err(sky2) && net_ratelimit())
2274                printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
2275                       dev->name, status, length);
2276
2277        if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
2278                dev->stats.rx_length_errors++;
2279        if (status & GMR_FS_FRAGMENT)
2280                dev->stats.rx_frame_errors++;
2281        if (status & GMR_FS_CRC_ERR)
2282                dev->stats.rx_crc_errors++;
2283
2284        goto resubmit;
2285}
2286
2287/* Transmit complete */
2288static inline void sky2_tx_done(struct net_device *dev, u16 last)
2289{
2290        struct sky2_port *sky2 = netdev_priv(dev);
2291
2292        if (netif_running(dev)) {
2293                netif_tx_lock(dev);
2294                sky2_tx_complete(sky2, last);
2295                netif_tx_unlock(dev);
2296        }
2297}
2298
2299/* Process status response ring */
2300static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2301{
2302        int work_done = 0;
2303        unsigned rx[2] = { 0, 0 };
2304
2305        rmb();
2306        do {
2307                struct sky2_port *sky2;
2308                struct sky2_status_le *le  = hw->st_le + hw->st_idx;
2309                unsigned port;
2310                struct net_device *dev;
2311                struct sk_buff *skb;
2312                u32 status;
2313                u16 length;
2314                u8 opcode = le->opcode;
2315
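                    /* The hardware sets HW_OWNER when it has written a status
                     * entry; stop at the first entry it has not filled in yet.
                     */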
2316                if (!(opcode & HW_OWNER))
2317                        break;
2318
2319                hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
2320
2321                port = le->css & CSS_LINK_BIT;
2322                dev = hw->dev[port];
2323                sky2 = netdev_priv(dev);
2324                length = le16_to_cpu(le->length);
2325                status = le32_to_cpu(le->status);
2326
2327                le->opcode = 0;
2328                switch (opcode & ~HW_OWNER) {
2329                case OP_RXSTAT:
2330                        ++rx[port];
2331                        skb = sky2_receive(dev, length, status);
2332                        if (unlikely(!skb)) {
2333                                dev->stats.rx_dropped++;
2334                                break;
2335                        }
2336
2337                        /* This chip reports checksum status differently */
2338                        if (hw->flags & SKY2_HW_NEW_LE) {
2339                                if (sky2->rx_csum &&
2340                                    (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
2341                                    (le->css & CSS_TCPUDPCSOK))
2342                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
2343                                else
2344                                        skb->ip_summed = CHECKSUM_NONE;
2345                        }
2346
2347                        skb->protocol = eth_type_trans(skb, dev);
2348                        dev->stats.rx_packets++;
2349                        dev->stats.rx_bytes += skb->len;
2350                        dev->last_rx = jiffies;
2351
2352#ifdef SKY2_VLAN_TAG_USED
2353                        if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
2354                                vlan_hwaccel_receive_skb(skb,
2355                                                         sky2->vlgrp,
2356                                                         be16_to_cpu(sky2->rx_tag));
2357                        } else
2358#endif
2359                                netif_receive_skb(skb);
2360
2361                        /* Stop after net poll weight */
2362                        if (++work_done >= to_do)
2363                                goto exit_loop;
2364                        break;
2365
2366#ifdef SKY2_VLAN_TAG_USED
2367                case OP_RXVLAN:
2368                        sky2->rx_tag = length;
2369                        break;
2370
2371                case OP_RXCHKSVLAN:
2372                        sky2->rx_tag = length;
2373                        /* fall through */
2374#endif
2375                case OP_RXCHKS:
2376                        if (!sky2->rx_csum)
2377                                break;
2378
2379                        /* If this happens then the driver is assuming the wrong format */
2380                        if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
2381                                if (net_ratelimit())
2382                                        printk(KERN_NOTICE "%s: unexpected"
2383                                               " checksum status\n",
2384                                               dev->name);
2385                                break;
2386                        }
2387
2388                        /* Both checksum counters are programmed to start at
2389                         * the same offset, so unless there is a problem they
2390                         * should match. This failure is an early indication that
2391                         * hardware receive checksumming won't work.
2392                         */
2393                        if (likely(status >> 16 == (status & 0xffff))) {
2394                                skb = sky2->rx_ring[sky2->rx_next].skb;
2395                                skb->ip_summed = CHECKSUM_COMPLETE;
2396                                skb->csum = status & 0xffff;
2397                        } else {
2398                                printk(KERN_NOTICE PFX "%s: hardware receive "
2399                                       "checksum problem (status = %#x)\n",
2400                                       dev->name, status);
2401                                sky2->rx_csum = 0;
2402                                sky2_write32(sky2->hw,
2403                                             Q_ADDR(rxqaddr[port], Q_CSR),
2404                                             BMU_DIS_RX_CHKSUM);
2405                        }
2406                        break;
2407
2408                case OP_TXINDEXLE:
2409                        /* TX index reports status for both ports */
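                            /* Port 0's done index is the low 12 bits of status;
                             * port 1's comes from status[31:24] with the low
                             * nibble of length as the top four bits.
                             */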
2410                        BUILD_BUG_ON(TX_RING_SIZE > 0x1000);
2411                        sky2_tx_done(hw->dev[0], status & 0xfff);
2412                        if (hw->dev[1])
2413                                sky2_tx_done(hw->dev[1],
2414                                     ((status >> 24) & 0xff)
2415                                             | (u16)(length & 0xf) << 8);
2416                        break;
2417
2418                default:
2419                        if (net_ratelimit())
2420                                printk(KERN_WARNING PFX
2421                                       "unknown status opcode 0x%x\n", opcode);
2422                }
2423        } while (hw->st_idx != idx);
2424
2425        /* Fully processed status ring so clear irq */
2426        sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2427
2428exit_loop:
2429        if (rx[0])
2430                sky2_rx_update(netdev_priv(hw->dev[0]), Q_R1);
2431
2432        if (rx[1])
2433                sky2_rx_update(netdev_priv(hw->dev[1]), Q_R2);
2434
2435        return work_done;
2436}
2437
2438static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
2439{
2440        struct net_device *dev = hw->dev[port];
2441
2442        if (net_ratelimit())
2443                printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
2444                       dev->name, status);
2445
2446        if (status & Y2_IS_PAR_RD1) {
2447                if (net_ratelimit())
2448                        printk(KERN_ERR PFX "%s: ram data read parity error\n",
2449                               dev->name);
2450                /* Clear IRQ */
2451                sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
2452        }
2453
2454        if (status & Y2_IS_PAR_WR1) {
2455                if (net_ratelimit())
2456                        printk(KERN_ERR PFX "%s: ram data write parity error\n",
2457                               dev->name);
2458
2459                sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
2460        }
2461
2462        if (status & Y2_IS_PAR_MAC1) {
2463                if (net_ratelimit())
2464                        printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
2465                sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
2466        }
2467
2468        if (status & Y2_IS_PAR_RX1) {
2469                if (net_ratelimit())
2470                        printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
2471                sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
2472        }
2473
2474        if (status & Y2_IS_TCP_TXA1) {
2475                if (net_ratelimit())
2476                        printk(KERN_ERR PFX "%s: TCP segmentation error\n",
2477                               dev->name);
2478                sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
2479        }
2480}
2481
2482static void sky2_hw_intr(struct sky2_hw *hw)
2483{
2484        struct pci_dev *pdev = hw->pdev;
2485        u32 status = sky2_read32(hw, B0_HWE_ISRC);
2486        u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
2487
2488        status &= hwmsk;
2489
2490        if (status & Y2_IS_TIST_OV)
2491                sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2492
2493        if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
2494                u16 pci_err;
2495
2496                sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2497                pci_err = sky2_pci_read16(hw, PCI_STATUS);
2498                if (net_ratelimit())
2499                        dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
2500                                pci_err);
2501
2502                sky2_pci_write16(hw, PCI_STATUS,
2503                                      pci_err | PCI_STATUS_ERROR_BITS);
2504                sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2505        }
2506
2507        if (status & Y2_IS_PCI_EXP) {
2508                /* PCI-Express uncorrectable Error occurred */
2509                u32 err;
2510
2511                sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2512                err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2513                sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2514                             0xfffffffful);
2515                if (net_ratelimit())
2516                        dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
2517
2518                sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
2519                sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2520        }
2521
2522        if (status & Y2_HWE_L1_MASK)
2523                sky2_hw_error(hw, 0, status);
2524        status >>= 8;
2525        if (status & Y2_HWE_L1_MASK)
2526                sky2_hw_error(hw, 1, status);
2527}
2528
2529static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
2530{
2531        struct net_device *dev = hw->dev[port];
2532        struct sky2_port *sky2 = netdev_priv(dev);
2533        u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
2534
2535        if (netif_msg_intr(sky2))
2536                printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
2537                       dev->name, status);
2538
2539        if (status & GM_IS_RX_CO_OV)
2540                gma_read16(hw, port, GM_RX_IRQ_SRC);
2541
2542        if (status & GM_IS_TX_CO_OV)
2543                gma_read16(hw, port, GM_TX_IRQ_SRC);
2544
2545        if (status & GM_IS_RX_FF_OR) {
2546                ++dev->stats.rx_fifo_errors;
2547                sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
2548        }
2549
2550        if (status & GM_IS_TX_FF_UR) {
2551                ++dev->stats.tx_fifo_errors;
2552                sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
2553        }
2554}
2555
2556/* This should never happen; it is a bug. */
2557static void sky2_le_error(struct sky2_hw *hw, unsigned port,
2558                          u16 q, unsigned ring_size)
2559{
2560        struct net_device *dev = hw->dev[port];
2561        struct sky2_port *sky2 = netdev_priv(dev);
2562        unsigned idx;
2563        const u64 *le = (q == Q_R1 || q == Q_R2)
2564                ? (u64 *) sky2->rx_le : (u64 *) sky2->tx_le;
2565
2566        idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
2567        printk(KERN_ERR PFX "%s: descriptor error q=%#x get=%u [%llx] put=%u\n",
2568               dev->name, (unsigned) q, idx, (unsigned long long) le[idx],
2569               (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
2570
2571        sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
2572}
2573
2574static int sky2_rx_hung(struct net_device *dev)
2575{
2576        struct sky2_port *sky2 = netdev_priv(dev);
2577        struct sky2_hw *hw = sky2->hw;
2578        unsigned port = sky2->port;
2579        unsigned rxq = rxqaddr[port];
2580        u32 mac_rp = sky2_read32(hw, SK_REG(port, RX_GMF_RP));
2581        u8 mac_lev = sky2_read8(hw, SK_REG(port, RX_GMF_RLEV));
2582        u8 fifo_rp = sky2_read8(hw, Q_ADDR(rxq, Q_RP));
2583        u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
2584
2585        /* If idle and MAC or PCI is stuck */
2586        if (sky2->check.last == dev->last_rx &&
2587            ((mac_rp == sky2->check.mac_rp &&
2588              mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
2589             /* Check if the PCI RX FIFO is hung */
2590             (fifo_rp == sky2->check.fifo_rp &&
2591              fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
2592                printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n",
2593                       dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp,
2594                       sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
2595                return 1;
2596        } else {
2597                sky2->check.last = dev->last_rx;
2598                sky2->check.mac_rp = mac_rp;
2599                sky2->check.mac_lev = mac_lev;
2600                sky2->check.fifo_rp = fifo_rp;
2601                sky2->check.fifo_lev = fifo_lev;
2602                return 0;
2603        }
2604}
2605
2606static void sky2_watchdog(unsigned long arg)
2607{
2608        struct sky2_hw *hw = (struct sky2_hw *) arg;
2609
2610        /* Check for lost IRQ once a second */
2611        if (sky2_read32(hw, B0_ISRC)) {
2612                napi_schedule(&hw->napi);
2613        } else {
2614                int i, active = 0;
2615
2616                for (i = 0; i < hw->ports; i++) {
2617                        struct net_device *dev = hw->dev[i];
2618                        if (!netif_running(dev))
2619                                continue;
2620                        ++active;
2621
2622                        /* For chips with a RAM buffer, check if the receiver is stuck */
2623                        if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
2624                             sky2_rx_hung(dev)) {
2625                                pr_info(PFX "%s: receiver hang detected\n",
2626                                        dev->name);
2627                                schedule_work(&hw->restart_work);
2628                                return;
2629                        }
2630                }
2631
2632                if (active == 0)
2633                        return;
2634        }
2635
2636        mod_timer(&hw->watchdog_timer, round_jiffies(jiffies + HZ));
2637}
2638
2639/* Hardware/software error handling */
2640static void sky2_err_intr(struct sky2_hw *hw, u32 status)
2641{
2642        if (net_ratelimit())
2643                dev_warn(&hw->pdev->dev, "error interrupt status=%#x\n", status);
2644
2645        if (status & Y2_IS_HW_ERR)
2646                sky2_hw_intr(hw);
2647
2648        if (status & Y2_IS_IRQ_MAC1)
2649                sky2_mac_intr(hw, 0);
2650
2651        if (status & Y2_IS_IRQ_MAC2)
2652                sky2_mac_intr(hw, 1);
2653
2654        if (status & Y2_IS_CHK_RX1)
2655                sky2_le_error(hw, 0, Q_R1, RX_LE_SIZE);
2656
2657        if (status & Y2_IS_CHK_RX2)
2658                sky2_le_error(hw, 1, Q_R2, RX_LE_SIZE);
2659
2660        if (status & Y2_IS_CHK_TXA1)
2661                sky2_le_error(hw, 0, Q_XA1, TX_RING_SIZE);
2662
2663        if (status & Y2_IS_CHK_TXA2)
2664                sky2_le_error(hw, 1, Q_XA2, TX_RING_SIZE);
2665}
2666
2667static int sky2_poll(struct napi_struct *napi, int work_limit)
2668{
2669        struct sky2_hw *hw = container_of(napi, struct sky2_hw, napi);
2670        u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
2671        int work_done = 0;
2672        u16 idx;
2673
2674        if (unlikely(status & Y2_IS_ERROR))
2675                sky2_err_intr(hw, status);
2676
2677        if (status & Y2_IS_IRQ_PHY1)
2678                sky2_phy_intr(hw, 0);
2679
2680        if (status & Y2_IS_IRQ_PHY2)
2681                sky2_phy_intr(hw, 1);
2682
2683        while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
2684                work_done += sky2_status_intr(hw, work_limit - work_done, idx);
2685
2686                if (work_done >= work_limit)
2687                        goto done;
2688        }
2689
2690        /* Bug/Errata workaround?
2691         * Need to kick the TX irq moderation timer.
2692         */
2693        if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
2694                sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
2695                sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2696        }
2697        napi_complete(napi);
2698        sky2_read32(hw, B0_Y2_SP_LISR);
2699done:
2700
2701        return work_done;
2702}
2703
2704static irqreturn_t sky2_intr(int irq, void *dev_id)
2705{
2706        struct sky2_hw *hw = dev_id;
2707        u32 status;
2708
2709        /* Reading this register masks interrupts as a side effect */
2710        status = sky2_read32(hw, B0_Y2_SP_ISRC2);
2711        if (status == 0 || status == ~0)
2712                return IRQ_NONE;
2713
2714        prefetch(&hw->st_le[hw->st_idx]);
2715
2716        napi_schedule(&hw->napi);
2717
2718        return IRQ_HANDLED;
2719}
2720
2721#ifdef CONFIG_NET_POLL_CONTROLLER
2722static void sky2_netpoll(struct net_device *dev)
2723{
2724        struct sky2_port *sky2 = netdev_priv(dev);
2725
2726        napi_schedule(&sky2->hw->napi);
2727}
2728#endif
2729
2730/* Chip internal frequency for clock calculations */
2731static u32 sky2_mhz(const struct sky2_hw *hw)
2732{
2733        switch (hw->chip_id) {
2734        case CHIP_ID_YUKON_EC:
2735        case CHIP_ID_YUKON_EC_U:
2736        case CHIP_ID_YUKON_EX:
2737        case CHIP_ID_YUKON_SUPR:
2738        case CHIP_ID_YUKON_UL_2:
2739                return 125;
2740
2741        case CHIP_ID_YUKON_FE:
2742                return 100;
2743
2744        case CHIP_ID_YUKON_FE_P:
2745                return 50;
2746
2747        case CHIP_ID_YUKON_XL:
2748                return 156;
2749
2750        default:
2751                BUG();
2752        }
2753}
2754
2755static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
2756{
2757        return sky2_mhz(hw) * us;
2758}
2759
2760static inline u32 sky2_clk2us(const struct sky2_hw *hw, u32 clk)
2761{
2762        return clk / sky2_mhz(hw);
2763}
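    /* e.g. at a 125 MHz core clock, sky2_us2clk(hw, 40) = 5000 ticks and
     * sky2_clk2us(hw, 5000) = 40 microseconds.
     */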
2764
2765
2766static int __devinit sky2_init(struct sky2_hw *hw)
2767{
2768        u8 t8;
2769
2770        /* Enable all clocks and check for bad PCI access */
2771        sky2_pci_write32(hw, PCI_DEV_REG3, 0);
2772
2773        sky2_write8(hw, B0_CTST, CS_RST_CLR);
2774
2775        hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
2776        hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2777
2778        switch(hw->chip_id) {
2779        case CHIP_ID_YUKON_XL:
2780                hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
2781                break;
2782
2783        case CHIP_ID_YUKON_EC_U:
2784                hw->flags = SKY2_HW_GIGABIT
2785                        | SKY2_HW_NEWER_PHY
2786                        | SKY2_HW_ADV_POWER_CTL;
2787                break;
2788
2789        case CHIP_ID_YUKON_EX:
2790                hw->flags = SKY2_HW_GIGABIT
2791                        | SKY2_HW_NEWER_PHY
2792                        | SKY2_HW_NEW_LE
2793                        | SKY2_HW_ADV_POWER_CTL;
2794
2795                /* New transmit checksum */
2796                if (hw->chip_rev != CHIP_REV_YU_EX_B0)
2797                        hw->flags |= SKY2_HW_AUTO_TX_SUM;
2798                break;
2799
2800        case CHIP_ID_YUKON_EC:
2801                /* This rev is really old, and requires untested workarounds */
2802                if (hw->chip_rev == CHIP_REV_YU_EC_A1) {
2803                        dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
2804                        return -EOPNOTSUPP;
2805                }
2806                hw->flags = SKY2_HW_GIGABIT;
2807                break;
2808
2809        case CHIP_ID_YUKON_FE:
2810                break;
2811
2812        case CHIP_ID_YUKON_FE_P:
2813                hw->flags = SKY2_HW_NEWER_PHY
2814                        | SKY2_HW_NEW_LE
2815                        | SKY2_HW_AUTO_TX_SUM
2816                        | SKY2_HW_ADV_POWER_CTL;
2817                break;
2818
2819        case CHIP_ID_YUKON_SUPR:
2820                hw->flags = SKY2_HW_GIGABIT
2821                        | SKY2_HW_NEWER_PHY
2822                        | SKY2_HW_NEW_LE
2823                        | SKY2_HW_AUTO_TX_SUM
2824                        | SKY2_HW_ADV_POWER_CTL;
2825                break;
2826
2827        case CHIP_ID_YUKON_UL_2:
2828                hw->flags = SKY2_HW_GIGABIT
2829                        | SKY2_HW_ADV_POWER_CTL;
2830                break;
2831
2832        default:
2833                dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n",
2834                        hw->chip_id);
2835                return -EOPNOTSUPP;
2836        }
2837
2838        hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
2839        if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P')
2840                hw->flags |= SKY2_HW_FIBRE_PHY;
2841
2842        hw->ports = 1;
2843        t8 = sky2_read8(hw, B2_Y2_HW_RES);
2844        if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
2845                if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
2846                        ++hw->ports;
2847        }
2848
2849        return 0;
2850}
2851
2852static void sky2_reset(struct sky2_hw *hw)
2853{
2854        struct pci_dev *pdev = hw->pdev;
2855        u16 status;
2856        int i, cap;
2857        u32 hwe_mask = Y2_HWE_ALL_MASK;
2858
2859        /* disable ASF */
2860        if (hw->chip_id == CHIP_ID_YUKON_EX) {
2861                status = sky2_read16(hw, HCU_CCSR);
2862                status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
2863                            HCU_CCSR_UC_STATE_MSK);
2864                sky2_write16(hw, HCU_CCSR, status);
2865        } else
2866                sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
2867        sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
2868
2869        /* do a SW reset */
2870        sky2_write8(hw, B0_CTST, CS_RST_SET);
2871        sky2_write8(hw, B0_CTST, CS_RST_CLR);
2872
2873        /* allow writes to PCI config */
2874        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2875
2876        /* clear PCI errors, if any */
2877        status = sky2_pci_read16(hw, PCI_STATUS);
2878        status |= PCI_STATUS_ERROR_BITS;
2879        sky2_pci_write16(hw, PCI_STATUS, status);
2880
2881        sky2_write8(hw, B0_CTST, CS_MRST_CLR);
2882
2883        cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2884        if (cap) {
2885                sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
2886                             0xfffffffful);
2887
2888                /* If the error bit is stuck on, ignore it */
2889                if (sky2_read32(hw, B0_HWE_ISRC) & Y2_IS_PCI_EXP)
2890                        dev_info(&pdev->dev, "ignoring stuck error report bit\n");
2891                else
2892                        hwe_mask |= Y2_IS_PCI_EXP;
2893        }
2894
2895        sky2_power_on(hw);
2896        sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2897
2898        for (i = 0; i < hw->ports; i++) {
2899                sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2900                sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2901
2902                if (hw->chip_id == CHIP_ID_YUKON_EX ||
2903                    hw->chip_id == CHIP_ID_YUKON_SUPR)
2904                        sky2_write16(hw, SK_REG(i, GMAC_CTRL),
2905                                     GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
2906                                     | GMC_BYP_RETR_ON);
2907        }
2908
2909        /* Clear I2C IRQ noise */
2910        sky2_write32(hw, B2_I2C_IRQ, 1);
2911
2912        /* turn off hardware timer (unused) */
2913        sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
2914        sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
2915
2916        sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
2917
2918        /* Turn off descriptor polling */
2919        sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
2920
2921        /* Turn off receive timestamp */
2922        sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
2923        sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
2924
2925        /* enable the Tx Arbiters */
2926        for (i = 0; i < hw->ports; i++)
2927                sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
2928
2929        /* Initialize ram interface */
2930        for (i = 0; i < hw->ports; i++) {
2931                sky2_write8(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
2932
2933                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
2934                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
2935                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
2936                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
2937                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
2938                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
2939                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
2940                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
2941                sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
2942                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
2943                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
2944                sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
2945        }
2946
2947        sky2_write32(hw, B0_HWE_IMSK, hwe_mask);
2948
2949        for (i = 0; i < hw->ports; i++)
2950                sky2_gmac_reset(hw, i);
2951
2952        memset(hw->st_le, 0, STATUS_LE_BYTES);
2953        hw->st_idx = 0;
2954
2955        sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
2956        sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
2957
2958        sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
2959        sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
2960
2961        /* Set the list last index */
2962        sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
2963
2964        sky2_write16(hw, STAT_TX_IDX_TH, 10);
2965        sky2_write8(hw, STAT_FIFO_WM, 16);
2966
2967        /* set Status-FIFO ISR watermark */
2968        if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
2969                sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
2970        else
2971                sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
2972
2973        sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
2974        sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
2975        sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
2976
2977        /* enable status unit */
2978        sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
2979
2980        sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
2981        sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
2982        sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
2983}
2984
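/* Restart worker: take any running ports down, reset the hardware,
 * then bring the ports back up.  Runs with the RTNL lock held.
 */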
2985static void sky2_restart(struct work_struct *work)
2986{
2987        struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
2988        struct net_device *dev;
2989        int i, err;
2990
2991        rtnl_lock();
2992        for (i = 0; i < hw->ports; i++) {
2993                dev = hw->dev[i];
2994                if (netif_running(dev))
2995                        sky2_down(dev);
2996        }
2997
2998        napi_disable(&hw->napi);
2999        sky2_write32(hw, B0_IMSK, 0);
3000        sky2_reset(hw);
3001        sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
3002        napi_enable(&hw->napi);
3003
3004        for (i = 0; i < hw->ports; i++) {
3005                dev = hw->dev[i];
3006                if (netif_running(dev)) {
3007                        err = sky2_up(dev);
3008                        if (err) {
3009                                printk(KERN_INFO PFX "%s: could not restart %d\n",
3010                                       dev->name, err);
3011                                dev_close(dev);
3012                        }
3013                }
3014        }
3015
3016        rtnl_unlock();
3017}
3018
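/* Wake-on-LAN (WAKE_PHY and WAKE_MAGIC) is only offered on copper PHYs */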
3019static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
3020{
3021        return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
3022}
3023
3024static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3025{
3026        const struct sky2_port *sky2 = netdev_priv(dev);
3027
3028        wol->supported = sky2_wol_supported(sky2->hw);
3029        wol->wolopts = sky2->wol;
3030}
3031
3032static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3033{
3034        struct sky2_port *sky2 = netdev_priv(dev);
3035        struct sky2_hw *hw = sky2->hw;
3036
3037        if ((wol->wolopts & ~sky2_wol_supported(sky2->hw))
3038            || !device_can_wakeup(&hw->pdev->dev))
3039                return -EOPNOTSUPP;
3040
3041        sky2->wol = wol->wolopts;
3042
3043        if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3044            hw->chip_id == CHIP_ID_YUKON_EX ||
3045            hw->chip_id == CHIP_ID_YUKON_FE_P)
3046                sky2_write32(hw, B0_CTST, sky2->wol
3047                             ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
3048
3049        device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
3050
3051        if (!netif_running(dev))
3052                sky2_wol_init(sky2);
3053        return 0;
3054}
3055
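/* Build the ethtool SUPPORTED_* mask from the PHY type (copper or fibre)
 * and whether the chip is gigabit capable.
 */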
3056static u32 sky2_supported_modes(const struct sky2_hw *hw)
3057{
3058        if (sky2_is_copper(hw)) {
3059                u32 modes = SUPPORTED_10baseT_Half
3060                        | SUPPORTED_10baseT_Full
3061                        | SUPPORTED_100baseT_Half
3062                        | SUPPORTED_100baseT_Full
3063                        | SUPPORTED_Autoneg | SUPPORTED_TP;
3064
3065                if (hw->flags & SKY2_HW_GIGABIT)
3066                        modes |= SUPPORTED_1000baseT_Half
3067                                | SUPPORTED_1000baseT_Full;
3068                return modes;
3069        } else
3070                return  SUPPORTED_1000baseT_Half
3071                        | SUPPORTED_1000baseT_Full
3072                        | SUPPORTED_Autoneg
3073                        | SUPPORTED_FIBRE;
3074}
3075
3076static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3077{
3078        struct sky2_port *sky2 = netdev_priv(dev);
3079        struct sky2_hw *hw = sky2->hw;
3080
3081        ecmd->transceiver = XCVR_INTERNAL;
3082        ecmd->supported = sky2_supported_modes(hw);
3083        ecmd->phy_address = PHY_ADDR_MARV;
3084        if (sky2_is_copper(hw)) {
3085                ecmd->port = PORT_TP;
3086                ecmd->speed = sky2->speed;
3087        } else {
3088                ecmd->speed = SPEED_1000;
3089                ecmd->port = PORT_FIBRE;
3090        }
3091
3092        ecmd->advertising = sky2->advertising;
3093        ecmd->autoneg = sky2->autoneg;
3094        ecmd->duplex = sky2->duplex;
3095        return 0;
3096}
3097
3098static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3099{
3100        struct sky2_port *sky2 = netdev_priv(dev);
3101        const struct sky2_hw *hw = sky2->hw;
3102        u32 supported = sky2_supported_modes(hw);
3103
3104        if (ecmd->autoneg == AUTONEG_ENABLE) {
3105                ecmd->advertising = supported;
3106                sky2->duplex = -1;
3107                sky2->speed = -1;
3108        } else {
3109                u32 setting;
3110
3111                switch (ecmd->speed) {
3112                case SPEED_1000:
3113                        if (ecmd->duplex == DUPLEX_FULL)
3114                                setting = SUPPORTED_1000baseT_Full;
3115                        else if (ecmd->duplex == DUPLEX_HALF)
3116                                setting = SUPPORTED_1000baseT_Half;
3117                        else
3118                                return -EINVAL;
3119                        break;
3120                case SPEED_100:
3121                        if (ecmd->duplex == DUPLEX_FULL)
3122                                setting = SUPPORTED_100baseT_Full;
3123                        else if (ecmd->duplex == DUPLEX_HALF)
3124                                setting = SUPPORTED_100baseT_Half;
3125                        else
3126                                return -EINVAL;
3127                        break;
3128
3129                case SPEED_10:
3130                        if (ecmd->duplex == DUPLEX_FULL)
3131                                setting = SUPPORTED_10baseT_Full;
3132                        else if (ecmd->duplex == DUPLEX_HALF)
3133                                setting = SUPPORTED_10baseT_Half;
3134                        else
3135                                return -EINVAL;
3136                        break;
3137                default:
3138                        return -EINVAL;
3139                }
3140
3141                if ((setting & supported) == 0)
3142                        return -EINVAL;
3143
3144                sky2->speed = ecmd->speed;
3145                sky2->duplex = ecmd->duplex;
3146        }
3147
3148        sky2->autoneg = ecmd->autoneg;
3149        sky2->advertising = ecmd->advertising;
3150
3151        if (netif_running(dev)) {
3152                sky2_phy_reinit(sky2);
3153                sky2_set_multicast(dev);
3154        }
3155
3156        return 0;
3157}
3158
3159static void sky2_get_drvinfo(struct net_device *dev,
3160                             struct ethtool_drvinfo *info)
3161{
3162        struct sky2_port *sky2 = netdev_priv(dev);
3163
3164        strcpy(info->driver, DRV_NAME);
3165        strcpy(info->version, DRV_VERSION);
3166        strcpy(info->fw_version, "N/A");
3167        strcpy(info->bus_info, pci_name(sky2->hw->pdev));
3168}
3169
3170static const struct sky2_stat {
3171        char name[ETH_GSTRING_LEN];
3172        u16 offset;
3173} sky2_stats[] = {
3174        { "tx_bytes",      GM_TXO_OK_HI },
3175        { "rx_bytes",      GM_RXO_OK_HI },
3176        { "tx_broadcast",  GM_TXF_BC_OK },
3177        { "rx_broadcast",  GM_RXF_BC_OK },
3178        { "tx_multicast",  GM_TXF_MC_OK },
3179        { "rx_multicast",  GM_RXF_MC_OK },
3180        { "tx_unicast",    GM_TXF_UC_OK },
3181        { "rx_unicast",    GM_RXF_UC_OK },
3182        { "tx_mac_pause",  GM_TXF_MPAUSE },
3183        { "rx_mac_pause",  GM_RXF_MPAUSE },
3184        { "collisions",    GM_TXF_COL },
3185        { "late_collision", GM_TXF_LAT_COL },
3186        { "aborted",       GM_TXF_ABO_COL },
3187        { "single_collisions", GM_TXF_SNG_COL },
3188        { "multi_collisions", GM_TXF_MUL_COL },
3189
3190        { "rx_short",      GM_RXF_SHT },
3191        { "rx_runt",       GM_RXE_FRAG },
3192        { "rx_64_byte_packets", GM_RXF_64B },
3193        { "rx_65_to_127_byte_packets", GM_RXF_127B },
3194        { "rx_128_to_255_byte_packets", GM_RXF_255B },
3195        { "rx_256_to_511_byte_packets", GM_RXF_511B },
3196        { "rx_512_to_1023_byte_packets", GM_RXF_1023B },
3197        { "rx_1024_to_1518_byte_packets", GM_RXF_1518B },
3198        { "rx_1518_to_max_byte_packets", GM_RXF_MAX_SZ },
3199        { "rx_too_long",   GM_RXF_LNG_ERR },
3200        { "rx_fifo_overflow", GM_RXE_FIFO_OV },
3201        { "rx_jabber",     GM_RXF_JAB_PKT },
3202        { "rx_fcs_error",   GM_RXF_FCS_ERR },
3203
3204        { "tx_64_byte_packets", GM_TXF_64B },
3205        { "tx_65_to_127_byte_packets", GM_TXF_127B },
3206        { "tx_128_to_255_byte_packets", GM_TXF_255B },
3207        { "tx_256_to_511_byte_packets", GM_TXF_511B },
3208        { "tx_512_to_1023_byte_packets", GM_TXF_1023B },
3209        { "tx_1024_to_1518_byte_packets", GM_TXF_1518B },
3210        { "tx_1519_to_max_byte_packets", GM_TXF_MAX_SZ },
3211        { "tx_fifo_underrun", GM_TXE_FIFO_UR },
3212};
3213
3214static u32 sky2_get_rx_csum(struct net_device *dev)
3215{
3216        struct sky2_port *sky2 = netdev_priv(dev);
3217
3218        return sky2->rx_csum;
3219}
3220
3221static int sky2_set_rx_csum(struct net_device *dev, u32 data)
3222{
3223        struct sky2_port *sky2 = netdev_priv(dev);
3224
3225        sky2->rx_csum = data;
3226
3227        sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
3228                     data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
3229
3230        return 0;
3231}
3232
3233static u32 sky2_get_msglevel(struct net_device *netdev)
3234{
3235        struct sky2_port *sky2 = netdev_priv(netdev);
3236        return sky2->msg_enable;
3237}
3238
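/* Restart autonegotiation by reinitializing the PHY */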
3239static int sky2_nway_reset(struct net_device *dev)
3240{
3241        struct sky2_port *sky2 = netdev_priv(dev);
3242
3243        if (!netif_running(dev) || sky2->autoneg != AUTONEG_ENABLE)
3244                return -EINVAL;
3245
3246        sky2_phy_reinit(sky2);
3247        sky2_set_multicast(dev);
3248
3249        return 0;
3250}
3251
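/* Read the GMAC MIB counters; the first two entries are 64 bit octet
 * counters read as two 32 bit halves.
 */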
3252static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
3253{
3254        struct sky2_hw *hw = sky2->hw;
3255        unsigned port = sky2->port;
3256        int i;
3257
3258        data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
3259            | (u64) gma_read32(hw, port, GM_TXO_OK_LO);
3260        data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
3261            | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
3262
3263        for (i = 2; i < count; i++)
3264                data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
3265}
3266
3267static void sky2_set_msglevel(struct net_device *netdev, u32 value)
3268{
3269        struct sky2_port *sky2 = netdev_priv(netdev);
3270        sky2->msg_enable = value;
3271}
3272
3273static int sky2_get_sset_count(struct net_device *dev, int sset)
3274{
3275        switch (sset) {
3276        case ETH_SS_STATS:
3277                return ARRAY_SIZE(sky2_stats);
3278        default:
3279                return -EOPNOTSUPP;
3280        }
3281}
3282
3283static void sky2_get_ethtool_stats(struct net_device *dev,
3284                                   struct ethtool_stats *stats, u64 * data)
3285{
3286        struct sky2_port *sky2 = netdev_priv(dev);
3287
3288        sky2_phy_stats(sky2, data, ARRAY_SIZE(sky2_stats));
3289}
3290
3291static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 * data)
3292{
3293        int i;
3294
3295        switch (stringset) {
3296        case ETH_SS_STATS:
3297                for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
3298                        memcpy(data + i * ETH_GSTRING_LEN,
3299                               sky2_stats[i].name, ETH_GSTRING_LEN);
3300                break;
3301        }
3302}
3303
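/* Change the MAC address: update the station address registers and both
 * GMAC source addresses (the physical one is used for pause frames).
 */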
3304static int sky2_set_mac_address(struct net_device *dev, void *p)
3305{
3306        struct sky2_port *sky2 = netdev_priv(dev);
3307        struct sky2_hw *hw = sky2->hw;
3308        unsigned port = sky2->port;
3309        const struct sockaddr *addr = p;
3310
3311        if (!is_valid_ether_addr(addr->sa_data))
3312                return -EADDRNOTAVAIL;
3313
3314        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
3315        memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
3316                    dev->dev_addr, ETH_ALEN);
3317        memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
3318                    dev->dev_addr, ETH_ALEN);
3319
3320        /* virtual address for data */
3321        gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
3322
3323        /* physical address: used for pause frames */
3324        gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
3325
3326        return 0;
3327}
3328
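/* Set the multicast hash filter bit selected by the CRC of the address */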
3329static inline void sky2_add_filter(u8 filter[8], const u8 *addr)
3330{
3331        u32 bit;
3332
3333        bit = ether_crc(ETH_ALEN, addr) & 63;
3334        filter[bit >> 3] |= 1 << (bit & 7);
3335}
3336
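/* Program the GMAC receive mode: promiscuous, all-multicast, or a 64 bit
 * hash filter built from the multicast list (plus the pause address when
 * receive flow control is active).
 */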
3337static void sky2_set_multicast(struct net_device *dev)
3338{
3339        struct sky2_port *sky2 = netdev_priv(dev);
3340        struct sky2_hw *hw = sky2->hw;
3341        unsigned port = sky2->port;
3342        struct dev_mc_list *list = dev->mc_list;
3343        u16 reg;
3344        u8 filter[8];
3345        int rx_pause;
3346        static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 };
3347
3348        rx_pause = (sky2->flow_status == FC_RX || sky2->flow_status == FC_BOTH);
3349        memset(filter, 0, sizeof(filter));
3350
3351        reg = gma_read16(hw, port, GM_RX_CTRL);
3352        reg |= GM_RXCR_UCF_ENA;
3353
3354        if (dev->flags & IFF_PROMISC)   /* promiscuous */
3355                reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
3356        else if (dev->flags & IFF_ALLMULTI)
3357                memset(filter, 0xff, sizeof(filter));
3358        else if (dev->mc_count == 0 && !rx_pause)
3359                reg &= ~GM_RXCR_MCF_ENA;
3360        else {
3361                int i;
3362                reg |= GM_RXCR_MCF_ENA;
3363
3364                if (rx_pause)
3365                        sky2_add_filter(filter, pause_mc_addr);
3366
3367                for (i = 0; list && i < dev->mc_count; i++, list = list->next)
3368                        sky2_add_filter(filter, list->dmi_addr);
3369        }
3370
3371        gma_write16(hw, port, GM_MC_ADDR_H1,
3372                    (u16) filter[0] | ((u16) filter[1] << 8));
3373        gma_write16(hw, port, GM_MC_ADDR_H2,
3374                    (u16) filter[2] | ((u16) filter[3] << 8));
3375        gma_write16(hw, port, GM_MC_ADDR_H3,
3376                    (u16) filter[4] | ((u16) filter[5] << 8));
3377        gma_write16(hw, port, GM_MC_ADDR_H4,
3378                    (u16) filter[6] | ((u16) filter[7] << 8));
3379
3380        gma_write16(hw, port, GM_RX_CTRL, reg);
3381}
3382
3383/* Can use one global state because blinking is controlled by
3384 * ethtool, and ethtool operations always run under the RTNL mutex
3385 */
3386static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
3387{
3388        struct sky2_hw *hw = sky2->hw;
3389        unsigned port = sky2->port;
3390
3391        spin_lock_bh(&sky2->phy_lock);
3392        if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
3393            hw->chip_id == CHIP_ID_YUKON_EX ||
3394            hw->chip_id == CHIP_ID_YUKON_SUPR) {
3395                u16 pg;
3396                pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
3397                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
3398
3399                switch (mode) {
3400                case MO_LED_OFF:
3401                        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3402                                     PHY_M_LEDC_LOS_CTRL(8) |
3403                                     PHY_M_LEDC_INIT_CTRL(8) |
3404                                     PHY_M_LEDC_STA1_CTRL(8) |
3405                                     PHY_M_LEDC_STA0_CTRL(8));
3406                        break;
3407                case MO_LED_ON:
3408                        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3409                                     PHY_M_LEDC_LOS_CTRL(9) |
3410                                     PHY_M_LEDC_INIT_CTRL(9) |
3411                                     PHY_M_LEDC_STA1_CTRL(9) |
3412                                     PHY_M_LEDC_STA0_CTRL(9));
3413                        break;
3414                case MO_LED_BLINK:
3415                        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3416                                     PHY_M_LEDC_LOS_CTRL(0xa) |
3417                                     PHY_M_LEDC_INIT_CTRL(0xa) |
3418                                     PHY_M_LEDC_STA1_CTRL(0xa) |
3419                                     PHY_M_LEDC_STA0_CTRL(0xa));
3420                        break;
3421                case MO_LED_NORM:
3422                        gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
3423                                     PHY_M_LEDC_LOS_CTRL(1) |
3424                                     PHY_M_LEDC_INIT_CTRL(8) |
3425                                     PHY_M_LEDC_STA1_CTRL(7) |
3426                                     PHY_M_LEDC_STA0_CTRL(7));
3427                }
3428
3429                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
3430        } else
3431                gm_phy_write(hw, port, PHY_MARV_LED_OVER,
3432                                     PHY_M_LED_MO_DUP(mode) |
3433                                     PHY_M_LED_MO_10(mode) |
3434                                     PHY_M_LED_MO_100(mode) |
3435                                     PHY_M_LED_MO_1000(mode) |
3436                                     PHY_M_LED_MO_RX(mode) |
3437                                     PHY_M_LED_MO_TX(mode));
3438
3439        spin_unlock_bh(&sky2->phy_lock);
3440}
3441
3442/* blink LEDs for locating the board */
3443static int sky2_phys_id(struct net_device *dev, u32 data)
3444{
3445        struct sky2_port *sky2 = netdev_priv(dev);
3446        unsigned int i;
3447
3448        if (data == 0)
3449                data = UINT_MAX;
3450
3451        for (i = 0; i < data; i++) {
3452                sky2_led(sky2, MO_LED_ON);
3453                if (msleep_interruptible(500))
3454                        break;
3455                sky2_led(sky2, MO_LED_OFF);
3456                if (msleep_interruptible(500))
3457                        break;
3458        }
3459        sky2_led(sky2, MO_LED_NORM);
3460
3461        return 0;
3462}
3463
3464static void sky2_get_pauseparam(struct net_device *dev,
3465                                struct ethtool_pauseparam *ecmd)
3466{
3467        struct sky2_port *sky2 = netdev_priv(dev);
3468
3469        switch (sky2->flow_mode) {
3470        case FC_NONE:
3471                ecmd->tx_pause = ecmd->rx_pause = 0;
3472                break;
3473        case FC_TX:
3474                ecmd->tx_pause = 1, ecmd->rx_pause = 0;
3475                break;
3476        case FC_RX:
3477                ecmd->tx_pause = 0, ecmd->rx_pause = 1;
3478                break;
3479        case FC_BOTH:
3480                ecmd->tx_pause = ecmd->rx_pause = 1;
3481        }
3482
3483        ecmd->autoneg = sky2->autoneg;
3484}
3485
3486static int sky2_set_pauseparam(struct net_device *dev,
3487                               struct ethtool_pauseparam *ecmd)
3488{
3489        struct sky2_port *sky2 = netdev_priv(dev);
3490
3491        sky2->autoneg = ecmd->autoneg;
3492        sky2->flow_mode = sky2_flow(ecmd->rx_pause, ecmd->tx_pause);
3493
3494        if (netif_running(dev))
3495                sky2_phy_reinit(sky2);
3496
3497        return 0;
3498}
3499
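/* Report the status unit timers (tx, level and ISR) and FIFO watermarks
 * as ethtool coalescing parameters.
 */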
3500static int sky2_get_coalesce(struct net_device *dev,
3501                             struct ethtool_coalesce *ecmd)
3502{
3503        struct sky2_port *sky2 = netdev_priv(dev);
3504        struct sky2_hw *hw = sky2->hw;
3505
3506        if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_STOP)
3507                ecmd->tx_coalesce_usecs = 0;
3508        else {
3509                u32 clks = sky2_read32(hw, STAT_TX_TIMER_INI);
3510                ecmd->tx_coalesce_usecs = sky2_clk2us(hw, clks);
3511        }
3512        ecmd->tx_max_coalesced_frames = sky2_read16(hw, STAT_TX_IDX_TH);
3513
3514        if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_STOP)
3515                ecmd->rx_coalesce_usecs = 0;
3516        else {
3517                u32 clks = sky2_read32(hw, STAT_LEV_TIMER_INI);
3518                ecmd->rx_coalesce_usecs = sky2_clk2us(hw, clks);
3519        }
3520        ecmd->rx_max_coalesced_frames = sky2_read8(hw, STAT_FIFO_WM);
3521
3522        if (sky2_read8(hw, STAT_ISR_TIMER_CTRL) == TIM_STOP)
3523                ecmd->rx_coalesce_usecs_irq = 0;
3524        else {
3525                u32 clks = sky2_read32(hw, STAT_ISR_TIMER_INI);
3526                ecmd->rx_coalesce_usecs_irq = sky2_clk2us(hw, clks);
3527        }
3528
3529        ecmd->rx_max_coalesced_frames_irq = sky2_read8(hw, STAT_FIFO_ISR_WM);
3530
3531        return 0;
3532}
3533
3534/* Note: this affects both ports */
3535static int sky2_set_coalesce(struct net_device *dev,
3536                             struct ethtool_coalesce *ecmd)
3537{
3538        struct sky2_port *sky2 = netdev_priv(dev);
3539        struct sky2_hw *hw = sky2->hw;
3540        const u32 tmax = sky2_clk2us(hw, 0x0ffffff);
3541
3542        if (ecmd->tx_coalesce_usecs > tmax ||
3543            ecmd->rx_coalesce_usecs > tmax ||
3544            ecmd->rx_coalesce_usecs_irq > tmax)
3545                return -EINVAL;
3546
3547        if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
3548                return -EINVAL;
3549        if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
3550                return -EINVAL;
3551        if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
3552                return -EINVAL;
3553
3554        if (ecmd->tx_coalesce_usecs == 0)
3555                sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
3556        else {
3557                sky2_write32(hw, STAT_TX_TIMER_INI,
3558                             sky2_us2clk(hw, ecmd->tx_coalesce_usecs));
3559                sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
3560        }
3561        sky2_write16(hw, STAT_TX_IDX_TH, ecmd->tx_max_coalesced_frames);
3562
3563        if (ecmd->rx_coalesce_usecs == 0)
3564                sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
3565        else {
3566                sky2_write32(hw, STAT_LEV_TIMER_INI,
3567                             sky2_us2clk(hw, ecmd->rx_coalesce_usecs));
3568                sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
3569        }
3570        sky2_write8(hw, STAT_FIFO_WM, ecmd->rx_max_coalesced_frames);
3571
3572        if (ecmd->rx_coalesce_usecs_irq == 0)
3573                sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_STOP);
3574        else {
3575                sky2_write32(hw, STAT_ISR_TIMER_INI,
3576                             sky2_us2clk(hw, ecmd->rx_coalesce_usecs_irq));
3577                sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
3578        }
3579        sky2_write8(hw, STAT_FIFO_ISR_WM, ecmd->rx_max_coalesced_frames_irq);
3580        return 0;
3581}
3582
3583static void sky2_get_ringparam(struct net_device *dev,
3584                               struct ethtool_ringparam *ering)
3585{
3586        struct sky2_port *sky2 = netdev_priv(dev);
3587
3588        ering->rx_max_pending = RX_MAX_PENDING;
3589        ering->rx_mini_max_pending = 0;
3590        ering->rx_jumbo_max_pending = 0;
3591        ering->tx_max_pending = TX_RING_SIZE - 1;
3592
3593        ering->rx_pending = sky2->rx_pending;
3594        ering->rx_mini_pending = 0;
3595        ering->rx_jumbo_pending = 0;
3596        ering->tx_pending = sky2->tx_pending;
3597}
3598
3599static int sky2_set_ringparam(struct net_device *dev,
3600                              struct ethtool_ringparam *ering)
3601{
3602        struct sky2_port *sky2 = netdev_priv(dev);
3603        int err = 0;
3604
3605        if (ering->rx_pending > RX_MAX_PENDING ||
3606            ering->rx_pending < 8 ||
3607            ering->tx_pending < MAX_SKB_TX_LE ||
3608            ering->tx_pending > TX_RING_SIZE - 1)
3609                return -EINVAL;
3610
3611        if (netif_running(dev))
3612                sky2_down(dev);
3613
3614        sky2->rx_pending = ering->rx_pending;
3615        sky2->tx_pending = ering->tx_pending;
3616
3617        if (netif_running(dev)) {
3618                err = sky2_up(dev);
3619                if (err)
3620                        dev_close(dev);
3621        }
3622
3623        return err;
3624}
3625
3626static int sky2_get_regs_len(struct net_device *dev)
3627{
3628        return 0x4000;
3629}
3630
3631/*
3632 * Returns a copy of the control register region.
3633 * Note: ethtool_get_regs always provides a full size (16k) buffer.
3634 */
3635static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3636                          void *p)
3637{
3638        const struct sky2_port *sky2 = netdev_priv(dev);
3639        const void __iomem *io = sky2->hw->regs;
3640        unsigned int b;
3641
3642        regs->version = 1;
3643
3644        for (b = 0; b < 128; b++) {
3645                /* This complicated switch statement is to make sure we
3646                 * only access regions that are unreserved.
3647                 * Some blocks are only valid on dual port cards,
3648                 * and block 3 has some special diagnostic registers that
3649                 * are poison and must not be read.
3650                 */
3651                switch (b) {
3652                case 3:
3653                        /* skip diagnostic ram region */
3654                        memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
3655                        break;
3656
3657                /* dual port cards only */
3658                case 5:         /* Tx Arbiter 2 */
3659                case 9:         /* RX2 */
3660                case 14 ... 15: /* TX2 */
3661                case 17: case 19: /* Ram Buffer 2 */
3662                case 22 ... 23: /* Tx Ram Buffer 2 */
3663                case 25:        /* Rx MAC Fifo 1 */
3664                case 27:        /* Tx MAC Fifo 2 */
3665                case 31:        /* GPHY 2 */
3666                case 40 ... 47: /* Pattern Ram 2 */
3667                case 52: case 54: /* TCP Segmentation 2 */
3668                case 112 ... 116: /* GMAC 2 */
3669                        if (sky2->hw->ports == 1)
3670                                goto reserved;
3671                        /* fall through */
3672                case 0:         /* Control */
3673                case 2:         /* Mac address */
3674                case 4:         /* Tx Arbiter 1 */
3675                case 7:         /* PCI express reg */
3676                case 8:         /* RX1 */
3677                case 12 ... 13: /* TX1 */
3678                case 16: case 18:/* Rx Ram Buffer 1 */
3679                case 20 ... 21: /* Tx Ram Buffer 1 */
3680                case 24:        /* Rx MAC Fifo 1 */
3681                case 26:        /* Tx MAC Fifo 1 */
3682                case 28 ... 29: /* Descriptor and status unit */
3683                case 30:        /* GPHY 1*/
3684                case 32 ... 39: /* Pattern Ram 1 */
3685                case 48: case 50: /* TCP Segmentation 1 */
3686                case 56 ... 60: /* PCI space */
3687                case 80 ... 84: /* GMAC 1 */
3688                        memcpy_fromio(p, io, 128);
3689                        break;
3690                default:
3691reserved:
3692                        memset(p, 0, 128);
3693                }
3694
3695                p += 128;
3696                io += 128;
3697        }
3698}
3699
3700/* In order to do Jumbo packets on these chips, we need to turn off the
3701 * transmit store/forward. Therefore checksum offload won't work.
3702 */
3703static int no_tx_offload(struct net_device *dev)
3704{
3705        const struct sky2_port *sky2 = netdev_priv(dev);
3706        const struct sky2_hw *hw = sky2->hw;
3707
3708        return dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U;
3709}
3710
3711static int sky2_set_tx_csum(struct net_device *dev, u32 data)
3712{
3713        if (data && no_tx_offload(dev))
3714                return -EINVAL;
3715
3716        return ethtool_op_set_tx_csum(dev, data);
3717}
3718
3719
3720static int sky2_set_tso(struct net_device *dev, u32 data)
3721{
3722        if (data && no_tx_offload(dev))
3723                return -EINVAL;
3724
3725        return ethtool_op_set_tso(dev, data);
3726}
3727
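/* The VPD (EEPROM) size is encoded in PCI_DEV_REG2 as a power of two,
 * starting at 256 bytes.
 */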
3728static int sky2_get_eeprom_len(struct net_device *dev)
3729{
3730        struct sky2_port *sky2 = netdev_priv(dev);
3731        struct sky2_hw *hw = sky2->hw;
3732        u16 reg2;
3733
3734        reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
3735        return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
3736}
3737
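/* Wait for a pending VPD read or write cycle to finish, with a 250 ms timeout */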
3738static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
3739{
3740        unsigned long start = jiffies;
3741
3742        while ((sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
3743                /* Can take up to 10.6 ms for a write to complete */
3744                if (time_after(jiffies, start + HZ/4)) {
3745                        dev_err(&hw->pdev->dev, PFX "VPD cycle timed out\n");
3746                        return -ETIMEDOUT;
3747                }
3748                mdelay(1);
3749        }
3750
3751        return 0;
3752}
3753
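/* Read VPD data one 32 bit word at a time through the PCI VPD capability */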
3754static int sky2_vpd_read(struct sky2_hw *hw, int cap, void *data,
3755                         u16 offset, size_t length)
3756{
3757        int rc = 0;
3758
3759        while (length > 0) {
3760                u32 val;
3761
3762                sky2_pci_write16(hw, cap + PCI_VPD_ADDR, offset);
3763                rc = sky2_vpd_wait(hw, cap, 0);
3764                if (rc)
3765                        break;
3766
3767                val = sky2_pci_read32(hw, cap + PCI_VPD_DATA);
3768
3769                memcpy(data, &val, min(sizeof(val), length));
3770                offset += sizeof(u32);
3771                data += sizeof(u32);
3772                length -= sizeof(u32);
3773        }
3774
3775        return rc;
3776}
3777
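/* Write VPD data one 32 bit word at a time; each word must complete
 * before the next one is issued.
 */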
3778static int sky2_vpd_write(struct sky2_hw *hw, int cap, const void *data,
3779                          u16 offset, unsigned int length)
3780{
3781        unsigned int i;
3782        int rc = 0;
3783
3784        for (i = 0; i < length; i += sizeof(u32)) {
3785                u32 val = *(u32 *)(data + i);
3786
3787                sky2_pci_write32(hw, cap + PCI_VPD_DATA, val);
3788                sky2_pci_write32(hw, cap + PCI_VPD_ADDR, offset | PCI_VPD_ADDR_F);
3789
3790                rc = sky2_vpd_wait(hw, cap, PCI_VPD_ADDR_F);
3791                if (rc)
3792                        break;
3793        }
3794        return rc;
3795}
3796
3797static int sky2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
3798                           u8 *data)
3799{
3800        struct sky2_port *sky2 = netdev_priv(dev);
3801        int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
3802
3803        if (!cap)
3804                return -EINVAL;
3805
3806        eeprom->magic = SKY2_EEPROM_MAGIC;
3807
3808        return sky2_vpd_read(sky2->hw, cap, data, eeprom->offset, eeprom->len);
3809}
3810
3811static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
3812                           u8 *data)
3813{
3814        struct sky2_port *sky2 = netdev_priv(dev);
3815        int cap = pci_find_capability(sky2->hw->pdev, PCI_CAP_ID_VPD);
3816
3817        if (!cap)
3818                return -EINVAL;
3819
3820        if (eeprom->magic != SKY2_EEPROM_MAGIC)
3821                return -EINVAL;
3822
3823        /* Partial writes not supported */
3824        if ((eeprom->offset & 3) || (eeprom->len & 3))
3825                return -EINVAL;
3826
3827        return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
3828}
3829
3830
3831static const struct ethtool_ops sky2_ethtool_ops = {
3832        .get_settings   = sky2_get_settings,
3833        .set_settings   = sky2_set_settings,
3834        .get_drvinfo    = sky2_get_drvinfo,
3835        .get_wol        = sky2_get_wol,
3836        .set_wol        = sky2_set_wol,
3837        .get_msglevel   = sky2_get_msglevel,
3838        .set_msglevel   = sky2_set_msglevel,
3839        .nway_reset     = sky2_nway_reset,
3840        .get_regs_len   = sky2_get_regs_len,
3841        .get_regs       = sky2_get_regs,
3842        .get_link       = ethtool_op_get_link,
3843        .get_eeprom_len = sky2_get_eeprom_len,
3844        .get_eeprom     = sky2_get_eeprom,
3845        .set_eeprom     = sky2_set_eeprom,
3846        .set_sg         = ethtool_op_set_sg,
3847        .set_tx_csum    = sky2_set_tx_csum,
3848        .set_tso        = sky2_set_tso,
3849        .get_rx_csum    = sky2_get_rx_csum,
3850        .set_rx_csum    = sky2_set_rx_csum,
3851        .get_strings    = sky2_get_strings,
3852        .get_coalesce   = sky2_get_coalesce,
3853        .set_coalesce   = sky2_set_coalesce,
3854        .get_ringparam  = sky2_get_ringparam,
3855        .set_ringparam  = sky2_set_ringparam,
3856        .get_pauseparam = sky2_get_pauseparam,
3857        .set_pauseparam = sky2_set_pauseparam,
3858        .phys_id        = sky2_phys_id,
3859        .get_sset_count = sky2_get_sset_count,
3860        .get_ethtool_stats = sky2_get_ethtool_stats,
3861};
3862
3863#ifdef CONFIG_SKY2_DEBUG
3864
3865static struct dentry *sky2_debug;
3866
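/* debugfs dump of interrupt state, the status ring and the tx/rx rings
 * for one port.
 */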
3867static int sky2_debug_show(struct seq_file *seq, void *v)
3868{
3869        struct net_device *dev = seq->private;
3870        const struct sky2_port *sky2 = netdev_priv(dev);
3871        struct sky2_hw *hw = sky2->hw;
3872        unsigned port = sky2->port;
3873        unsigned idx, last;
3874        int sop;
3875
3876        if (!netif_running(dev))
3877                return -ENETDOWN;
3878
3879        seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
3880                   sky2_read32(hw, B0_ISRC),
3881                   sky2_read32(hw, B0_IMSK),
3882                   sky2_read32(hw, B0_Y2_SP_ICR));
3883
3884        napi_disable(&hw->napi);
3885        last = sky2_read16(hw, STAT_PUT_IDX);
3886
3887        if (hw->st_idx == last)
3888                seq_puts(seq, "Status ring (empty)\n");
3889        else {
3890                seq_puts(seq, "Status ring\n");
3891                for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE;
3892                     idx = RING_NEXT(idx, STATUS_RING_SIZE)) {
3893                        const struct sky2_status_le *le = hw->st_le + idx;
3894                        seq_printf(seq, "[%d] %#x %d %#x\n",
3895                                   idx, le->opcode, le->length, le->status);
3896                }
3897                seq_puts(seq, "\n");
3898        }
3899
3900        seq_printf(seq, "Tx ring pending=%u...%u report=%d done=%d\n",
3901                   sky2->tx_cons, sky2->tx_prod,
3902                   sky2_read16(hw, port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
3903                   sky2_read16(hw, Q_ADDR(txqaddr[port], Q_DONE)));
3904
3905        /* Dump contents of tx ring */
3906        sop = 1;
3907        for (idx = sky2->tx_next; idx != sky2->tx_prod && idx < TX_RING_SIZE;
3908             idx = RING_NEXT(idx, TX_RING_SIZE)) {
3909                const struct sky2_tx_le *le = sky2->tx_le + idx;
3910                u32 a = le32_to_cpu(le->addr);
3911
3912                if (sop)
3913                        seq_printf(seq, "%u:", idx);
3914                sop = 0;
3915
3916                switch(le->opcode & ~HW_OWNER) {
3917                case OP_ADDR64:
3918                        seq_printf(seq, " %#x:", a);
3919                        break;
3920                case OP_LRGLEN:
3921                        seq_printf(seq, " mtu=%d", a);
3922                        break;
3923                case OP_VLAN:
3924                        seq_printf(seq, " vlan=%d", be16_to_cpu(le->length));
3925                        break;
3926                case OP_TCPLISW:
3927                        seq_printf(seq, " csum=%#x", a);
3928                        break;
3929                case OP_LARGESEND:
3930                        seq_printf(seq, " tso=%#x(%d)", a, le16_to_cpu(le->length));
3931                        break;
3932                case OP_PACKET:
3933                        seq_printf(seq, " %#x(%d)", a, le16_to_cpu(le->length));
3934                        break;
3935                case OP_BUFFER:
3936                        seq_printf(seq, " frag=%#x(%d)", a, le16_to_cpu(le->length));
3937                        break;
3938                default:
3939                        seq_printf(seq, " op=%#x,%#x(%d)", le->opcode,
3940                                   a, le16_to_cpu(le->length));
3941                }
3942
3943                if (le->ctrl & EOP) {
3944                        seq_putc(seq, '\n');
3945                        sop = 1;
3946                }
3947        }
3948
3949        seq_printf(seq, "\nRx ring hw get=%d put=%d last=%d\n",
3950                   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_GET_IDX)),
3951                   last = sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_PUT_IDX)),
3952                   sky2_read16(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_LAST_IDX)));
3953
3954        sky2_read32(hw, B0_Y2_SP_LISR);
3955        napi_enable(&hw->napi);
3956        return 0;
3957}
3958
3959static int sky2_debug_open(struct inode *inode, struct file *file)
3960{
3961        return single_open(file, sky2_debug_show, inode->i_private);
3962}
3963
3964static const struct file_operations sky2_debug_fops = {
3965        .owner          = THIS_MODULE,
3966        .open           = sky2_debug_open,
3967        .read           = seq_read,
3968        .llseek         = seq_lseek,
3969        .release        = single_release,
3970};
3971
3972/*
3973 * Use network device events to create/remove/rename
3974 * debugfs file entries
3975 */
3976static int sky2_device_event(struct notifier_block *unused,
3977                             unsigned long event, void *ptr)
3978{
3979        struct net_device *dev = ptr;
3980        struct sky2_port *sky2 = netdev_priv(dev);
3981
3982        if (dev->open != sky2_up || !sky2_debug)
3983                return NOTIFY_DONE;
3984
3985        switch(event) {
3986        case NETDEV_CHANGENAME:
3987                if (sky2->debugfs) {
3988                        sky2->debugfs = debugfs_rename(sky2_debug, sky2->debugfs,
3989                                                       sky2_debug, dev->name);
3990                }
3991                break;
3992
3993        case NETDEV_GOING_DOWN:
3994                if (sky2->debugfs) {
3995                        printk(KERN_DEBUG PFX "%s: remove debugfs\n",
3996                               dev->name);
3997                        debugfs_remove(sky2->debugfs);
3998                        sky2->debugfs = NULL;
3999                }
4000                break;
4001
4002        case NETDEV_UP:
4003                sky2->debugfs = debugfs_create_file(dev->name, S_IRUGO,
4004                                                    sky2_debug, dev,
4005                                                    &sky2_debug_fops);
4006                if (IS_ERR(sky2->debugfs))
4007                        sky2->debugfs = NULL;
4008        }
4009
4010        return NOTIFY_DONE;
4011}
4012
4013static struct notifier_block sky2_notifier = {
4014        .notifier_call = sky2_device_event,
4015};
4016
4017
4018static __init void sky2_debug_init(void)
4019{
4020        struct dentry *ent;
4021
4022        ent = debugfs_create_dir("sky2", NULL);
4023        if (!ent || IS_ERR(ent))
4024                return;
4025
4026        sky2_debug = ent;
4027        register_netdevice_notifier(&sky2_notifier);
4028}
4029
4030static __exit void sky2_debug_cleanup(void)
4031{
4032        if (sky2_debug) {
4033                unregister_netdevice_notifier(&sky2_notifier);
4034                debugfs_remove(sky2_debug);
4035                sky2_debug = NULL;
4036        }
4037}
4038
4039#else
4040#define sky2_debug_init()
4041#define sky2_debug_cleanup()
4042#endif
4043
4044
4045/* Initialize network device */
4046static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4047                                                     unsigned port,
4048                                                     int highmem, int wol)
4049{
4050        struct sky2_port *sky2;
4051        struct net_device *dev = alloc_etherdev(sizeof(*sky2));
4052
4053        if (!dev) {
4054                dev_err(&hw->pdev->dev, "etherdev alloc failed\n");
4055                return NULL;
4056        }
4057
4058        SET_NETDEV_DEV(dev, &hw->pdev->dev);
4059        dev->irq = hw->pdev->irq;
4060        dev->open = sky2_up;
4061        dev->stop = sky2_down;
4062        dev->do_ioctl = sky2_ioctl;
4063        dev->hard_start_xmit = sky2_xmit_frame;
4064        dev->set_multicast_list = sky2_set_multicast;
4065        dev->set_mac_address = sky2_set_mac_address;
4066        dev->change_mtu = sky2_change_mtu;
4067        SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
4068        dev->tx_timeout = sky2_tx_timeout;
4069        dev->watchdog_timeo = TX_WATCHDOG;
4070#ifdef CONFIG_NET_POLL_CONTROLLER
4071        if (port == 0)
4072                dev->poll_controller = sky2_netpoll;
4073#endif
4074
4075        sky2 = netdev_priv(dev);
4076        sky2->netdev = dev;
4077        sky2->hw = hw;
4078        sky2->msg_enable = netif_msg_init(debug, default_msg);
4079
4080        /* Auto speed and flow control */
4081        sky2->autoneg = AUTONEG_ENABLE;
4082        sky2->flow_mode = FC_BOTH;
4083
4084        sky2->duplex = -1;
4085        sky2->speed = -1;
4086        sky2->advertising = sky2_supported_modes(hw);
4087        sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
4088        sky2->wol = wol;
4089
4090        spin_lock_init(&sky2->phy_lock);
4091        sky2->tx_pending = TX_DEF_PENDING;
4092        sky2->rx_pending = RX_DEF_PENDING;
4093
4094        hw->dev[port] = dev;
4095
4096        sky2->port = port;
4097
4098        dev->features |= NETIF_F_TSO | NETIF_F_IP_CSUM | NETIF_F_SG;
4099        if (highmem)
4100                dev->features |= NETIF_F_HIGHDMA;
4101
4102#ifdef SKY2_VLAN_TAG_USED
4103        /* The workaround for FE+ status conflicts with VLAN tag detection. */
4104        if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
4105              sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
4106                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4107                dev->vlan_rx_register = sky2_vlan_rx_register;
4108        }
4109#endif
4110
4111        /* read the mac address */
4112        memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
4113        memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
4114
4115        return dev;
4116}
4117
4118static void __devinit sky2_show_addr(struct net_device *dev)
4119{
4120        const struct sky2_port *sky2 = netdev_priv(dev);
4121        DECLARE_MAC_BUF(mac);
4122
4123        if (netif_msg_probe(sky2))
4124                printk(KERN_INFO PFX "%s: addr %s\n",
4125                       dev->name, print_mac(mac, dev->dev_addr));
4126}
4127
4128/* Handle software interrupt used during MSI test */
4129static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id)
4130{
4131        struct sky2_hw *hw = dev_id;
4132        u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2);
4133
4134        if (status == 0)
4135                return IRQ_NONE;
4136
4137        if (status & Y2_IS_IRQ_SW) {
4138                hw->flags |= SKY2_HW_USE_MSI;
4139                wake_up(&hw->msi_wait);
4140                sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4141        }
4142        sky2_write32(hw, B0_Y2_SP_ICR, 2);
4143
4144        return IRQ_HANDLED;
4145}
4146
4147/* Test interrupt path by forcing a software IRQ */
4148static int __devinit sky2_test_msi(struct sky2_hw *hw)
4149{
4150        struct pci_dev *pdev = hw->pdev;
4151        int err;
4152
4153        init_waitqueue_head (&hw->msi_wait);
4154
4155        sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4156
4157        err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
4158        if (err) {
4159                dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4160                return err;
4161        }
4162
4163        sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
4164        sky2_read8(hw, B0_CTST);
4165
4166        wait_event_timeout(hw->msi_wait, (hw->flags & SKY2_HW_USE_MSI), HZ/10);
4167
4168        if (!(hw->flags & SKY2_HW_USE_MSI)) {
4169                /* MSI test failed, go back to INTx mode */
4170                dev_info(&pdev->dev, "No interrupt generated using MSI, "
4171                         "switching to INTx mode.\n");
4172
4173                err = -EOPNOTSUPP;
4174                sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ);
4175        }
4176
4177        sky2_write32(hw, B0_IMSK, 0);
4178        sky2_read32(hw, B0_IMSK);
4179
4180        free_irq(pdev->irq, hw);
4181
4182        return err;
4183}
4184
4185/*
4186 * Read and parse the first part of Vital Product Data
4187 */
4188#define VPD_SIZE        128
4189#define VPD_MAGIC       0x82
4190
4191static void __devinit sky2_vpd_info(struct sky2_hw *hw)
4192{
4193        int cap = pci_find_capability(hw->pdev, PCI_CAP_ID_VPD);
4194        const u8 *p;
4195        u8 *vpd_buf = NULL;
4196        u16 len;
4197        static struct vpd_tag {
4198                char tag[2];
4199                char *label;
4200        } vpd_tags[] = {
4201                { "PN", "Part Number" },
4202                { "EC", "Engineering Level" },
4203                { "MN", "Manufacturer" },
4204        };
4205
4206        if (!cap)
4207                goto out;
4208
4209        vpd_buf = kmalloc(VPD_SIZE, GFP_KERNEL);
4210        if (!vpd_buf)
4211                goto out;
4212
4213        if (sky2_vpd_read(hw, cap, vpd_buf, 0, VPD_SIZE))
4214                goto out;
4215
4216        if (vpd_buf[0] != VPD_MAGIC)
4217                goto out;
4218        len = vpd_buf[1];
4219        if (len == 0 || len > VPD_SIZE - 4)
4220                goto out;
4221        p = vpd_buf + 3;
4222        dev_info(&hw->pdev->dev, "%.*s\n", len, p);
4223        p += len;
4224
4225        while (p < vpd_buf + VPD_SIZE - 4) {
4226                int i;
4227
4228                if (!memcmp("RW", p, 2))        /* end marker */
4229                        break;
4230
4231                len = p[2];
4232                if (len > (p - vpd_buf) - 4)
4233                        break;
4234
4235                for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
4236                        if (!memcmp(vpd_tags[i].tag, p, 2)) {
4237                                printk(KERN_DEBUG " %s: %.*s\n",
4238                                       vpd_tags[i].label, len, p + 3);
4239                                break;
4240                        }
4241                }
4242                p += len + 3;
4243        }
4244out:
4245        kfree(vpd_buf);
4246}
4247
4248/* This driver supports yukon2 chipset only */
4249static const char *sky2_name(u8 chipid, char *buf, int sz)
4250{
4251        const char *name[] = {
4252                "XL",           /* 0xb3 */
4253                "EC Ultra",     /* 0xb4 */
4254                "Extreme",      /* 0xb5 */
4255                "EC",           /* 0xb6 */
4256                "FE",           /* 0xb7 */
4257                "FE+",          /* 0xb8 */
4258                "Supreme",      /* 0xb9 */
4259                "UL 2",         /* 0xba */
4260        };
4261
4262        if (chipid >= CHIP_ID_YUKON_XL && chipid <= CHIP_ID_YUKON_UL_2)
4263                strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
4264        else
4265                snprintf(buf, sz, "(chip %#x)", chipid);
4266        return buf;
4267}
4268
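/* PCI probe: map the register window, set up DMA masks and the status
 * ring, reset the chip and register one net device per port.
 */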
4269static int __devinit sky2_probe(struct pci_dev *pdev,
4270                                const struct pci_device_id *ent)
4271{
4272        struct net_device *dev;
4273        struct sky2_hw *hw;
4274        int err, using_dac = 0, wol_default;
4275        char buf1[16];
4276
4277        err = pci_enable_device(pdev);
4278        if (err) {
4279                dev_err(&pdev->dev, "cannot enable PCI device\n");
4280                goto err_out;
4281        }
4282
4283        err = pci_request_regions(pdev, DRV_NAME);
4284        if (err) {
4285                dev_err(&pdev->dev, "cannot obtain PCI resources\n");
4286                goto err_out_disable;
4287        }
4288
4289        pci_set_master(pdev);
4290
4291        if (sizeof(dma_addr_t) > sizeof(u32) &&
4292            !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
4293                using_dac = 1;
4294                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4295                if (err < 0) {
4296                        dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
4297                                "for consistent allocations\n");
4298                        goto err_out_free_regions;
4299                }
4300        } else {
4301                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4302                if (err) {
4303                        dev_err(&pdev->dev, "no usable DMA configuration\n");
4304                        goto err_out_free_regions;
4305                }
4306        }
4307
4308        wol_default = device_may_wakeup(&pdev->dev) ? WAKE_MAGIC : 0;
4309
4310        err = -ENOMEM;
4311        hw = kzalloc(sizeof(*hw), GFP_KERNEL);
4312        if (!hw) {
4313                dev_err(&pdev->dev, "cannot allocate hardware struct\n");
4314                goto err_out_free_regions;
4315        }
4316
4317        hw->pdev = pdev;
4318
4319        hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
4320        if (!hw->regs) {
4321                dev_err(&pdev->dev, "cannot map device registers\n");
4322                goto err_out_free_hw;
4323        }
4324
4325#ifdef __BIG_ENDIAN
4326        /* The sk98lin vendor driver uses hardware byte swapping but
4327         * this driver uses software swapping.
4328         */
4329        {
4330                u32 reg;
4331                reg = sky2_pci_read32(hw, PCI_DEV_REG2);
4332                reg &= ~PCI_REV_DESC;
4333                sky2_pci_write32(hw, PCI_DEV_REG2, reg);
4334        }
4335#endif
4336
4337        /* ring for status responses */
4338        hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
4339        if (!hw->st_le)
4340                goto err_out_iounmap;
4341
4342        err = sky2_init(hw);
4343        if (err)
4344                goto err_out_iounmap;
4345
4346        dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
4347                 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
4348
4349        sky2_reset(hw);
4350
4351        sky2_vpd_info(hw);
4352
4353        dev = sky2_init_netdev(hw, 0, using_dac, wol_default);
4354        if (!dev) {
4355                err = -ENOMEM;
4356                goto err_out_free_pci;
4357        }
4358
4359        if (!disable_msi && pci_enable_msi(pdev) == 0) {
4360                err = sky2_test_msi(hw);
4361                if (err == -EOPNOTSUPP)
4362                        pci_disable_msi(pdev);
4363                else if (err)
4364                        goto err_out_free_netdev;
4365        }
4366
4367        err = register_netdev(dev);
4368        if (err) {
4369                dev_err(&pdev->dev, "cannot register net device\n");
4370                goto err_out_free_netdev;
4371        }
4372
4373        netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT);
4374
4375        err = request_irq(pdev->irq, sky2_intr,
4376                          (hw->flags & SKY2_HW_USE_MSI) ? 0 : IRQF_SHARED,
4377                          dev->name, hw);
4378        if (err) {
4379                dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4380                goto err_out_unregister;
4381        }
4382        sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
4383        napi_enable(&hw->napi);
4384
4385        sky2_show_addr(dev);
4386
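        /* Dual-port chips: bring up the second netdev as well, but treat a
         * failure here as non-fatal so the first port stays usable.
         */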
4387        if (hw->ports > 1) {
4388                struct net_device *dev1;
4389
4390                dev1 = sky2_init_netdev(hw, 1, using_dac, wol_default);
4391                if (!dev1)
4392                        dev_warn(&pdev->dev, "allocation for second device failed\n");
4393                else if ((err = register_netdev(dev1))) {
4394                        dev_warn(&pdev->dev,
4395                                 "register of second port failed (%d)\n", err);
4396                        hw->dev[1] = NULL;
4397                        free_netdev(dev1);
4398                } else
4399                        sky2_show_addr(dev1);
4400        }
4401
4402        setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
4403        INIT_WORK(&hw->restart_work, sky2_restart);
4404
4405        pci_set_drvdata(pdev, hw);
4406
4407        return 0;
4408
4409err_out_unregister:
4410        if (hw->flags & SKY2_HW_USE_MSI)
4411                pci_disable_msi(pdev);
4412        unregister_netdev(dev);
4413err_out_free_netdev:
4414        free_netdev(dev);
4415err_out_free_pci:
4416        sky2_write8(hw, B0_CTST, CS_RST_SET);
4417        pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
4418err_out_iounmap:
4419        iounmap(hw->regs);
4420err_out_free_hw:
4421        kfree(hw);
4422err_out_free_regions:
4423        pci_release_regions(pdev);
4424err_out_disable:
4425        pci_disable_device(pdev);
4426err_out:
4427        pci_set_drvdata(pdev, NULL);
4428        return err;
4429}
4430
4431static void __devexit sky2_remove(struct pci_dev *pdev)
4432{
4433        struct sky2_hw *hw = pci_get_drvdata(pdev);
4434        int i;
4435
4436        if (!hw)
4437                return;
4438
4439        del_timer_sync(&hw->watchdog_timer);
4440        cancel_work_sync(&hw->restart_work);
4441
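        /* Unregister the netdevs first, then quiesce the chip and release
         * IRQ, DMA and PCI resources; the struct net_devices themselves
         * are freed last.
         */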
4442        for (i = hw->ports-1; i >= 0; --i)
4443                unregister_netdev(hw->dev[i]);
4444
4445        sky2_write32(hw, B0_IMSK, 0);
4446
4447        sky2_power_aux(hw);
4448
4449        sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
4450        sky2_write8(hw, B0_CTST, CS_RST_SET);
4451        sky2_read8(hw, B0_CTST);
4452
4453        free_irq(pdev->irq, hw);
4454        if (hw->flags & SKY2_HW_USE_MSI)
4455                pci_disable_msi(pdev);
4456        pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
4457        pci_release_regions(pdev);
4458        pci_disable_device(pdev);
4459
4460        for (i = hw->ports-1; i >= 0; --i)
4461                free_netdev(hw->dev[i]);
4462
4463        iounmap(hw->regs);
4464        kfree(hw);
4465
4466        pci_set_drvdata(pdev, NULL);
4467}
4468
4469#ifdef CONFIG_PM
4470static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4471{
4472        struct sky2_hw *hw = pci_get_drvdata(pdev);
4473        int i, wol = 0;
4474
4475        if (!hw)
4476                return 0;
4477
4478        del_timer_sync(&hw->watchdog_timer);
4479        cancel_work_sync(&hw->restart_work);
4480
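        /* Stop each port and arm Wake-on-LAN where configured; wol records
         * whether any port needs PME wakeup enabled below.
         */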
4481        for (i = 0; i < hw->ports; i++) {
4482                struct net_device *dev = hw->dev[i];
4483                struct sky2_port *sky2 = netdev_priv(dev);
4484
4485                netif_device_detach(dev);
4486                if (netif_running(dev))
4487                        sky2_down(dev);
4488
4489                if (sky2->wol)
4490                        sky2_wol_init(sky2);
4491
4492                wol |= sky2->wol;
4493        }
4494
4495        sky2_write32(hw, B0_IMSK, 0);
4496        napi_disable(&hw->napi);
4497        sky2_power_aux(hw);
4498
4499        pci_save_state(pdev);
4500        pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
4501        pci_set_power_state(pdev, pci_choose_state(pdev, state));
4502
4503        return 0;
4504}
4505
4506static int sky2_resume(struct pci_dev *pdev)
4507{
4508        struct sky2_hw *hw = pci_get_drvdata(pdev);
4509        int i, err;
4510
4511        if (!hw)
4512                return 0;
4513
4514        err = pci_set_power_state(pdev, PCI_D0);
4515        if (err)
4516                goto out;
4517
4518        err = pci_restore_state(pdev);
4519        if (err)
4520                goto out;
4521
4522        pci_enable_wake(pdev, PCI_D0, 0);
4523
4524        /* Re-enable all clocks */
4525        if (hw->chip_id == CHIP_ID_YUKON_EX ||
4526            hw->chip_id == CHIP_ID_YUKON_EC_U ||
4527            hw->chip_id == CHIP_ID_YUKON_FE_P)
4528                sky2_pci_write32(hw, PCI_DEV_REG3, 0);
4529
4530        sky2_reset(hw);
4531        sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
4532        napi_enable(&hw->napi);
4533
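        /* Reattach each port and restart any interface that was running
         * before suspend; if that fails, close it cleanly under the RTNL.
         */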
4534        for (i = 0; i < hw->ports; i++) {
4535                struct net_device *dev = hw->dev[i];
4536
4537                netif_device_attach(dev);
4538                if (netif_running(dev)) {
4539                        err = sky2_up(dev);
4540                        if (err) {
4541                                printk(KERN_ERR PFX "%s: could not up: %d\n",
4542                                       dev->name, err);
4543                                rtnl_lock();
4544                                dev_close(dev);
4545                                rtnl_unlock();
4546                                goto out;
4547                        }
4548                }
4549        }
4550
4551        return 0;
4552out:
4553        dev_err(&pdev->dev, "resume failed (%d)\n", err);
4554        pci_disable_device(pdev);
4555        return err;
4556}
4557#endif
4558
4559static void sky2_shutdown(struct pci_dev *pdev)
4560{
4561        struct sky2_hw *hw = pci_get_drvdata(pdev);
4562        int i, wol = 0;
4563
4564        if (!hw)
4565                return;
4566
4567        del_timer_sync(&hw->watchdog_timer);
4568
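        /* Arm Wake-on-LAN on any port that requested it and leave the chip
         * in D3hot so a magic packet can still wake the system.
         */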
4569        for (i = 0; i < hw->ports; i++) {
4570                struct net_device *dev = hw->dev[i];
4571                struct sky2_port *sky2 = netdev_priv(dev);
4572
4573                if (sky2->wol) {
4574                        wol = 1;
4575                        sky2_wol_init(sky2);
4576                }
4577        }
4578
4579        if (wol)
4580                sky2_power_aux(hw);
4581
4582        pci_enable_wake(pdev, PCI_D3hot, wol);
4583        pci_enable_wake(pdev, PCI_D3cold, wol);
4584
4585        pci_disable_device(pdev);
4586        pci_set_power_state(pdev, PCI_D3hot);
4587}
4588
4589static struct pci_driver sky2_driver = {
4590        .name = DRV_NAME,
4591        .id_table = sky2_id_table,
4592        .probe = sky2_probe,
4593        .remove = __devexit_p(sky2_remove),
4594#ifdef CONFIG_PM
4595        .suspend = sky2_suspend,
4596        .resume = sky2_resume,
4597#endif
4598        .shutdown = sky2_shutdown,
4599};
4600
4601static int __init sky2_init_module(void)
4602{
4603        pr_info(PFX "driver version " DRV_VERSION "\n");
4604
4605        sky2_debug_init();
4606        return pci_register_driver(&sky2_driver);
4607}
4608
4609static void __exit sky2_cleanup_module(void)
4610{
4611        pci_unregister_driver(&sky2_driver);
4612        sky2_debug_cleanup();
4613}
4614
4615module_init(sky2_init_module);
4616module_exit(sky2_cleanup_module);
4617
4618MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
4619MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
4620MODULE_LICENSE("GPL");
4621MODULE_VERSION(DRV_VERSION);
4622