linux/drivers/net/tg3.c
<<
>>
Prefs
   1/*
   2 * tg3.c: Broadcom Tigon3 ethernet driver.
   3 *
   4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
   5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
   6 * Copyright (C) 2004 Sun Microsystems Inc.
   7 * Copyright (C) 2005-2010 Broadcom Corporation.
   8 *
   9 * Firmware is:
  10 *      Derived from proprietary unpublished source code,
  11 *      Copyright (C) 2000-2003 Broadcom Corporation.
  12 *
  13 *      Permission is hereby granted for the distribution of this firmware
  14 *      data in hexadecimal or equivalent format, provided this copyright
  15 *      notice is accompanying it.
  16 */
  17
  18
  19#include <linux/module.h>
  20#include <linux/moduleparam.h>
  21#include <linux/kernel.h>
  22#include <linux/types.h>
  23#include <linux/compiler.h>
  24#include <linux/slab.h>
  25#include <linux/delay.h>
  26#include <linux/in.h>
  27#include <linux/init.h>
  28#include <linux/ioport.h>
  29#include <linux/pci.h>
  30#include <linux/netdevice.h>
  31#include <linux/etherdevice.h>
  32#include <linux/skbuff.h>
  33#include <linux/ethtool.h>
  34#include <linux/mii.h>
  35#include <linux/phy.h>
  36#include <linux/brcmphy.h>
  37#include <linux/if_vlan.h>
  38#include <linux/ip.h>
  39#include <linux/tcp.h>
  40#include <linux/workqueue.h>
  41#include <linux/prefetch.h>
  42#include <linux/dma-mapping.h>
  43#include <linux/firmware.h>
  44
  45#include <net/checksum.h>
  46#include <net/ip.h>
  47
  48#include <asm/system.h>
  49#include <asm/io.h>
  50#include <asm/byteorder.h>
  51#include <asm/uaccess.h>
  52
  53#ifdef CONFIG_SPARC
  54#include <asm/idprom.h>
  55#include <asm/prom.h>
  56#endif
  57
  58#define BAR_0   0
  59#define BAR_2   2
  60
  61#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
  62#define TG3_VLAN_TAG_USED 1
  63#else
  64#define TG3_VLAN_TAG_USED 0
  65#endif
  66
  67#include "tg3.h"
  68
  69#define DRV_MODULE_NAME         "tg3"
  70#define PFX DRV_MODULE_NAME     ": "
  71#define DRV_MODULE_VERSION      "3.106"
  72#define DRV_MODULE_RELDATE      "January 12, 2010"
  73
  74#define TG3_DEF_MAC_MODE        0
  75#define TG3_DEF_RX_MODE         0
  76#define TG3_DEF_TX_MODE         0
  77#define TG3_DEF_MSG_ENABLE        \
  78        (NETIF_MSG_DRV          | \
  79         NETIF_MSG_PROBE        | \
  80         NETIF_MSG_LINK         | \
  81         NETIF_MSG_TIMER        | \
  82         NETIF_MSG_IFDOWN       | \
  83         NETIF_MSG_IFUP         | \
  84         NETIF_MSG_RX_ERR       | \
  85         NETIF_MSG_TX_ERR)
  86
  87/* length of time before we decide the hardware is borked,
  88 * and dev->tx_timeout() should be called to fix the problem
  89 */
  90#define TG3_TX_TIMEOUT                  (5 * HZ)
  91
  92/* hardware minimum and maximum for a single frame's data payload */
  93#define TG3_MIN_MTU                     60
  94#define TG3_MAX_MTU(tp) \
  95        ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
  96
  97/* These numbers seem to be hard coded in the NIC firmware somehow.
  98 * You can't change the ring sizes, but you can change where you place
  99 * them in the NIC onboard memory.
 100 */
 101#define TG3_RX_RING_SIZE                512
 102#define TG3_DEF_RX_RING_PENDING         200
 103#define TG3_RX_JUMBO_RING_SIZE          256
 104#define TG3_DEF_RX_JUMBO_RING_PENDING   100
 105#define TG3_RSS_INDIR_TBL_SIZE 128
 106
 107/* Do not place this n-ring entries value into the tp struct itself,
 108 * we really want to expose these constants to GCC so that modulo et
 109 * al.  operations are done with shifts and masks instead of with
 110 * hw multiply/modulo instructions.  Another solution would be to
 111 * replace things like '% foo' with '& (foo - 1)'.
 112 */
 113#define TG3_RX_RCB_RING_SIZE(tp)        \
 114        (((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
 115          !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
 116
 117#define TG3_TX_RING_SIZE                512
 118#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
 119
 120#define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
 121                                 TG3_RX_RING_SIZE)
 122#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
 123                                 TG3_RX_JUMBO_RING_SIZE)
 124#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
 125                                 TG3_RX_RCB_RING_SIZE(tp))
 126#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
 127                                 TG3_TX_RING_SIZE)
 128#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
 129
 130#define TG3_DMA_BYTE_ENAB               64
 131
 132#define TG3_RX_STD_DMA_SZ               1536
 133#define TG3_RX_JMB_DMA_SZ               9046
 134
 135#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
 136
 137#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 138#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 139
 140#define TG3_RX_STD_BUFF_RING_SIZE \
 141        (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
 142
 143#define TG3_RX_JMB_BUFF_RING_SIZE \
 144        (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
 145
 146/* minimum number of free TX descriptors required to wake up TX process */
 147#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
 148
 149#define TG3_RAW_IP_ALIGN 2
 150
 151/* number of ETHTOOL_GSTATS u64's */
 152#define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
 153
 154#define TG3_NUM_TEST            6
 155
 156#define FIRMWARE_TG3            "tigon/tg3.bin"
 157#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
 158#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
 159
 160static char version[] __devinitdata =
 161        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 162
 163MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
 164MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
 165MODULE_LICENSE("GPL");
 166MODULE_VERSION(DRV_MODULE_VERSION);
 167MODULE_FIRMWARE(FIRMWARE_TG3);
 168MODULE_FIRMWARE(FIRMWARE_TG3TSO);
 169MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
 170
 171#define TG3_RSS_MIN_NUM_MSIX_VECS       2
 172
 173static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
 174module_param(tg3_debug, int, 0);
 175MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
 176
 177static struct pci_device_id tg3_pci_tbl[] = {
 178        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
 179        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
 180        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
 181        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
 182        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
 183        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
 184        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
 185        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
 186        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
 187        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
 188        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
 189        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
 190        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
 191        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
 192        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
 193        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
 194        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
 195        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
 196        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
 197        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
 198        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
 199        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
 200        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
 201        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
 202        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
 203        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
 204        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
 205        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
 206        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
 207        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
 208        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
 209        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
 210        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
 211        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
 212        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
 213        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
 214        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
 215        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
 216        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
 217        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
 218        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
 219        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
 220        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
 221        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
 222        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
 223        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
 224        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
 225        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
 226        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
 227        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
 228        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
 229        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
 230        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
 231        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
 232        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
 233        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
 234        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
 235        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
 236        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
 237        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
 238        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
 239        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
 240        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
 241        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
 242        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
 243        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
 244        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
 245        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
 246        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
 247        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
 248        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
 249        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
 250        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
 251        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
 252        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
 253        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
 254        {}
 255};
 256
 257MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
 258
/* ethtool -S statistic labels.  TG3_NUM_STATS is derived from
 * sizeof(struct tg3_ethtool_stats), so the order and count of these
 * labels must stay in lock-step with that structure's u64 fields.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
 339
/* Labels for the TG3_NUM_TEST self-tests reported via ethtool.
 * Order here defines the order of results in the self-test output.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
 350
 351static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
 352{
 353        writel(val, tp->regs + off);
 354}
 355
 356static u32 tg3_read32(struct tg3 *tp, u32 off)
 357{
 358        return (readl(tp->regs + off));
 359}
 360
 361static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
 362{
 363        writel(val, tp->aperegs + off);
 364}
 365
 366static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
 367{
 368        return (readl(tp->aperegs + off));
 369}
 370
/* Write @val to register @off indirectly through the PCI config-space
 * REG_BASE_ADDR/REG_DATA window.  The window is shared state, so the
 * two config writes are serialized under indirect_lock.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
 380
 381static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
 382{
 383        writel(val, tp->regs + off);
 384        readl(tp->regs + off);
 385}
 386
/* Read register @off indirectly through the PCI config-space
 * REG_BASE_ADDR/REG_DATA window, serialized under indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
 398
/* Indirect mailbox write via PCI config space.  Two mailboxes (the
 * RX return-ring consumer index and the standard-ring producer index)
 * have dedicated config-space shadow registers; everything else goes
 * through the REG_BASE_ADDR/REG_DATA window at @off + 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
 428
/* Indirect mailbox read: mailboxes live at @off + 0x5600 when accessed
 * through the config-space register window (see tg3_write_indirect_mbox).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
 440
 441/* usec_wait specifies the wait time in usec when writing to certain registers
 442 * where it is unsafe to read back the register without some delay.
 443 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 444 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 445 */
/* Write @val to @off and guarantee at least @usec_wait microseconds of
 * settle time.  Indirect (non-posted) writes need no flush; posted
 * writes are flushed with a read-back, then the delay is applied again
 * so the full wait is honored in both paths.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* flush the posted write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
 465
/* Write a mailbox register, then read it back to flush the posted
 * write — except on chips where mailbox write reordering or the ICH
 * workaround makes the read-back unsafe/unnecessary.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
 473
/* Write a TX mailbox, applying two chip-specific workarounds:
 * TXD_MBOX_HWBUG chips need the value written twice, and
 * MBOX_WRITE_REORDER chips need a read-back to flush the write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);	/* HW bug: write must be doubled */
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);		/* flush to defeat write reordering */
}
 483
 484static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
 485{
 486        return (readl(tp->regs + off + GRCMBOX_BASE));
 487}
 488
 489static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
 490{
 491        writel(val, tp->regs + off + GRCMBOX_BASE);
 492}
 493
 494#define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
 495#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
 496#define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
 497#define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
 498#define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
 499
 500#define tw32(reg,val)           tp->write32(tp, reg, val)
 501#define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
 502#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
 503#define tr32(reg)               tp->read32(tp, reg)
 504
/* Write @val into NIC on-chip SRAM at @off through the memory window.
 * On the 5906, writes into the statistics-block range are skipped
 * entirely (presumably that region is not host-writable — the read
 * path returns 0 for it too).
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Window programmed via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window programmed via flushed MMIO writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
 529
/* Read NIC on-chip SRAM at @off into *@val through the memory window.
 * On the 5906 the statistics-block range reads back as 0 (mirrors the
 * skip in tg3_write_mem).
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		/* Window programmed via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window programmed via flushed MMIO accesses. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
 556
 557static void tg3_ape_lock_init(struct tg3 *tp)
 558{
 559        int i;
 560
 561        /* Make sure the driver hasn't any stale locks. */
 562        for (i = 0; i < 8; i++)
 563                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
 564                                APE_LOCK_GRANT_DRIVER);
 565}
 566
 567static int tg3_ape_lock(struct tg3 *tp, int locknum)
 568{
 569        int i, off;
 570        int ret = 0;
 571        u32 status;
 572
 573        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
 574                return 0;
 575
 576        switch (locknum) {
 577                case TG3_APE_LOCK_GRC:
 578                case TG3_APE_LOCK_MEM:
 579                        break;
 580                default:
 581                        return -EINVAL;
 582        }
 583
 584        off = 4 * locknum;
 585
 586        tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
 587
 588        /* Wait for up to 1 millisecond to acquire lock. */
 589        for (i = 0; i < 100; i++) {
 590                status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
 591                if (status == APE_LOCK_GRANT_DRIVER)
 592                        break;
 593                udelay(10);
 594        }
 595
 596        if (status != APE_LOCK_GRANT_DRIVER) {
 597                /* Revoke the lock request. */
 598                tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
 599                                APE_LOCK_GRANT_DRIVER);
 600
 601                ret = -EBUSY;
 602        }
 603
 604        return ret;
 605}
 606
 607static void tg3_ape_unlock(struct tg3 *tp, int locknum)
 608{
 609        int off;
 610
 611        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
 612                return;
 613
 614        switch (locknum) {
 615                case TG3_APE_LOCK_GRC:
 616                case TG3_APE_LOCK_MEM:
 617                        break;
 618                default:
 619                        return;
 620        }
 621
 622        off = 4 * locknum;
 623        tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
 624}
 625
/* Disable chip interrupts: mask the PCI interrupt line, then write 1
 * into every vector's interrupt mailbox (note: irq_max, not irq_cnt,
 * so unused vectors are masked as well).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
 635
/* Enable chip interrupts: unmask the PCI interrupt, ack each active
 * vector's mailbox with its last status tag, then force an initial
 * interrupt (or kick the coalescing block) so pending work is seen.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;
	u32 coal_now = 0;

	tp->irq_sync = 0;
	wmb();		/* irq_sync must be visible before interrupts fire */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): mailbox is written twice under 1-shot MSI —
		 * appears to be a chip requirement for that mode; confirm.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | coal_now);
}
 664
 665static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
 666{
 667        struct tg3 *tp = tnapi->tp;
 668        struct tg3_hw_status *sblk = tnapi->hw_status;
 669        unsigned int work_exists = 0;
 670
 671        /* check for phy events */
 672        if (!(tp->tg3_flags &
 673              (TG3_FLAG_USE_LINKCHG_REG |
 674               TG3_FLAG_POLL_SERDES))) {
 675                if (sblk->status & SD_STATUS_LINK_CHG)
 676                        work_exists = 1;
 677        }
 678        /* check for RX/TX work to do */
 679        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
 680            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
 681                work_exists = 1;
 682
 683        return work_exists;
 684}
 685
 686/* tg3_int_reenable
 687 *  similar to tg3_enable_ints, but it accurately determines whether there
 688 *  is new work pending and can return without flushing the PIO write
 689 *  which reenables interrupts
 690 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Ack everything up to last_tag; this also re-arms the vector. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();	/* keep the mailbox write ordered ahead of later unlocks */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
 707
 708static void tg3_napi_disable(struct tg3 *tp)
 709{
 710        int i;
 711
 712        for (i = tp->irq_cnt - 1; i >= 0; i--)
 713                napi_disable(&tp->napi[i].napi);
 714}
 715
 716static void tg3_napi_enable(struct tg3 *tp)
 717{
 718        int i;
 719
 720        for (i = 0; i < tp->irq_cnt; i++)
 721                napi_enable(&tp->napi[i].napi);
 722}
 723
/* Quiesce the data path: refresh trans_start so the watchdog doesn't
 * fire while we hold the device stopped, then disable NAPI and TX.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
 730
/* Restart the data path after tg3_netif_stop()/hardware init: wake TX
 * queues, re-enable NAPI, then re-arm interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so tg3_enable_ints forces an
	 * initial interrupt on the non-tagged-status path.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
 743
/* Switch the core clock back to its base setting via the ALTCLK path.
 * No-op on chips with a CPMU and on 5780-class chips.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN control bits and the low 5-bit field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down in two writes with a 40 usec settle each:
		 * reading back immediately after a clock change is unsafe
		 * (see the _tw32_flush comment).
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
 777
 778#define PHY_BUSY_LOOPS  5000
 779
/* Read PHY register @reg over the MI (MDIO) interface into *@val.
 * MAC auto-polling is suspended for the duration of the manual access
 * and restored afterwards.  Returns 0 on success, -EBUSY if the MI
 * interface never clears its busy bit (*@val is left 0 in that case).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read op. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion: busy bit clears when the read finishes. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);	/* re-read for settled data */
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we suspended it above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
 828
/* Write @val to PHY register @reg over the MDIO interface.
 * Returns 0 on success or -EBUSY if the MI_COM transaction does not
 * complete.  For FET-style PHYs, writes to MII_TG3_CTRL and
 * MII_TG3_AUX_CTRL are skipped and reported as success.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Manual MI_COM access requires autopolling to be off. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI_COM frame: PHY address, register, data, write. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion; the BUSY bit self-clears when done. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore autopolling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
 877
 878static int tg3_bmcr_reset(struct tg3 *tp)
 879{
 880        u32 phy_control;
 881        int limit, err;
 882
 883        /* OK, reset it, and poll the BMCR_RESET bit until it
 884         * clears or we time out.
 885         */
 886        phy_control = BMCR_RESET;
 887        err = tg3_writephy(tp, MII_BMCR, phy_control);
 888        if (err != 0)
 889                return -EBUSY;
 890
 891        limit = 5000;
 892        while (limit--) {
 893                err = tg3_readphy(tp, MII_BMCR, &phy_control);
 894                if (err != 0)
 895                        return -EBUSY;
 896
 897                if ((phy_control & BMCR_RESET) == 0) {
 898                        udelay(40);
 899                        break;
 900                }
 901                udelay(10);
 902        }
 903        if (limit < 0)
 904                return -EBUSY;
 905
 906        return 0;
 907}
 908
 909static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
 910{
 911        struct tg3 *tp = bp->priv;
 912        u32 val;
 913
 914        spin_lock_bh(&tp->lock);
 915
 916        if (tg3_readphy(tp, reg, &val))
 917                val = -EIO;
 918
 919        spin_unlock_bh(&tp->lock);
 920
 921        return val;
 922}
 923
 924static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
 925{
 926        struct tg3 *tp = bp->priv;
 927        u32 ret = 0;
 928
 929        spin_lock_bh(&tp->lock);
 930
 931        if (tg3_writephy(tp, reg, val))
 932                ret = -EIO;
 933
 934        spin_unlock_bh(&tp->lock);
 935
 936        return ret;
 937}
 938
/* mii_bus ->reset hook.  The MAC's MDIO interface needs no bus-level
 * reset, so this is intentionally a no-op that reports success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
 943
/* Program the 5785 MAC's PHY interface (LED modes and, for RGMII,
 * in-band status and clock timeout settings) to match the attached
 * PHY device.  Unrecognized PHY IDs are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
	case TG3_PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: set LED modes and default clock timeouts
		 * with the RGMII interrupt disabled, then we are done.
		 */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII path: enable in-band signalling unless disabled. */
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band configuration into the external RGMII
	 * mode register.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1024
/* Disable MI autopolling and latch the PHY address for this port.
 * On the 5717 the PHY address depends on the PCIe function number
 * and on whether the port is strapped for SerDes.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		u32 funcnum, is_serdes;

		funcnum = tr32(TG3_CPMU_STATUS) & TG3_CPMU_STATUS_PCIE_FUNC;
		if (funcnum)
			tp->phy_addr = 2;
		else
			tp->phy_addr = 1;

		/* On 5717 A0 the SerDes indication comes from the CPMU
		 * strap register rather than SG_DIG_STATUS.
		 */
		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		/* SerDes PHYs live 7 addresses above the copper ones. */
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1054
/* Allocate and register an MDIO bus for the device when phylib is in
 * use, then apply PHY-specific interface-mode and flag fixups.
 * Returns 0 on success (or when no bus is needed) or a negative
 * errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	/* Nothing to do if phylib is not used or we already set up. */
	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id is derived from the PCI bus/devfn to be unique. */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY interface mode and driver-flag fixups. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case TG3_PHY_ID_BCM50610:
	case TG3_PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1146
1147static void tg3_mdio_fini(struct tg3 *tp)
1148{
1149        if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1150                tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1151                mdiobus_unregister(tp->mdio_bus);
1152                mdiobus_free(tp->mdio_bus);
1153        }
1154}
1155
/* tp->lock is held. */
/* Signal the firmware that a driver event is pending by setting
 * GRC_RX_CPU_DRIVER_EVENT, and timestamp it so a later
 * tg3_wait_for_event_ack() can shorten its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1167
1168#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1169
/* tp->lock is held. */
/* Give the firmware up to TG3_FW_EVENT_TIMEOUT_USEC (measured from
 * the last generated event) to acknowledge it by clearing the
 * GRC_RX_CPU_DRIVER_EVENT bit.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8us steps; +1 rounds up so we never wait zero times. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1196
/* tp->lock is held. */
/* Push the current MII link state (BMCR/BMSR, advertisement, link
 * partner ability, 1000T control/status, PHY address) to the
 * management firmware via the NIC SRAM mailbox, then raise a driver
 * event.  Only applies to ASF-enabled 5780-class devices.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	/* Make sure the firmware has consumed the previous event. */
	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* Command data length; NOTE(review): four 32-bit words are
	 * written below — confirm 14 against the firmware interface.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: our advertisement and the partner's ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000T control/status (copper only). */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1244
1245static void tg3_link_report(struct tg3 *tp)
1246{
1247        if (!netif_carrier_ok(tp->dev)) {
1248                if (netif_msg_link(tp))
1249                        printk(KERN_INFO PFX "%s: Link is down.\n",
1250                               tp->dev->name);
1251                tg3_ump_link_report(tp);
1252        } else if (netif_msg_link(tp)) {
1253                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1254                       tp->dev->name,
1255                       (tp->link_config.active_speed == SPEED_1000 ?
1256                        1000 :
1257                        (tp->link_config.active_speed == SPEED_100 ?
1258                         100 : 10)),
1259                       (tp->link_config.active_duplex == DUPLEX_FULL ?
1260                        "full" : "half"));
1261
1262                printk(KERN_INFO PFX
1263                       "%s: Flow control is %s for TX and %s for RX.\n",
1264                       tp->dev->name,
1265                       (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1266                       "on" : "off",
1267                       (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1268                       "on" : "off");
1269                tg3_ump_link_report(tp);
1270        }
1271}
1272
1273static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1274{
1275        u16 miireg;
1276
1277        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1278                miireg = ADVERTISE_PAUSE_CAP;
1279        else if (flow_ctrl & FLOW_CTRL_TX)
1280                miireg = ADVERTISE_PAUSE_ASYM;
1281        else if (flow_ctrl & FLOW_CTRL_RX)
1282                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1283        else
1284                miireg = 0;
1285
1286        return miireg;
1287}
1288
1289static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1290{
1291        u16 miireg;
1292
1293        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1294                miireg = ADVERTISE_1000XPAUSE;
1295        else if (flow_ctrl & FLOW_CTRL_TX)
1296                miireg = ADVERTISE_1000XPSE_ASYM;
1297        else if (flow_ctrl & FLOW_CTRL_RX)
1298                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1299        else
1300                miireg = 0;
1301
1302        return miireg;
1303}
1304
1305static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1306{
1307        u8 cap = 0;
1308
1309        if (lcladv & ADVERTISE_1000XPAUSE) {
1310                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1311                        if (rmtadv & LPA_1000XPAUSE)
1312                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1313                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
1314                                cap = FLOW_CTRL_RX;
1315                } else {
1316                        if (rmtadv & LPA_1000XPAUSE)
1317                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1318                }
1319        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1320                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1321                        cap = FLOW_CTRL_TX;
1322        }
1323
1324        return cap;
1325}
1326
/* Resolve the flow control settings to use (from the autoneg results
 * in @lcladv/@rmtadv, or from the forced configuration) and program
 * the MAC RX/TX flow control enables, touching the registers only on
 * an actual change.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		/* SerDes and copper pause bits are laid out differently. */
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1366
/* phylib link-change callback.  Re-programs the MAC port mode, MI
 * status polling and TX slot-time settings from the new PHY state,
 * resolves flow control, and logs a link message when anything
 * user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select MII vs GMII port mode from the link speed;
		 * the 5785 uses MII for anything below 1000Mbps.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build pause advertisements for
			 * the flow control resolution below.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Use an extended slot time (0xff) for 1000Mbps half duplex. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when link state, speed, duplex or flow control
	 * actually changed.
	 */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
	    linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Report outside the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1450
/* Connect the MAC to its PHY through phylib and restrict the
 * advertised feature set to what the MAC supports.  Does nothing
 * once connected.  Returns 0 or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices fall back to the basic set. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1498
/* Restore any configuration saved while the PHY was in low power and
 * start the phylib state machine plus autonegotiation.  No-op until
 * tg3_phy_init() has connected the PHY.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Coming out of low power: re-apply the saved settings. */
	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1520
1521static void tg3_phy_stop(struct tg3 *tp)
1522{
1523        if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1524                return;
1525
1526        phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1527}
1528
1529static void tg3_phy_fini(struct tg3 *tp)
1530{
1531        if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1532                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1533                tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1534        }
1535}
1536
/* Write @val to indirect DSP register @reg: latch the register
 * address first, then push the data through the read/write port.
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1542
1543static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1544{
1545        u32 phytest;
1546
1547        if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1548                u32 phy;
1549
1550                tg3_writephy(tp, MII_TG3_FET_TEST,
1551                             phytest | MII_TG3_FET_SHADOW_EN);
1552                if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1553                        if (enable)
1554                                phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1555                        else
1556                                phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1557                        tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1558                }
1559                tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1560        }
1561}
1562
/* Enable or disable the PHY auto power-down (APD) feature.  Only on
 * 5705-and-later devices; FET-style PHYs use their own shadow
 * register mechanism.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program the SCR5 shadow register; the DLL APD bit is left
	 * clear when enabling on 5784.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Program the APD shadow register with an 84ms wake timer. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1595
/* Enable or disable automatic MDI/MDI-X crossover.  Not applicable
 * to SerDes links or pre-5705 devices.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
		u32 ephy;

		/* FET PHYs: flip the MDIX bit in the shadowed misc
		 * control register, restoring FET_TEST afterwards.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		/* Other PHYs: read-modify-write the AUXCTL misc
		 * register through its select/write-enable protocol.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1635
/* Turn on the PHY's "ethernet@wirespeed" feature so the link can
 * fall back to a lower speed on marginal cabling.
 * NOTE(review): 0x7007 and the (1<<15)|(1<<4) write are raw magic
 * values for the AUXCTL shadow register protocol — confirm against
 * the Broadcom AUXCTL register layout.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
1648
/* Program PHY DSP coefficients from the One-Time-Programmable (OTP)
 * word cached in tp->phy_otp.  Each bitfield of the OTP value is
 * extracted and written to its corresponding DSP tap/expansion
 * register, with the SM_DSP clock enabled for the duration.
 * No-op when no OTP value was captured at probe time.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        /* Enable SM_DSP clock and tx 6dB coding. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

        /* AGC target. */
        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        /* High-pass filter settings. */
        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        /* Low-pass filter disable + ADC clock adjust. */
        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        /* VDAC trim. */
        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        /* 10BT amplitude. */
        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        /* Resistor offsets. */
        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Turn off SM_DSP clock. */
        phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
              MII_TG3_AUXCTL_ACTL_TX_6DB;
        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1691
1692static int tg3_wait_macro_done(struct tg3 *tp)
1693{
1694        int limit = 100;
1695
1696        while (limit--) {
1697                u32 tmp32;
1698
1699                if (!tg3_readphy(tp, 0x16, &tmp32)) {
1700                        if ((tmp32 & 0x1000) == 0)
1701                                break;
1702                }
1703        }
1704        if (limit < 0)
1705                return -EBUSY;
1706
1707        return 0;
1708}
1709
/* Write a known test pattern into the PHY DSP memory of each of the
 * four channels and read it back to verify it.  On any macro timeout
 * or MII failure, *resetp is set to request another PHY reset before
 * the next retry.  Returns 0 when all channels verify, -EBUSY on any
 * failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        /* Per channel: three (low-word, high-word) pairs. */
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Select this channel's block and start a macro write. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                tg3_writephy(tp, 0x16, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Re-select the channel and switch to read-back mode. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, 0x16, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, 0x16, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back and compare each (low, high) pair; only the
                 * significant bits of each word are checked.
                 */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: issue vendor-specified
                                 * recovery writes (meaning of the
                                 * values is not documented here) and
                                 * fail without requesting a reset.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
1775
1776static int tg3_phy_reset_chanpat(struct tg3 *tp)
1777{
1778        int chan;
1779
1780        for (chan = 0; chan < 4; chan++) {
1781                int i;
1782
1783                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1784                             (chan * 0x2000) | 0x0200);
1785                tg3_writephy(tp, 0x16, 0x0002);
1786                for (i = 0; i < 6; i++)
1787                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1788                tg3_writephy(tp, 0x16, 0x0202);
1789                if (tg3_wait_macro_done(tp))
1790                        return -EBUSY;
1791        }
1792
1793        return 0;
1794}
1795
/* PHY reset sequence with workarounds for 5703/5704/5705: reset the
 * PHY, force 1000/full master mode, then verify the DSP memory with a
 * test pattern, retrying (with a fresh reset when requested) up to 10
 * times.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                /* NOTE(review): if this read fails on every retry,
                 * phy9_orig is used uninitialized at the restore below
                 * -- confirm whether that path can occur in practice.
                 */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* Clear out the test pattern regardless of verify result. */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        /* Unblock PHY control access. */
        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        /* Restore the MII_TG3_CTRL value saved before forcing master. */
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        /* Re-enable transmitter and interrupt (clear the 0x3000 bits
         * set at the top); report -EBUSY if that read-modify-write is
         * impossible and no earlier error is pending.
         */
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}
1871
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Resets the PHY and then applies all post-reset chip/PHY specific
 * workarounds.  Returns 0 on success, negative errno if the PHY does
 * not respond or a sub-sequence fails.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 cpmuctrl;
        u32 phy_status;
        int err;

        /* 5906: bring the internal EPHY out of IDDQ (low-power) mode
         * before touching it.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* BMSR is read twice (latched register); bail out if the PHY
         * does not respond at all.
         */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        /* The reset will drop any existing link; report it now. */
        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        /* 5703/5704/5705 use a dedicated reset-with-workarounds path. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* 5784 (non-AX revs): temporarily clear the GPHY 10MB-RX-only
         * CPMU mode around the reset; restored below.
         */
        cpmuctrl = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        /* Restore the CPMU state saved above, after programming the
         * DSP expansion register this workaround requires.
         */
        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                u32 phy;

                phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        /* 5784-AX/5761-AX: if the 1000MB MAC clock was reduced to
         * 12.5MHz, restore the default rate.
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                u32 val;

                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        tg3_phy_apply_otp(tp);

        if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        /* Post-reset PHY fixups selected by per-chip bug flags.  The
         * DSP address/data pairs are vendor-specified magic; each
         * sequence runs with the SM_DSP clock enabled (0x0c00) and
         * disables it afterwards (0x0400).
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                /* Written twice intentionally. */
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
2017
/* Program the GPIO pins that control auxiliary (Vaux) power.  Only
 * meaningful on NIC-style boards.  On dual/multi-port devices
 * (5704/5714/5717) the peer function's WOL/ASF configuration is
 * consulted too, since both ports share the aux power controls.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        /* Aux power must stay on if either function needs WOL or ASF. */
        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                           tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                        /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                        u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                             GRC_LCLCTRL_GPIO_OE1 |
                                             GRC_LCLCTRL_GPIO_OE2 |
                                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                                             GRC_LCLCTRL_GPIO_OUTPUT1 |
                                             tp->grc_local_ctrl;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* If the peer has already completed init, it
                         * owns the GPIO programming -- leave it alone.
                         */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        /* Three staged writes, 100us apart each. */
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                /* No WOL/ASF on either port: pulse GPIO1 (presumably
                 * the aux power control -- not documented here); not
                 * applicable to 5700/5701.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
2130
2131static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2132{
2133        if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2134                return 1;
2135        else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2136                if (speed != SPEED_10)
2137                        return 1;
2138        } else if (speed == SPEED_10)
2139                return 1;
2140
2141        return 0;
2142}
2143
2144static int tg3_setup_phy(struct tg3 *, int);
2145
2146#define RESET_KIND_SHUTDOWN     0
2147#define RESET_KIND_INIT         1
2148#define RESET_KIND_SUSPEND      2
2149
2150static void tg3_write_sig_post_reset(struct tg3 *, int);
2151static int tg3_halt_cpu(struct tg3 *, u32);
2152
/* Put the PHY into its lowest safe power state ahead of suspend /
 * power-down.  The sequence depends on the PHY type (serdes, 5906
 * EPHY, FET-style, or copper) and on whether full low-power mode was
 * requested via do_low_power.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                /* Serdes: on 5704 only, hand autoneg to hardware and
                 * hold the serdes in soft reset.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906: reset the EPHY, then park it in IDDQ mode. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
                /* FET PHY: restart autoneg with nothing advertised and
                 * set the standby-power-down bit via the shadow page.
                 */
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        /* Restore the original test register value. */
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                /* Copper PHY, full low power: LEDs off, isolate and
                 * drop regulator voltage via the power-control shadow.
                 */
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
                             MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                             MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                             MII_TG3_AUXCTL_PCTL_VREG_11V);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;

        /* 5784-AX/5761-AX: reduce the 1000MB MAC clock to 12.5MHz
         * before powering the PHY down.
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2226
2227/* tp->lock is held. */
2228static int tg3_nvram_lock(struct tg3 *tp)
2229{
2230        if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2231                int i;
2232
2233                if (tp->nvram_lock_cnt == 0) {
2234                        tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2235                        for (i = 0; i < 8000; i++) {
2236                                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2237                                        break;
2238                                udelay(20);
2239                        }
2240                        if (i == 8000) {
2241                                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2242                                return -ENODEV;
2243                        }
2244                }
2245                tp->nvram_lock_cnt++;
2246        }
2247        return 0;
2248}
2249
2250/* tp->lock is held. */
2251static void tg3_nvram_unlock(struct tg3 *tp)
2252{
2253        if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2254                if (tp->nvram_lock_cnt > 0)
2255                        tp->nvram_lock_cnt--;
2256                if (tp->nvram_lock_cnt == 0)
2257                        tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2258        }
2259}
2260
2261/* tp->lock is held. */
2262static void tg3_enable_nvram_access(struct tg3 *tp)
2263{
2264        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2265            !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2266                u32 nvaccess = tr32(NVRAM_ACCESS);
2267
2268                tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2269        }
2270}
2271
2272/* tp->lock is held. */
2273static void tg3_disable_nvram_access(struct tg3 *tp)
2274{
2275        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2276            !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2277                u32 nvaccess = tr32(NVRAM_ACCESS);
2278
2279                tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2280        }
2281}
2282
2283static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2284                                        u32 offset, u32 *val)
2285{
2286        u32 tmp;
2287        int i;
2288
2289        if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2290                return -EINVAL;
2291
2292        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2293                                        EEPROM_ADDR_DEVID_MASK |
2294                                        EEPROM_ADDR_READ);
2295        tw32(GRC_EEPROM_ADDR,
2296             tmp |
2297             (0 << EEPROM_ADDR_DEVID_SHIFT) |
2298             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2299              EEPROM_ADDR_ADDR_MASK) |
2300             EEPROM_ADDR_READ | EEPROM_ADDR_START);
2301
2302        for (i = 0; i < 1000; i++) {
2303                tmp = tr32(GRC_EEPROM_ADDR);
2304
2305                if (tmp & EEPROM_ADDR_COMPLETE)
2306                        break;
2307                msleep(1);
2308        }
2309        if (!(tmp & EEPROM_ADDR_COMPLETE))
2310                return -EBUSY;
2311
2312        tmp = tr32(GRC_EEPROM_DATA);
2313
2314        /*
2315         * The data will always be opposite the native endian
2316         * format.  Perform a blind byteswap to compensate.
2317         */
2318        *val = swab32(tmp);
2319
2320        return 0;
2321}
2322
2323#define NVRAM_CMD_TIMEOUT 10000
2324
2325static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2326{
2327        int i;
2328
2329        tw32(NVRAM_CMD, nvram_cmd);
2330        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2331                udelay(10);
2332                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2333                        udelay(10);
2334                        break;
2335                }
2336        }
2337
2338        if (i == NVRAM_CMD_TIMEOUT)
2339                return -EBUSY;
2340
2341        return 0;
2342}
2343
2344static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2345{
2346        if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2347            (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2348            (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2349           !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2350            (tp->nvram_jedecnum == JEDEC_ATMEL))
2351
2352                addr = ((addr / tp->nvram_pagesize) <<
2353                        ATMEL_AT45DB0X1B_PAGE_POS) +
2354                       (addr % tp->nvram_pagesize);
2355
2356        return addr;
2357}
2358
2359static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2360{
2361        if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2362            (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2363            (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2364           !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2365            (tp->nvram_jedecnum == JEDEC_ATMEL))
2366
2367                addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2368                        tp->nvram_pagesize) +
2369                       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2370
2371        return addr;
2372}
2373
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Read one 32-bit word from NVRAM at the given linear offset.
 * Falls back to the EEPROM path on parts without an NVRAM interface.
 * Returns 0 with *val filled in, or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Apply buffered-flash page translation if needed. */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        /* Ordering is fixed: arbitration lock, enable access, issue
         * the read command, then tear down in reverse order.
         */
        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = tr32(NVRAM_RDDATA);

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
2411
2412/* Ensures NVRAM data is in bytestream format. */
2413static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2414{
2415        u32 v;
2416        int res = tg3_nvram_read(tp, offset, &v);
2417        if (!res)
2418                *val = cpu_to_be32(v);
2419        return res;
2420}
2421
2422/* tp->lock is held. */
2423static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2424{
2425        u32 addr_high, addr_low;
2426        int i;
2427
2428        addr_high = ((tp->dev->dev_addr[0] << 8) |
2429                     tp->dev->dev_addr[1]);
2430        addr_low = ((tp->dev->dev_addr[2] << 24) |
2431                    (tp->dev->dev_addr[3] << 16) |
2432                    (tp->dev->dev_addr[4] <<  8) |
2433                    (tp->dev->dev_addr[5] <<  0));
2434        for (i = 0; i < 4; i++) {
2435                if (i == 1 && skip_mac_1)
2436                        continue;
2437                tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2438                tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2439        }
2440
2441        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2442            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2443                for (i = 0; i < 12; i++) {
2444                        tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2445                        tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2446                }
2447        }
2448
2449        addr_high = (tp->dev->dev_addr[0] +
2450                     tp->dev->dev_addr[1] +
2451                     tp->dev->dev_addr[2] +
2452                     tp->dev->dev_addr[3] +
2453                     tp->dev->dev_addr[4] +
2454                     tp->dev->dev_addr[5]) &
2455                TX_BACKOFF_SEED_MASK;
2456        tw32(MAC_TX_BACKOFF_SEED, addr_high);
2457}
2458
/* Transition the device into the requested PCI power state.
 *
 * For PCI_D0 this simply disarms wakeup and restores full power.  For
 * D1/D2/D3hot it saves the current link configuration, drops the PHY to
 * a low-power link mode, arms Wake-on-LAN (if capable and enabled),
 * reprograms the core clocks per chip revision, and finally asks the
 * PCI core to enter the target state.
 *
 * Returns 0 on success or -EINVAL for an unsupported power state.
 * NOTE(review): caller is presumed to hold the appropriate locks for
 * link_config access — confirm against callers outside this window.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	switch (state) {
	case PCI_D0:
		/* Going to full power: disarm PME and restore D0. */
		pci_enable_wake(tp->pdev, state, false);
		pci_set_power_state(tp->pdev, PCI_D0);

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		/* Low-power states fall through to the code below. */
		break;

	default:
		printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
			tp->dev->name, state);
		return -EINVAL;
	}

	/* Restore the CLKREQ setting. */
	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask the chip's PCI interrupt while in a low-power state. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* WOL is armed only if the hardware can signal PME from the
	 * target state AND the PM core AND the driver flag allow it.
	 */
	device_should_wake = pci_pme_capable(tp->pdev, state) &&
			     device_may_wakeup(&tp->pdev->dev) &&
			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		/* phylib-managed PHY: renegotiate a slow link via phylib. */
		do_low_power = false;
		if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
		    !tp->link_config.phy_is_low_power) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->link_config.phy_is_low_power = 1;

			/* Save current link parameters for restore on resume. */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			/* Advertise only 10HD by default ... */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			/* ... plus faster modes if management firmware or
			 * WOL needs the link to stay usable.
			 */
			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
			    device_should_wake) {
				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY families need the
			 * extra low-power programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != TG3_PHY_ID_BCMAC131) {
				phyid &= TG3_PHY_OUI_MASK;
				if (phyid == TG3_PHY_OUI_1 ||
				    phyid == TG3_PHY_OUI_2 ||
				    phyid == TG3_PHY_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		/* Driver-managed PHY: always do the low-power sequence. */
		do_low_power = true;

		if (tp->link_config.phy_is_low_power == 0) {
			tp->link_config.phy_is_low_power = 1;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		/* Copper only: force a slow 10/half autoneg link. */
		if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* 5906: tell the internal VCPU firmware WOL is handled here. */
		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Give the firmware up to ~200ms to post its ready magic
		 * in the ASF status mailbox before shutting down.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell the firmware the driver is entering WOL shutdown. */
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Configure the MAC to keep receiving so magic packets
		 * can be detected while suspended.
		 */
		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			if (do_low_power) {
				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
				udelay(40);
			}

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				/* 5700 needs link polarity fixed up for the
				 * WOL link speed.
				 */
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
			/* Preserve APE TX/RX enables for the management
			 * processor while suspended.
			 */
			mac_mode |= tp->mac_mode &
				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
			if (mac_mode & MAC_MODE_APE_TX_EN)
				mac_mode |= MAC_MODE_TDE_ENABLE;
		}

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock programming: exact bits depend on chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock switch, waiting 40us after each write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Power the PHY down completely only if neither WOL nor the
	 * ASF management firmware needs it alive.
	 */
	if (!(device_should_wake) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; unlock only
			 * if the lock was actually acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Arm PME generation if we decided to wake on LAN activity. */
	if (device_should_wake)
		pci_enable_wake(tp->pdev, state, true);

	/* Finally, set the new power state. */
	pci_set_power_state(tp->pdev, state);

	return 0;
}
2741
2742static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2743{
2744        switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2745        case MII_TG3_AUX_STAT_10HALF:
2746                *speed = SPEED_10;
2747                *duplex = DUPLEX_HALF;
2748                break;
2749
2750        case MII_TG3_AUX_STAT_10FULL:
2751                *speed = SPEED_10;
2752                *duplex = DUPLEX_FULL;
2753                break;
2754
2755        case MII_TG3_AUX_STAT_100HALF:
2756                *speed = SPEED_100;
2757                *duplex = DUPLEX_HALF;
2758                break;
2759
2760        case MII_TG3_AUX_STAT_100FULL:
2761                *speed = SPEED_100;
2762                *duplex = DUPLEX_FULL;
2763                break;
2764
2765        case MII_TG3_AUX_STAT_1000HALF:
2766                *speed = SPEED_1000;
2767                *duplex = DUPLEX_HALF;
2768                break;
2769
2770        case MII_TG3_AUX_STAT_1000FULL:
2771                *speed = SPEED_1000;
2772                *duplex = DUPLEX_FULL;
2773                break;
2774
2775        default:
2776                if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2777                        *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2778                                 SPEED_10;
2779                        *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2780                                  DUPLEX_HALF;
2781                        break;
2782                }
2783                *speed = SPEED_INVALID;
2784                *duplex = DUPLEX_INVALID;
2785                break;
2786        }
2787}
2788
/* Program the copper PHY's advertisement and control registers to begin
 * link establishment.  Three cases are handled: low-power mode (advertise
 * only slow speeds), full autonegotiation (advertise everything the
 * configuration allows), and a forced speed/duplex request.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* WOL at 100Mb needs the 100baseT modes kept. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise per link_config. */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		/* Gigabit modes are advertised separately via MII_TG3_CTRL. */
		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 silicon wants master mode forced. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement for 10/100 forced modes. */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		/* Forced mode: program BMCR directly instead of
		 * restarting autonegotiation.
		 */
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only touch BMCR if it actually needs to change; first
		 * loop the PHY in on itself and wait (up to ~15ms) for
		 * link to drop before applying the new forced mode.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR latches link-down; read twice. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: kick off (re)negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2926
2927static int tg3_init_5401phy_dsp(struct tg3 *tp)
2928{
2929        int err;
2930
2931        /* Turn off tap power management. */
2932        /* Set Extended packet length bit */
2933        err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2934
2935        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2936        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2937
2938        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2939        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2940
2941        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2942        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2943
2944        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2945        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2946
2947        err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2948        err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2949
2950        udelay(40);
2951
2952        return err;
2953}
2954
2955static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2956{
2957        u32 adv_reg, all_mask = 0;
2958
2959        if (mask & ADVERTISED_10baseT_Half)
2960                all_mask |= ADVERTISE_10HALF;
2961        if (mask & ADVERTISED_10baseT_Full)
2962                all_mask |= ADVERTISE_10FULL;
2963        if (mask & ADVERTISED_100baseT_Half)
2964                all_mask |= ADVERTISE_100HALF;
2965        if (mask & ADVERTISED_100baseT_Full)
2966                all_mask |= ADVERTISE_100FULL;
2967
2968        if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2969                return 0;
2970
2971        if ((adv_reg & all_mask) != all_mask)
2972                return 0;
2973        if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2974                u32 tg3_ctrl;
2975
2976                all_mask = 0;
2977                if (mask & ADVERTISED_1000baseT_Half)
2978                        all_mask |= ADVERTISE_1000HALF;
2979                if (mask & ADVERTISED_1000baseT_Full)
2980                        all_mask |= ADVERTISE_1000FULL;
2981
2982                if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2983                        return 0;
2984
2985                if ((tg3_ctrl & all_mask) != all_mask)
2986                        return 0;
2987        }
2988        return 1;
2989}
2990
2991static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2992{
2993        u32 curadv, reqadv;
2994
2995        if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2996                return 1;
2997
2998        curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2999        reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3000
3001        if (tp->link_config.active_duplex == DUPLEX_FULL) {
3002                if (curadv != reqadv)
3003                        return 0;
3004
3005                if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3006                        tg3_readphy(tp, MII_LPA, rmtadv);
3007        } else {
3008                /* Reprogram the advertisement register, even if it
3009                 * does not affect the current link.  If the link
3010                 * gets renegotiated in the future, we can save an
3011                 * additional renegotiation cycle by advertising
3012                 * it correctly in the first place.
3013                 */
3014                if (curadv != reqadv) {
3015                        *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3016                                     ADVERTISE_PAUSE_ASYM);
3017                        tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3018                }
3019        }
3020
3021        return 1;
3022}
3023
3024static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3025{
3026        int current_link_up;
3027        u32 bmsr, dummy;
3028        u32 lcl_adv, rmt_adv;
3029        u16 current_speed;
3030        u8 current_duplex;
3031        int i, err;
3032
3033        tw32(MAC_EVENT, 0);
3034
3035        tw32_f(MAC_STATUS,
3036             (MAC_STATUS_SYNC_CHANGED |
3037              MAC_STATUS_CFG_CHANGED |
3038              MAC_STATUS_MI_COMPLETION |
3039              MAC_STATUS_LNKSTATE_CHANGED));
3040        udelay(40);
3041
3042        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3043                tw32_f(MAC_MI_MODE,
3044                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3045                udelay(80);
3046        }
3047
3048        tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3049
3050        /* Some third-party PHYs need to be reset on link going
3051         * down.
3052         */
3053        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3055             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3056            netif_carrier_ok(tp->dev)) {
3057                tg3_readphy(tp, MII_BMSR, &bmsr);
3058                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3059                    !(bmsr & BMSR_LSTATUS))
3060                        force_reset = 1;
3061        }
3062        if (force_reset)
3063                tg3_phy_reset(tp);
3064
3065        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
3066                tg3_readphy(tp, MII_BMSR, &bmsr);
3067                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3068                    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3069                        bmsr = 0;
3070
3071                if (!(bmsr & BMSR_LSTATUS)) {
3072                        err = tg3_init_5401phy_dsp(tp);
3073                        if (err)
3074                                return err;
3075
3076                        tg3_readphy(tp, MII_BMSR, &bmsr);
3077                        for (i = 0; i < 1000; i++) {
3078                                udelay(10);
3079                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3080                                    (bmsr & BMSR_LSTATUS)) {
3081                                        udelay(40);
3082                                        break;
3083                                }
3084                        }
3085
3086                        if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3087                            !(bmsr & BMSR_LSTATUS) &&
3088                            tp->link_config.active_speed == SPEED_1000) {
3089                                err = tg3_phy_reset(tp);
3090                                if (!err)
3091                                        err = tg3_init_5401phy_dsp(tp);
3092                                if (err)
3093                                        return err;
3094                        }
3095                }
3096        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3097                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3098                /* 5701 {A0,B0} CRC bug workaround */
3099                tg3_writephy(tp, 0x15, 0x0a75);
3100                tg3_writephy(tp, 0x1c, 0x8c68);
3101                tg3_writephy(tp, 0x1c, 0x8d68);
3102                tg3_writephy(tp, 0x1c, 0x8c68);
3103        }
3104
3105        /* Clear pending interrupts... */
3106        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3107        tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3108
3109        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3110                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3111        else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3112                tg3_writephy(tp, MII_TG3_IMASK, ~0);
3113
3114        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3115            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3116                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3117                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
3118                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3119                else
3120                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3121        }
3122
3123        current_link_up = 0;
3124        current_speed = SPEED_INVALID;
3125        current_duplex = DUPLEX_INVALID;
3126
3127        if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3128                u32 val;
3129
3130                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3131                tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3132                if (!(val & (1 << 10))) {
3133                        val |= (1 << 10);
3134                        tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3135                        goto relink;
3136                }
3137        }
3138
3139        bmsr = 0;
3140        for (i = 0; i < 100; i++) {
3141                tg3_readphy(tp, MII_BMSR, &bmsr);
3142                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3143                    (bmsr & BMSR_LSTATUS))
3144                        break;
3145                udelay(40);
3146        }
3147
3148        if (bmsr & BMSR_LSTATUS) {
3149                u32 aux_stat, bmcr;
3150
3151                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3152                for (i = 0; i < 2000; i++) {
3153                        udelay(10);
3154                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3155                            aux_stat)
3156                                break;
3157                }
3158
3159                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3160                                             &current_speed,
3161                                             &current_duplex);
3162
3163                bmcr = 0;
3164                for (i = 0; i < 200; i++) {
3165                        tg3_readphy(tp, MII_BMCR, &bmcr);
3166                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
3167                                continue;
3168                        if (bmcr && bmcr != 0x7fff)
3169                                break;
3170                        udelay(10);
3171                }
3172
3173                lcl_adv = 0;
3174                rmt_adv = 0;
3175
3176                tp->link_config.active_speed = current_speed;
3177                tp->link_config.active_duplex = current_duplex;
3178
3179                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3180                        if ((bmcr & BMCR_ANENABLE) &&
3181                            tg3_copper_is_advertising_all(tp,
3182                                                tp->link_config.advertising)) {
3183                                if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3184                                                                  &rmt_adv))
3185                                        current_link_up = 1;
3186                        }
3187                } else {
3188                        if (!(bmcr & BMCR_ANENABLE) &&
3189                            tp->link_config.speed == current_speed &&
3190                            tp->link_config.duplex == current_duplex &&
3191                            tp->link_config.flowctrl ==
3192                            tp->link_config.active_flowctrl) {
3193                                current_link_up = 1;
3194                        }
3195                }
3196
3197                if (current_link_up == 1 &&
3198                    tp->link_config.active_duplex == DUPLEX_FULL)
3199                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3200        }
3201
3202relink:
3203        if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3204                u32 tmp;
3205
3206                tg3_phy_copper_begin(tp);
3207
3208                tg3_readphy(tp, MII_BMSR, &tmp);
3209                if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3210                    (tmp & BMSR_LSTATUS))
3211                        current_link_up = 1;
3212        }
3213
3214        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3215        if (current_link_up == 1) {
3216                if (tp->link_config.active_speed == SPEED_100 ||
3217                    tp->link_config.active_speed == SPEED_10)
3218                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3219                else
3220                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3221        } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3222                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3223        else
3224                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3225
3226        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3227        if (tp->link_config.active_duplex == DUPLEX_HALF)
3228                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3229
3230        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3231                if (current_link_up == 1 &&
3232                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3233                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3234                else
3235                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3236        }
3237
3238        /* ??? Without this setting Netgear GA302T PHY does not
3239         * ??? send/receive packets...
3240         */
3241        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3242            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3243                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3244                tw32_f(MAC_MI_MODE, tp->mi_mode);
3245                udelay(80);
3246        }
3247
3248        tw32_f(MAC_MODE, tp->mac_mode);
3249        udelay(40);
3250
3251        if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3252                /* Polled via timer. */
3253                tw32_f(MAC_EVENT, 0);
3254        } else {
3255                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3256        }
3257        udelay(40);
3258
3259        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3260            current_link_up == 1 &&
3261            tp->link_config.active_speed == SPEED_1000 &&
3262            ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3263             (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3264                udelay(120);
3265                tw32_f(MAC_STATUS,
3266                     (MAC_STATUS_SYNC_CHANGED |
3267                      MAC_STATUS_CFG_CHANGED));
3268                udelay(40);
3269                tg3_write_mem(tp,
3270                              NIC_SRAM_FIRMWARE_MBOX,
3271                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3272        }
3273
3274        /* Prevent send BD corruption. */
3275        if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3276                u16 oldlnkctl, newlnkctl;
3277
3278                pci_read_config_word(tp->pdev,
3279                                     tp->pcie_cap + PCI_EXP_LNKCTL,
3280                                     &oldlnkctl);
3281                if (tp->link_config.active_speed == SPEED_100 ||
3282                    tp->link_config.active_speed == SPEED_10)
3283                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3284                else
3285                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3286                if (newlnkctl != oldlnkctl)
3287                        pci_write_config_word(tp->pdev,
3288                                              tp->pcie_cap + PCI_EXP_LNKCTL,
3289                                              newlnkctl);
3290        }
3291
3292        if (current_link_up != netif_carrier_ok(tp->dev)) {
3293                if (current_link_up)
3294                        netif_carrier_on(tp->dev);
3295                else
3296                        netif_carrier_off(tp->dev);
3297                tg3_link_report(tp);
3298        }
3299
3300        return 0;
3301}
3302
/* Per-negotiation state for the software 1000BASE-X autonegotiation
 * state machine (tg3_fiber_aneg_smachine()).  A fresh, zeroed instance
 * is allocated on the stack by fiber_autoneg() for each negotiation.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* control/result bits, 802.3 clause-37 style */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick timestamps (counted in smachine invocations) used for
	 * settle-time measurement.
	 */
	unsigned long link_time, cur_time;

	/* Last non-idle config word observed and how many consecutive
	 * ticks it has repeated (drives ability_match below).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* stable-word / line-idle / ack-bit-seen detection flags */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config word being sent / last received */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine() */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Settle time measured in state-machine ticks (~1us per tick) */
#define ANEG_STATE_SETTLE_TIME	10000
3366
/* Advance the software 1000BASE-X autonegotiation state machine by one
 * tick.  Used on fiber (TBI) links where hardware autoneg is not in
 * play; called roughly once per microsecond from fiber_autoneg() until
 * negotiation completes or fails.
 *
 * @tp: device state
 * @ap: negotiation state, zeroed by the caller before the first tick
 *
 * Returns ANEG_OK to keep ticking, ANEG_TIMER_ENAB when a settle timer
 * was (re)armed, ANEG_DONE on completion, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First tick: clear all word-match tracking state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the incoming config word and update the match flags:
         * ability_match = same non-idle word seen on consecutive ticks,
         * ack_match = ACK bit set in the received word,
         * idle_match = line is idle (no config word being received).
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch(ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Transmit an all-zero config word so the link partner
                 * restarts its own negotiation.
                 */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold the zero config word for the settle time. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                } else {
                        ret = ANEG_TIMER_ENAB;
                }
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Start advertising: full duplex plus the pause bits
                 * derived from the configured flow control.
                 */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait for the partner's ability word to be stable. */
                if (ap->ability_match != 0 && ap->rxconfig != 0) {
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                }
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's ability word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Partner ACKed; its word (ACK bit aside) must
                         * still match what we latched, else start over.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Line dropped back to idle: restart. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Reject words with reserved bits set. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the partner's advertisement into MR_* flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)	/* received toggle bit */
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is unimplemented;
                                 * only proceed if neither side wants one.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for line idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
3620
3621static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3622{
3623        int res = 0;
3624        struct tg3_fiber_aneginfo aninfo;
3625        int status = ANEG_FAILED;
3626        unsigned int tick;
3627        u32 tmp;
3628
3629        tw32_f(MAC_TX_AUTO_NEG, 0);
3630
3631        tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3632        tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3633        udelay(40);
3634
3635        tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3636        udelay(40);
3637
3638        memset(&aninfo, 0, sizeof(aninfo));
3639        aninfo.flags |= MR_AN_ENABLE;
3640        aninfo.state = ANEG_STATE_UNKNOWN;
3641        aninfo.cur_time = 0;
3642        tick = 0;
3643        while (++tick < 195000) {
3644                status = tg3_fiber_aneg_smachine(tp, &aninfo);
3645                if (status == ANEG_DONE || status == ANEG_FAILED)
3646                        break;
3647
3648                udelay(1);
3649        }
3650
3651        tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3652        tw32_f(MAC_MODE, tp->mac_mode);
3653        udelay(40);
3654
3655        *txflags = aninfo.txconfig;
3656        *rxflags = aninfo.flags;
3657
3658        if (status == ANEG_DONE &&
3659            (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3660                             MR_LP_ADV_FULL_DUPLEX)))
3661                res = 1;
3662
3663        return res;
3664}
3665
3666static void tg3_init_bcm8002(struct tg3 *tp)
3667{
3668        u32 mac_status = tr32(MAC_STATUS);
3669        int i;
3670
3671        /* Reset when initting first time or we have a link. */
3672        if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3673            !(mac_status & MAC_STATUS_PCS_SYNCED))
3674                return;
3675
3676        /* Set PLL lock range. */
3677        tg3_writephy(tp, 0x16, 0x8007);
3678
3679        /* SW reset */
3680        tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3681
3682        /* Wait for reset to complete. */
3683        /* XXX schedule_timeout() ... */
3684        for (i = 0; i < 500; i++)
3685                udelay(10);
3686
3687        /* Config mode; select PMA/Ch 1 regs. */
3688        tg3_writephy(tp, 0x10, 0x8411);
3689
3690        /* Enable auto-lock and comdet, select txclk for tx. */
3691        tg3_writephy(tp, 0x11, 0x0a10);
3692
3693        tg3_writephy(tp, 0x18, 0x00a0);
3694        tg3_writephy(tp, 0x16, 0x41ff);
3695
3696        /* Assert and deassert POR. */
3697        tg3_writephy(tp, 0x13, 0x0400);
3698        udelay(40);
3699        tg3_writephy(tp, 0x13, 0x0000);
3700
3701        tg3_writephy(tp, 0x11, 0x0a50);
3702        udelay(40);
3703        tg3_writephy(tp, 0x11, 0x0a10);
3704
3705        /* Wait for signal to stabilize */
3706        /* XXX schedule_timeout() ... */
3707        for (i = 0; i < 15000; i++)
3708                udelay(10);
3709
3710        /* Deselect the channel register so we can read the PHYID
3711         * later.
3712         */
3713        tg3_writephy(tp, 0x10, 0x8011);
3714}
3715
/* Bring up a fiber link using the on-chip SG_DIG hardware
 * autonegotiation engine (5704S-style SerDes).
 *
 * @tp: device state
 * @mac_status: MAC_STATUS value sampled by the caller
 *
 * Returns nonzero when the link is up.  Maintains tp->serdes_counter
 * and the TG3_FLG2_PARALLEL_DETECT flag across successive polls.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;		/* set when MAC_SERDES_CFG writes are needed */
        port_a = 1;
        current_link_up = 0;

        /* All revs except 5704 A0/A1 need the SERDES_CFG workaround. */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: tear down HW autoneg if it was active and
                 * report link based on PCS sync alone.
                 */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* While a parallel-detected link still has PCS sync and
                 * is not receiving config words, keep it up until the
                 * countdown expires rather than restarting autoneg.
                 */
                if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                /* (Re)start the hardware autoneg engine via soft reset. */
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg finished: resolve flow control from
                         * both sides' pause advertisements.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: disable the HW
                                 * engine and try parallel detection.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->tg3_flags2 |=
                                                TG3_FLG2_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: rearm the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
3857
3858static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3859{
3860        int current_link_up = 0;
3861
3862        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3863                goto out;
3864
3865        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3866                u32 txflags, rxflags;
3867                int i;
3868
3869                if (fiber_autoneg(tp, &txflags, &rxflags)) {
3870                        u32 local_adv = 0, remote_adv = 0;
3871
3872                        if (txflags & ANEG_CFG_PS1)
3873                                local_adv |= ADVERTISE_1000XPAUSE;
3874                        if (txflags & ANEG_CFG_PS2)
3875                                local_adv |= ADVERTISE_1000XPSE_ASYM;
3876
3877                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
3878                                remote_adv |= LPA_1000XPAUSE;
3879                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3880                                remote_adv |= LPA_1000XPAUSE_ASYM;
3881
3882                        tg3_setup_flow_control(tp, local_adv, remote_adv);
3883
3884                        current_link_up = 1;
3885                }
3886                for (i = 0; i < 30; i++) {
3887                        udelay(20);
3888                        tw32_f(MAC_STATUS,
3889                               (MAC_STATUS_SYNC_CHANGED |
3890                                MAC_STATUS_CFG_CHANGED));
3891                        udelay(40);
3892                        if ((tr32(MAC_STATUS) &
3893                             (MAC_STATUS_SYNC_CHANGED |
3894                              MAC_STATUS_CFG_CHANGED)) == 0)
3895                                break;
3896                }
3897
3898                mac_status = tr32(MAC_STATUS);
3899                if (current_link_up == 0 &&
3900                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
3901                    !(mac_status & MAC_STATUS_RCVD_CFG))
3902                        current_link_up = 1;
3903        } else {
3904                tg3_setup_flow_control(tp, 0, 0);
3905
3906                /* Forcing 1000FD link up. */
3907                current_link_up = 1;
3908
3909                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3910                udelay(40);
3911
3912                tw32_f(MAC_MODE, tp->mac_mode);
3913                udelay(40);
3914        }
3915
3916out:
3917        return current_link_up;
3918}
3919
/* Link setup for TBI-mode fiber ports (no MII PHY).  Autonegotiation is
 * handled either by the MAC hardware (TG3_FLG2_HW_AUTONEG) or by hand in
 * tg3_setup_fiber_by_hand().  Always returns 0; the link result is
 * published through netif_carrier_*() and tg3_link_report().
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the pre-call link parameters so we can tell at the end
	 * whether anything changed and a link report is warranted.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: when not using hardware autoneg and the link is already
	 * up with signal detect and no pending config change, just ack the
	 * status-change bits and leave the link alone.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI port mode, full duplex. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the shared status block while
	 * keeping SD_STATUS_UPDATED set.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack the status-change bits until they stay clear (bounded retry). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Briefly send config code words to prod the link
			 * partner, then restore normal MAC mode.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* TBI fiber links only come up as 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a carrier transition, or a flow-control/speed/duplex change
	 * even when the carrier state itself did not change.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4027
/* Link setup for fiber ports whose 1000Base-X SERDES is reached over MII
 * registers (e.g. 5714S).  Handles the autoneg, parallel-detect and
 * forced-mode paths.  Returns the OR-accumulated tg3_readphy() status.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any pending MAC status-change bits before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice to get the
	 * current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC's TX status is used as the authoritative
		 * link indication instead of BMSR.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000Base-X advertisement word from the
		 * configured flow control and advertised speeds.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * write it, restart autoneg, arm the serdes timeout
			 * counter and let the poll path finish the job.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: program duplex directly, autoneg off. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Blank the advertisement and restart autoneg
				 * so the partner drops the link before the
				 * forced mode is applied.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched BMSR: double read for fresh link state. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of local and
			 * link-partner advertisements; no common 1000X
			 * ability means the link is not usable.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	/* Flow control is only negotiated on full-duplex links. */
	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4197
/* Timer-driven helper for MII SERDES ports: once the autoneg grace
 * counter expires, bring the link up by parallel detection when signal
 * is present but no config code words arrive, and re-enable autoneg
 * once config code words are seen again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* NOTE(review): the double read suggests this
			 * register is latched — confirm against PHY docs.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000/full with autoneg off and
				 * remember we are in parallel-detect mode.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
4255
/* Top-level PHY/link setup: dispatch to the fiber, MII-SERDES or copper
 * handler, then apply link-dependent MAC fixups (GRC clock prescaler on
 * 5784 A-steps, TX inter-packet gap/slot time, statistics coalescing,
 * and the ASPM workaround threshold).  Returns the handler's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		/* Re-derive the GRC misc prescaler from the MAC clock
		 * currently reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* 1000 Mb/s half duplex gets a larger slot time (0xff) than all
	 * other speed/duplex combinations (32).
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		/* Adjust the PCIe L1 entry threshold depending on carrier
		 * state: the configured pwrmgmt_thresh with no link, the
		 * full mask with link up.
		 */
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4317
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder flag is already set, or mailbox writes already go
	 * through the indirect path, this condition should be impossible.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the actual reset runs
	 * later (see comment above).
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
4338
4339static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4340{
4341        smp_mb();
4342        return tnapi->tx_pending -
4343               ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4344}
4345
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
/* Reclaim completed TX descriptors for one NAPI ring: unmap and free
 * each sent skb, advance tx_cons, and wake the TX queue if it was
 * stopped and enough space has become available.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* In TSS mode the netdev TX queue number is one less than the
	 * napi index.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means hardware and software
		 * views of the ring have diverged; try to recover.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* The first descriptor maps the linear head of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* One additional descriptor per paged fragment.  A non-NULL
		 * skb or running past hw_idx here is another sign of ring
		 * corruption.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the TX queue lock to close the race with a
		 * concurrent queue stop in the transmit path.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
4420
4421static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4422{
4423        if (!ri->skb)
4424                return;
4425
4426        pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4427                         map_sz, PCI_DMA_FROMDEVICE);
4428        dev_kfree_skb_any(ri->skb);
4429        ri->skb = NULL;
4430}
4431
4432/* Returns size of skb allocated or < 0 on error.
4433 *
4434 * We only need to fill in the address because the other members
4435 * of the RX descriptor are invariant, see tg3_init_rings.
4436 *
4437 * Note the purposeful assymetry of cpu vs. chip accesses.  For
4438 * posting buffers we only dirty the first cache line of the RX
4439 * descriptor (containing the address).  Whereas for the RX status
4440 * buffers the cpu only reads the last cacheline of the RX descriptor
4441 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4442 */
4443static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4444                            u32 opaque_key, u32 dest_idx_unmasked)
4445{
4446        struct tg3_rx_buffer_desc *desc;
4447        struct ring_info *map, *src_map;
4448        struct sk_buff *skb;
4449        dma_addr_t mapping;
4450        int skb_size, dest_idx;
4451
4452        src_map = NULL;
4453        switch (opaque_key) {
4454        case RXD_OPAQUE_RING_STD:
4455                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4456                desc = &tpr->rx_std[dest_idx];
4457                map = &tpr->rx_std_buffers[dest_idx];
4458                skb_size = tp->rx_pkt_map_sz;
4459                break;
4460
4461        case RXD_OPAQUE_RING_JUMBO:
4462                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4463                desc = &tpr->rx_jmb[dest_idx].std;
4464                map = &tpr->rx_jmb_buffers[dest_idx];
4465                skb_size = TG3_RX_JMB_MAP_SZ;
4466                break;
4467
4468        default:
4469                return -EINVAL;
4470        }
4471
4472        /* Do not overwrite any of the map or rp information
4473         * until we are sure we can commit to a new buffer.
4474         *
4475         * Callers depend upon this behavior and assume that
4476         * we leave everything unchanged if we fail.
4477         */
4478        skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4479        if (skb == NULL)
4480                return -ENOMEM;
4481
4482        skb_reserve(skb, tp->rx_offset);
4483
4484        mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4485                                 PCI_DMA_FROMDEVICE);
4486        if (pci_dma_mapping_error(tp->pdev, mapping)) {
4487                dev_kfree_skb(skb);
4488                return -EIO;
4489        }
4490
4491        map->skb = skb;
4492        pci_unmap_addr_set(map, mapping, mapping);
4493
4494        desc->addr_hi = ((u64)mapping >> 32);
4495        desc->addr_lo = ((u64)mapping & 0xffffffff);
4496
4497        return skb_size;
4498}
4499
4500/* We only need to move over in the address because the other
4501 * members of the RX descriptor are invariant.  See notes above
4502 * tg3_alloc_rx_skb for full details.
4503 */
4504static void tg3_recycle_rx(struct tg3_napi *tnapi,
4505                           struct tg3_rx_prodring_set *dpr,
4506                           u32 opaque_key, int src_idx,
4507                           u32 dest_idx_unmasked)
4508{
4509        struct tg3 *tp = tnapi->tp;
4510        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4511        struct ring_info *src_map, *dest_map;
4512        int dest_idx;
4513        struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4514
4515        switch (opaque_key) {
4516        case RXD_OPAQUE_RING_STD:
4517                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4518                dest_desc = &dpr->rx_std[dest_idx];
4519                dest_map = &dpr->rx_std_buffers[dest_idx];
4520                src_desc = &spr->rx_std[src_idx];
4521                src_map = &spr->rx_std_buffers[src_idx];
4522                break;
4523
4524        case RXD_OPAQUE_RING_JUMBO:
4525                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4526                dest_desc = &dpr->rx_jmb[dest_idx].std;
4527                dest_map = &dpr->rx_jmb_buffers[dest_idx];
4528                src_desc = &spr->rx_jmb[src_idx].std;
4529                src_map = &spr->rx_jmb_buffers[src_idx];
4530                break;
4531
4532        default:
4533                return;
4534        }
4535
4536        dest_map->skb = src_map->skb;
4537        pci_unmap_addr_set(dest_map, mapping,
4538                           pci_unmap_addr(src_map, mapping));
4539        dest_desc->addr_hi = src_desc->addr_hi;
4540        dest_desc->addr_lo = src_desc->addr_lo;
4541        src_map->skb = NULL;
4542}
4543
4544/* The RX ring scheme is composed of multiple rings which post fresh
4545 * buffers to the chip, and one special ring the chip uses to report
4546 * status back to the host.
4547 *
4548 * The special ring reports the status of received packets to the
4549 * host.  The chip does not write into the original descriptor the
4550 * RX buffer was obtained from.  The chip simply takes the original
4551 * descriptor as provided by the host, updates the status and length
4552 * field, then writes this into the next status ring entry.
4553 *
4554 * Each ring the host uses to post buffers to the chip is described
4555 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4556 * it is first placed into the on-chip ram.  When the packet's length
4557 * is known, it walks down the TG3_BDINFO entries to select the ring.
4558 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4559 * which is within the range of the new packet's length is chosen.
4560 *
4561 * The "separate ring for rx status" scheme may sound queer, but it makes
4562 * sense from a cache coherency perspective.  If only the host writes
4563 * to the buffer post rings, and only the chip writes to the rx status
4564 * rings, then cache lines never move beyond shared-modified state.
4565 * If both the host and chip were to write into the same ring, cache line
4566 * eviction could occur since both entities want it in an exclusive state.
4567 */
4568static int tg3_rx(struct tg3_napi *tnapi, int budget)
4569{
4570        struct tg3 *tp = tnapi->tp;
4571        u32 work_mask, rx_std_posted = 0;
4572        u32 std_prod_idx, jmb_prod_idx;
4573        u32 sw_idx = tnapi->rx_rcb_ptr;
4574        u16 hw_idx;
4575        int received;
4576        struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4577
4578        hw_idx = *(tnapi->rx_rcb_prod_idx);
4579        /*
4580         * We need to order the read of hw_idx and the read of
4581         * the opaque cookie.
4582         */
4583        rmb();
4584        work_mask = 0;
4585        received = 0;
4586        std_prod_idx = tpr->rx_std_prod_idx;
4587        jmb_prod_idx = tpr->rx_jmb_prod_idx;
4588        while (sw_idx != hw_idx && budget > 0) {
4589                struct ring_info *ri;
4590                struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4591                unsigned int len;
4592                struct sk_buff *skb;
4593                dma_addr_t dma_addr;
4594                u32 opaque_key, desc_idx, *post_ptr;
4595
4596                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4597                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4598                if (opaque_key == RXD_OPAQUE_RING_STD) {
4599                        ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4600                        dma_addr = pci_unmap_addr(ri, mapping);
4601                        skb = ri->skb;
4602                        post_ptr = &std_prod_idx;
4603                        rx_std_posted++;
4604                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4605                        ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4606                        dma_addr = pci_unmap_addr(ri, mapping);
4607                        skb = ri->skb;
4608                        post_ptr = &jmb_prod_idx;
4609                } else
4610                        goto next_pkt_nopost;
4611
4612                work_mask |= opaque_key;
4613
4614                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4615                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4616                drop_it:
4617                        tg3_recycle_rx(tnapi, tpr, opaque_key,
4618                                       desc_idx, *post_ptr);
4619                drop_it_no_recycle:
4620                        /* Other statistics kept track of by card. */
4621                        tp->net_stats.rx_dropped++;
4622                        goto next_pkt;
4623                }
4624
4625                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4626                      ETH_FCS_LEN;
4627
4628                if (len > RX_COPY_THRESHOLD &&
4629                    tp->rx_offset == NET_IP_ALIGN) {
4630                    /* rx_offset will likely not equal NET_IP_ALIGN
4631                     * if this is a 5701 card running in PCI-X mode
4632                     * [see tg3_get_invariants()]
4633                     */
4634                        int skb_size;
4635
4636                        skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4637                                                    *post_ptr);
4638                        if (skb_size < 0)
4639                                goto drop_it;
4640
4641                        ri->skb = NULL;
4642
4643                        pci_unmap_single(tp->pdev, dma_addr, skb_size,
4644                                         PCI_DMA_FROMDEVICE);
4645
4646                        skb_put(skb, len);
4647                } else {
4648                        struct sk_buff *copy_skb;
4649
4650                        tg3_recycle_rx(tnapi, tpr, opaque_key,
4651                                       desc_idx, *post_ptr);
4652
4653                        copy_skb = netdev_alloc_skb(tp->dev,
4654                                                    len + TG3_RAW_IP_ALIGN);
4655                        if (copy_skb == NULL)
4656                                goto drop_it_no_recycle;
4657
4658                        skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4659                        skb_put(copy_skb, len);
4660                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4661                        skb_copy_from_linear_data(skb, copy_skb->data, len);
4662                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4663
4664                        /* We'll reuse the original ring buffer. */
4665                        skb = copy_skb;
4666                }
4667
4668                if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4669                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4670                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4671                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
4672                        skb->ip_summed = CHECKSUM_UNNECESSARY;
4673                else
4674                        skb->ip_summed = CHECKSUM_NONE;
4675
4676                skb->protocol = eth_type_trans(skb, tp->dev);
4677
4678                if (len > (tp->dev->mtu + ETH_HLEN) &&
4679                    skb->protocol != htons(ETH_P_8021Q)) {
4680                        dev_kfree_skb(skb);
4681                        goto next_pkt;
4682                }
4683
4684#if TG3_VLAN_TAG_USED
4685                if (tp->vlgrp != NULL &&
4686                    desc->type_flags & RXD_FLAG_VLAN) {
4687                        vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4688                                         desc->err_vlan & RXD_VLAN_MASK, skb);
4689                } else
4690#endif
4691                        napi_gro_receive(&tnapi->napi, skb);
4692
4693                received++;
4694                budget--;
4695
4696next_pkt:
4697                (*post_ptr)++;
4698
4699                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4700                        tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4701                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4702                                     tpr->rx_std_prod_idx);
4703                        work_mask &= ~RXD_OPAQUE_RING_STD;
4704                        rx_std_posted = 0;
4705                }
4706next_pkt_nopost:
4707                sw_idx++;
4708                sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4709
4710                /* Refresh hw_idx to see if there is new work */
4711                if (sw_idx == hw_idx) {
4712                        hw_idx = *(tnapi->rx_rcb_prod_idx);
4713                        rmb();
4714                }
4715        }
4716
4717        /* ACK the status ring. */
4718        tnapi->rx_rcb_ptr = sw_idx;
4719        tw32_rx_mbox(tnapi->consmbox, sw_idx);
4720
4721        /* Refill RX ring(s). */
4722        if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
4723                if (work_mask & RXD_OPAQUE_RING_STD) {
4724                        tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4725                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4726                                     tpr->rx_std_prod_idx);
4727                }
4728                if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4729                        tpr->rx_jmb_prod_idx = jmb_prod_idx %
4730                                               TG3_RX_JUMBO_RING_SIZE;
4731                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4732                                     tpr->rx_jmb_prod_idx);
4733                }
4734                mmiowb();
4735        } else if (work_mask) {
4736                /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4737                 * updated before the producer indices can be updated.
4738                 */
4739                smp_wmb();
4740
4741                tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4742                tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4743
4744                napi_schedule(&tp->napi[1].napi);
4745        }
4746
4747        return received;
4748}
4749
/* Service link/PHY change events reported through the status block.
 *
 * This is a no-op when the driver is configured to detect link changes
 * by polling a register (TG3_FLAG_USE_LINKCHG_REG) or by polling serdes
 * state (TG3_FLAG_POLL_SERDES) instead of via the status block.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while keeping the
			 * status block marked as updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib owns link management; just ack the
				 * MAC status bits to silence the event.
				 * tw32_f flushes the posted write; the 40us
				 * delay gives the MAC time to settle.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
4775
/* tg3_rx_prodring_xfer - move recycled rx buffer slots from the source
 * producer ring set @spr into the destination set @dpr.
 *
 * Both the standard and the jumbo rings are drained.  For each ring the
 * [cons, prod) window is consumed in at most two chunks per loop
 * iteration (to handle index wrap-around), duplicating both the
 * ring_info bookkeeping and the DMA addresses held in the hardware
 * descriptors.
 */
static void tg3_rx_prodring_xfer(struct tg3 *tp,
				 struct tg3_rx_prodring_set *dpr,
				 struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i;

	/* Drain the standard ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy only up to the end of the source ring; a wrapped
		 * window is finished on the next loop iteration.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;

		/* Also clamp to the room left before the destination wraps. */
		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Transfer the skb/DMA bookkeeping entries ... */
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* ... and the DMA addresses in the hardware descriptors. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
				       TG3_RX_RING_SIZE;
	}

	/* Drain the jumbo ring in exactly the same fashion. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
				       TG3_RX_JUMBO_RING_SIZE;
	}
}
4862
/* Do one round of tx reaping and rx processing for @tnapi within the
 * NAPI @budget and return the updated work_done count.  Only rx packets
 * consume budget; tx completion work is unbounded.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* A tx error occurred; bail out so the caller can hand
		 * off to the reset task.
		 */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	/* With RSS, buffers recycled by the other rx vectors are funneled
	 * through the first rx vector (napi[1]): pull them into this
	 * vector's producer rings and, if anything moved, republish the
	 * producer indices to the hardware.
	 */
	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
		int i;
		u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
		u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;

		for (i = 2; i < tp->irq_cnt; i++)
			tg3_rx_prodring_xfer(tp, tnapi->prodring,
					     tp->napi[i].prodring);

		/* Descriptor/bookkeeping updates must be visible before
		 * the producer mailbox writes below.
		 */
		wmb();

		if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
			u32 mbox = TG3_RX_STD_PROD_IDX_REG;
			tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
		}

		if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
			u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
			tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
		}

		/* Keep the mailbox writes ordered ahead of later MMIO. */
		mmiowb();
	}

	return work_done;
}
4907
/* NAPI poll handler for the MSI-X vectors (tagged status blocks).
 * Link/PHY events are not serviced here; see tg3_poll().
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* A tx error was detected; hand off to the reset task. */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled, do not re-enable irqs. */
		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_restart_ints() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
		    *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4951
/* NAPI poll handler for the default (non-MSI-X) vector.  Unlike
 * tg3_poll_msix(), this handler also services link/PHY events via
 * tg3_poll_link() and supports both tagged and non-tagged status
 * block protocols.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* A tx error was detected; hand off to the reset task. */
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		/* Budget exhausted: stay scheduled, do not re-enable irqs. */
		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			/* Non-tagged protocol: ack the status block update. */
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4996
4997static void tg3_irq_quiesce(struct tg3 *tp)
4998{
4999        int i;
5000
5001        BUG_ON(tp->irq_sync);
5002
5003        tp->irq_sync = 1;
5004        smp_mb();
5005
5006        for (i = 0; i < tp->irq_cnt; i++)
5007                synchronize_irq(tp->napi[i].irq_vec);
5008}
5009
/* Non-zero while tg3_irq_quiesce() has flagged interrupt processing as
 * disabled; the irq handlers check this to avoid scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5014
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		/* Also wait out any irq handlers running on other CPUs. */
		tg3_irq_quiesce(tp);
}
5026
/* Counterpart to tg3_full_lock(): drop tp->lock. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
5031
5032/* One-shot MSI handler - Chip automatically disables interrupt
5033 * after sending MSI so driver doesn't have to do it.
5034 */
5035static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5036{
5037        struct tg3_napi *tnapi = dev_id;
5038        struct tg3 *tp = tnapi->tp;
5039
5040        prefetch(tnapi->hw_status);
5041        if (tnapi->rx_rcb)
5042                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5043
5044        if (likely(!tg3_irq_sync(tp)))
5045                napi_schedule(&tnapi->napi);
5046
5047        return IRQ_HANDLED;
5048}
5049
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Do not schedule NAPI while interrupts are being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
5075
/* INTx interrupt handler for chips using the non-tagged status block
 * protocol.  The line may be shared with other devices, so this must
 * detect interrupts that are not ours and report them unhandled.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	/* Ack the status block update before checking for work. */
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
5124
/* INTx interrupt handler for chips using tagged status blocks.  The
 * status tag lets this handler tell whether the status block has been
 * updated since the last acknowledged interrupt.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
5176
5177/* ISR for interrupt test */
5178static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5179{
5180        struct tg3_napi *tnapi = dev_id;
5181        struct tg3 *tp = tnapi->tp;
5182        struct tg3_hw_status *sblk = tnapi->hw_status;
5183
5184        if ((sblk->status & SD_STATUS_UPDATED) ||
5185            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5186                tg3_disable_ints(tp);
5187                return IRQ_RETVAL(1);
5188        }
5189        return IRQ_RETVAL(0);
5190}
5191
5192static int tg3_init_hw(struct tg3 *, int);
5193static int