/* linux/drivers/net/ethernet/neterion/s2io.c */
   1/************************************************************************
   2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
   3 * Copyright(c) 2002-2010 Exar Corp.
   4 *
   5 * This software may be used and distributed according to the terms of
   6 * the GNU General Public License (GPL), incorporated herein by reference.
   7 * Drivers based on or derived from this code fall under the GPL and must
   8 * retain the authorship, copyright and license notice.  This file is not
   9 * a complete program and may only be used when the entire operating
  10 * system is licensed under the GPL.
  11 * See the file COPYING in this distribution for more information.
  12 *
  13 * Credits:
  14 * Jeff Garzik          : For pointing out the improper error condition
  15 *                        check in the s2io_xmit routine and also some
  16 *                        issues in the Tx watch dog function. Also for
  17 *                        patiently answering all those innumerable
   18 *                        questions regarding the 2.6 porting issues.
  19 * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
  20 *                        macros available only in 2.6 Kernel.
  21 * Francois Romieu      : For pointing out all code part that were
  22 *                        deprecated and also styling related comments.
  23 * Grant Grundler       : For helping me get rid of some Architecture
  24 *                        dependent code.
  25 * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
  26 *
  27 * The module loadable parameters that are supported by the driver and a brief
  28 * explanation of all the variables.
  29 *
  30 * rx_ring_num : This can be used to program the number of receive rings used
  31 * in the driver.
  32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
  33 *     This is also an array of size 8.
  34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
  35 *              values are 1, 2.
   36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
  37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
  38 * Tx descriptors that can be associated with each corresponding FIFO.
  39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
  40 *     2(MSI_X). Default value is '2(MSI_X)'
   41 * lro_max_pkts: This parameter defines the maximum number of packets that
   42 *     can be aggregated as a single large packet
  43 * napi: This parameter used to enable/disable NAPI (polling Rx)
  44 *     Possible values '1' for enable and '0' for disable. Default is '1'
  45 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
  46 *      Possible values '1' for enable and '0' for disable. Default is '0'
  47 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
  48 *                 Possible values '1' for enable , '0' for disable.
  49 *                 Default is '2' - which means disable in promisc mode
  50 *                 and enable in non-promiscuous mode.
  51 * multiq: This parameter used to enable/disable MULTIQUEUE support.
  52 *      Possible values '1' for enable and '0' for disable. Default is '0'
  53 ************************************************************************/
  54
  55#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  56
  57#include <linux/module.h>
  58#include <linux/types.h>
  59#include <linux/errno.h>
  60#include <linux/ioport.h>
  61#include <linux/pci.h>
  62#include <linux/dma-mapping.h>
  63#include <linux/kernel.h>
  64#include <linux/netdevice.h>
  65#include <linux/etherdevice.h>
  66#include <linux/mdio.h>
  67#include <linux/skbuff.h>
  68#include <linux/init.h>
  69#include <linux/delay.h>
  70#include <linux/stddef.h>
  71#include <linux/ioctl.h>
  72#include <linux/timex.h>
  73#include <linux/ethtool.h>
  74#include <linux/workqueue.h>
  75#include <linux/if_vlan.h>
  76#include <linux/ip.h>
  77#include <linux/tcp.h>
  78#include <linux/uaccess.h>
  79#include <linux/io.h>
  80#include <linux/slab.h>
  81#include <linux/prefetch.h>
  82#include <net/tcp.h>
  83#include <net/checksum.h>
  84
  85#include <asm/div64.h>
  86#include <asm/irq.h>
  87
  88/* local include */
  89#include "s2io.h"
  90#include "s2io-regs.h"
  91
#define DRV_VERSION "2.0.26.28"

/* S2io Driver name & version. */
static const char s2io_driver_name[] = "Neterion";
static const char s2io_driver_version[] = DRV_VERSION;

/* Per-rxd_mode tables, indexed by nic->rxd_mode.
 * rxd_size:  size in bytes of one RxD (32 for RxD1, 48 for RxD3 —
 *            matches the sizeof() choice in init_shared_mem()).
 * rxd_count: number of RxDs that fit in one Rx block for that mode.
 */
static const int rxd_size[2] = {32, 48};
static const int rxd_count[2] = {127, 85};
 100
 101static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 102{
 103        int ret;
 104
 105        ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
 106               (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
 107
 108        return ret;
 109}
 110
 111/*
 112 * Cards with following subsystem_id have a link state indication
 113 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
 114 * macro below identifies these cards given the subsystem_id.
 115 */
 116#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)              \
 117        (dev_type == XFRAME_I_DEVICE) ?                                 \
 118        ((((subid >= 0x600B) && (subid <= 0x600D)) ||                   \
 119          ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
 120
 121#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
 122                                      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
 123
 124static inline int is_s2io_card_up(const struct s2io_nic *sp)
 125{
 126        return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
 127}
 128
/* Ethtool related variables and Macros. */
/* Names of the ethtool self-test cases; "(offline)"/"(online)" notes
 * whether the test requires the interface to be taken down.
 */
static const char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
 137
/* ethtool statistics key names common to Xframe I and Xframe II
 * adapters (tmac_* = transmit MAC, rmac_* = receive MAC counters).
 * Order must match the order in which values are filled in for
 * ethtool -S; see S2IO_XENA_STAT_LEN below.
 */
static const char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
 234
/* Additional ethtool statistics key names reported only by the
 * enhanced (Xframe II) hardware; appended after the common keys
 * (see XFRAME_II_STAT_LEN below).
 */
static const char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
 253
/* Software (driver-maintained) statistics key names; the first entry
 * is a section banner that ethtool -S prints before these counters.
 */
static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
 328
/* Entry counts for the key tables above. */
#define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)

/* Total stat counts per device family: Xframe I reports the common +
 * driver stats; Xframe II additionally reports the enhanced stats.
 */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

/* Byte sizes of the corresponding ethtool string buffers. */
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
 341
 342#define S2IO_TIMER_CONF(timer, handle, arg, exp)        \
 343        init_timer(&timer);                             \
 344        timer.function = handle;                        \
 345        timer.data = (unsigned long)arg;                \
 346        mod_timer(&timer, (jiffies + exp))              \
 347
 348/* copy mac addr to def_mac_addr array */
 349static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
 350{
 351        sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
 352        sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
 353        sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
 354        sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
 355        sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
 356        sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
 357}
 358
/*
 * Constants to be programmed into the Xena's registers, to configure
 * the XAUI.  Values come in set-address/write-data pairs and the list
 * is terminated by END_SIGN.
 */

#define END_SIGN        0x0

/* XAUI configuration sequence for Xframe II (Hercules) adapters. */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
 385
/* XAUI configuration sequence for Xframe I (Xena) adapters; same
 * set-address/write-data pair layout as herc_act_dtx_cfg, terminated
 * by END_SIGN.
 */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
 401
/*
 * Constants for Fixing the MacAddress problem seen mostly on
 * Alpha machines.  Register write sequence terminated by END_SIGN.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
 423
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature */

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx descriptor counts: FIFO 0 defaults to DEFAULT_FIFO_0_LEN,
 * the remaining FIFOs to DEFAULT_FIFO_1_7_LEN.
 */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
{DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Rx block count per ring, one entry per ring. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring RTS frame length; 0 appears to mean "not set" — TODO confirm. */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
{[0 ...(MAX_RX_RINGS - 1)] = 0 };

/* Expose the three arrays above as module parameters. */
module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
 469
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 * WIN/UNI pairs presumably distinguish board variants; S2IO_* IDs are
 * Xframe I, HERC_* IDs Xframe II — TODO confirm against s2io.h.
 */
static const struct pci_device_id s2io_tbl[] = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
 487
/* PCI error-recovery callbacks invoked by the PCI core on bus errors. */
static const struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};
 493
/* PCI driver registration record: probe/remove entry points plus the
 * error handlers above.
 */
static struct pci_driver s2io_driver = {
	.name = "S2IO",
	.id_table = s2io_tbl,
	.probe = s2io_init_nic,
	.remove = s2io_rem_nic,
	.err_handler = &s2io_err_handler,
};
 501
 502/* A simplifier macro used both by init and free shared_mem Fns(). */
 503#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
 504
 505/* netqueue manipulation helper functions */
 506static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
 507{
 508        if (!sp->config.multiq) {
 509                int i;
 510
 511                for (i = 0; i < sp->config.tx_fifo_num; i++)
 512                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
 513        }
 514        netif_tx_stop_all_queues(sp->dev);
 515}
 516
 517static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
 518{
 519        if (!sp->config.multiq)
 520                sp->mac_control.fifos[fifo_no].queue_state =
 521                        FIFO_QUEUE_STOP;
 522
 523        netif_tx_stop_all_queues(sp->dev);
 524}
 525
 526static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
 527{
 528        if (!sp->config.multiq) {
 529                int i;
 530
 531                for (i = 0; i < sp->config.tx_fifo_num; i++)
 532                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 533        }
 534        netif_tx_start_all_queues(sp->dev);
 535}
 536
 537static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
 538{
 539        if (!sp->config.multiq) {
 540                int i;
 541
 542                for (i = 0; i < sp->config.tx_fifo_num; i++)
 543                        sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
 544        }
 545        netif_tx_wake_all_queues(sp->dev);
 546}
 547
 548static inline void s2io_wake_tx_queue(
 549        struct fifo_info *fifo, int cnt, u8 multiq)
 550{
 551
 552        if (multiq) {
 553                if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
 554                        netif_wake_subqueue(fifo->dev, fifo->fifo_no);
 555        } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
 556                if (netif_queue_stopped(fifo->dev)) {
 557                        fifo->queue_state = FIFO_QUEUE_START;
 558                        netif_wake_queue(fifo->dev);
 559                }
 560        }
 561}
 562
 563/**
 564 * init_shared_mem - Allocation and Initialization of Memory
 565 * @nic: Device private variable.
 566 * Description: The function allocates all the memory areas shared
 567 * between the NIC and the driver. This includes Tx descriptors,
 568 * Rx descriptors and the statistics block.
 569 */
 570
 571static int init_shared_mem(struct s2io_nic *nic)
 572{
 573        u32 size;
 574        void *tmp_v_addr, *tmp_v_addr_next;
 575        dma_addr_t tmp_p_addr, tmp_p_addr_next;
 576        struct RxD_block *pre_rxd_blk = NULL;
 577        int i, j, blk_cnt;
 578        int lst_size, lst_per_page;
 579        struct net_device *dev = nic->dev;
 580        unsigned long tmp;
 581        struct buffAdd *ba;
 582        struct config_param *config = &nic->config;
 583        struct mac_info *mac_control = &nic->mac_control;
 584        unsigned long long mem_allocated = 0;
 585
 586        /* Allocation and initialization of TXDLs in FIFOs */
 587        size = 0;
 588        for (i = 0; i < config->tx_fifo_num; i++) {
 589                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 590
 591                size += tx_cfg->fifo_len;
 592        }
 593        if (size > MAX_AVAILABLE_TXDS) {
 594                DBG_PRINT(ERR_DBG,
 595                          "Too many TxDs requested: %d, max supported: %d\n",
 596                          size, MAX_AVAILABLE_TXDS);
 597                return -EINVAL;
 598        }
 599
 600        size = 0;
 601        for (i = 0; i < config->tx_fifo_num; i++) {
 602                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 603
 604                size = tx_cfg->fifo_len;
 605                /*
 606                 * Legal values are from 2 to 8192
 607                 */
 608                if (size < 2) {
 609                        DBG_PRINT(ERR_DBG, "Fifo %d: Invalid length (%d) - "
 610                                  "Valid lengths are 2 through 8192\n",
 611                                  i, size);
 612                        return -EINVAL;
 613                }
 614        }
 615
 616        lst_size = (sizeof(struct TxD) * config->max_txds);
 617        lst_per_page = PAGE_SIZE / lst_size;
 618
 619        for (i = 0; i < config->tx_fifo_num; i++) {
 620                struct fifo_info *fifo = &mac_control->fifos[i];
 621                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 622                int fifo_len = tx_cfg->fifo_len;
 623                int list_holder_size = fifo_len * sizeof(struct list_info_hold);
 624
 625                fifo->list_info = kzalloc(list_holder_size, GFP_KERNEL);
 626                if (!fifo->list_info) {
 627                        DBG_PRINT(INFO_DBG, "Malloc failed for list_info\n");
 628                        return -ENOMEM;
 629                }
 630                mem_allocated += list_holder_size;
 631        }
 632        for (i = 0; i < config->tx_fifo_num; i++) {
 633                int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
 634                                                lst_per_page);
 635                struct fifo_info *fifo = &mac_control->fifos[i];
 636                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 637
 638                fifo->tx_curr_put_info.offset = 0;
 639                fifo->tx_curr_put_info.fifo_len = tx_cfg->fifo_len - 1;
 640                fifo->tx_curr_get_info.offset = 0;
 641                fifo->tx_curr_get_info.fifo_len = tx_cfg->fifo_len - 1;
 642                fifo->fifo_no = i;
 643                fifo->nic = nic;
 644                fifo->max_txds = MAX_SKB_FRAGS + 2;
 645                fifo->dev = dev;
 646
 647                for (j = 0; j < page_num; j++) {
 648                        int k = 0;
 649                        dma_addr_t tmp_p;
 650                        void *tmp_v;
 651                        tmp_v = pci_alloc_consistent(nic->pdev,
 652                                                     PAGE_SIZE, &tmp_p);
 653                        if (!tmp_v) {
 654                                DBG_PRINT(INFO_DBG,
 655                                          "pci_alloc_consistent failed for TxDL\n");
 656                                return -ENOMEM;
 657                        }
 658                        /* If we got a zero DMA address(can happen on
 659                         * certain platforms like PPC), reallocate.
 660                         * Store virtual address of page we don't want,
 661                         * to be freed later.
 662                         */
 663                        if (!tmp_p) {
 664                                mac_control->zerodma_virt_addr = tmp_v;
 665                                DBG_PRINT(INIT_DBG,
 666                                          "%s: Zero DMA address for TxDL. "
 667                                          "Virtual address %p\n",
 668                                          dev->name, tmp_v);
 669                                tmp_v = pci_alloc_consistent(nic->pdev,
 670                                                             PAGE_SIZE, &tmp_p);
 671                                if (!tmp_v) {
 672                                        DBG_PRINT(INFO_DBG,
 673                                                  "pci_alloc_consistent failed for TxDL\n");
 674                                        return -ENOMEM;
 675                                }
 676                                mem_allocated += PAGE_SIZE;
 677                        }
 678                        while (k < lst_per_page) {
 679                                int l = (j * lst_per_page) + k;
 680                                if (l == tx_cfg->fifo_len)
 681                                        break;
 682                                fifo->list_info[l].list_virt_addr =
 683                                        tmp_v + (k * lst_size);
 684                                fifo->list_info[l].list_phy_addr =
 685                                        tmp_p + (k * lst_size);
 686                                k++;
 687                        }
 688                }
 689        }
 690
 691        for (i = 0; i < config->tx_fifo_num; i++) {
 692                struct fifo_info *fifo = &mac_control->fifos[i];
 693                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 694
 695                size = tx_cfg->fifo_len;
 696                fifo->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
 697                if (!fifo->ufo_in_band_v)
 698                        return -ENOMEM;
 699                mem_allocated += (size * sizeof(u64));
 700        }
 701
 702        /* Allocation and initialization of RXDs in Rings */
 703        size = 0;
 704        for (i = 0; i < config->rx_ring_num; i++) {
 705                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 706                struct ring_info *ring = &mac_control->rings[i];
 707
 708                if (rx_cfg->num_rxd % (rxd_count[nic->rxd_mode] + 1)) {
 709                        DBG_PRINT(ERR_DBG, "%s: Ring%d RxD count is not a "
 710                                  "multiple of RxDs per Block\n",
 711                                  dev->name, i);
 712                        return FAILURE;
 713                }
 714                size += rx_cfg->num_rxd;
 715                ring->block_count = rx_cfg->num_rxd /
 716                        (rxd_count[nic->rxd_mode] + 1);
 717                ring->pkt_cnt = rx_cfg->num_rxd - ring->block_count;
 718        }
 719        if (nic->rxd_mode == RXD_MODE_1)
 720                size = (size * (sizeof(struct RxD1)));
 721        else
 722                size = (size * (sizeof(struct RxD3)));
 723
 724        for (i = 0; i < config->rx_ring_num; i++) {
 725                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 726                struct ring_info *ring = &mac_control->rings[i];
 727
 728                ring->rx_curr_get_info.block_index = 0;
 729                ring->rx_curr_get_info.offset = 0;
 730                ring->rx_curr_get_info.ring_len = rx_cfg->num_rxd - 1;
 731                ring->rx_curr_put_info.block_index = 0;
 732                ring->rx_curr_put_info.offset = 0;
 733                ring->rx_curr_put_info.ring_len = rx_cfg->num_rxd - 1;
 734                ring->nic = nic;
 735                ring->ring_no = i;
 736
 737                blk_cnt = rx_cfg->num_rxd / (rxd_count[nic->rxd_mode] + 1);
 738                /*  Allocating all the Rx blocks */
 739                for (j = 0; j < blk_cnt; j++) {
 740                        struct rx_block_info *rx_blocks;
 741                        int l;
 742
 743                        rx_blocks = &ring->rx_blocks[j];
 744                        size = SIZE_OF_BLOCK;   /* size is always page size */
 745                        tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
 746                                                          &tmp_p_addr);
 747                        if (tmp_v_addr == NULL) {
 748                                /*
 749                                 * In case of failure, free_shared_mem()
 750                                 * is called, which should free any
 751                                 * memory that was alloced till the
 752                                 * failure happened.
 753                                 */
 754                                rx_blocks->block_virt_addr = tmp_v_addr;
 755                                return -ENOMEM;
 756                        }
 757                        mem_allocated += size;
 758                        memset(tmp_v_addr, 0, size);
 759
 760                        size = sizeof(struct rxd_info) *
 761                                rxd_count[nic->rxd_mode];
 762                        rx_blocks->block_virt_addr = tmp_v_addr;
 763                        rx_blocks->block_dma_addr = tmp_p_addr;
 764                        rx_blocks->rxds = kmalloc(size,  GFP_KERNEL);
 765                        if (!rx_blocks->rxds)
 766                                return -ENOMEM;
 767                        mem_allocated += size;
 768                        for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
 769                                rx_blocks->rxds[l].virt_addr =
 770                                        rx_blocks->block_virt_addr +
 771                                        (rxd_size[nic->rxd_mode] * l);
 772                                rx_blocks->rxds[l].dma_addr =
 773                                        rx_blocks->block_dma_addr +
 774                                        (rxd_size[nic->rxd_mode] * l);
 775                        }
 776                }
 777                /* Interlinking all Rx Blocks */
 778                for (j = 0; j < blk_cnt; j++) {
 779                        int next = (j + 1) % blk_cnt;
 780                        tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
 781                        tmp_v_addr_next = ring->rx_blocks[next].block_virt_addr;
 782                        tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 783                        tmp_p_addr_next = ring->rx_blocks[next].block_dma_addr;
 784
 785                        pre_rxd_blk = tmp_v_addr;
 786                        pre_rxd_blk->reserved_2_pNext_RxD_block =
 787                                (unsigned long)tmp_v_addr_next;
 788                        pre_rxd_blk->pNext_RxD_Blk_physical =
 789                                (u64)tmp_p_addr_next;
 790                }
 791        }
 792        if (nic->rxd_mode == RXD_MODE_3B) {
 793                /*
 794                 * Allocation of Storages for buffer addresses in 2BUFF mode
 795                 * and the buffers as well.
 796                 */
 797                for (i = 0; i < config->rx_ring_num; i++) {
 798                        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 799                        struct ring_info *ring = &mac_control->rings[i];
 800
 801                        blk_cnt = rx_cfg->num_rxd /
 802                                (rxd_count[nic->rxd_mode] + 1);
 803                        size = sizeof(struct buffAdd *) * blk_cnt;
 804                        ring->ba = kmalloc(size, GFP_KERNEL);
 805                        if (!ring->ba)
 806                                return -ENOMEM;
 807                        mem_allocated += size;
 808                        for (j = 0; j < blk_cnt; j++) {
 809                                int k = 0;
 810
 811                                size = sizeof(struct buffAdd) *
 812                                        (rxd_count[nic->rxd_mode] + 1);
 813                                ring->ba[j] = kmalloc(size, GFP_KERNEL);
 814                                if (!ring->ba[j])
 815                                        return -ENOMEM;
 816                                mem_allocated += size;
 817                                while (k != rxd_count[nic->rxd_mode]) {
 818                                        ba = &ring->ba[j][k];
 819                                        size = BUF0_LEN + ALIGN_SIZE;
 820                                        ba->ba_0_org = kmalloc(size, GFP_KERNEL);
 821                                        if (!ba->ba_0_org)
 822                                                return -ENOMEM;
 823                                        mem_allocated += size;
 824                                        tmp = (unsigned long)ba->ba_0_org;
 825                                        tmp += ALIGN_SIZE;
 826                                        tmp &= ~((unsigned long)ALIGN_SIZE);
 827                                        ba->ba_0 = (void *)tmp;
 828
 829                                        size = BUF1_LEN + ALIGN_SIZE;
 830                                        ba->ba_1_org = kmalloc(size, GFP_KERNEL);
 831                                        if (!ba->ba_1_org)
 832                                                return -ENOMEM;
 833                                        mem_allocated += size;
 834                                        tmp = (unsigned long)ba->ba_1_org;
 835                                        tmp += ALIGN_SIZE;
 836                                        tmp &= ~((unsigned long)ALIGN_SIZE);
 837                                        ba->ba_1 = (void *)tmp;
 838                                        k++;
 839                                }
 840                        }
 841                }
 842        }
 843
 844        /* Allocation and initialization of Statistics block */
 845        size = sizeof(struct stat_block);
 846        mac_control->stats_mem =
 847                pci_alloc_consistent(nic->pdev, size,
 848                                     &mac_control->stats_mem_phy);
 849
 850        if (!mac_control->stats_mem) {
 851                /*
 852                 * In case of failure, free_shared_mem() is called, which
 853                 * should free any memory that was alloced till the
 854                 * failure happened.
 855                 */
 856                return -ENOMEM;
 857        }
 858        mem_allocated += size;
 859        mac_control->stats_mem_sz = size;
 860
 861        tmp_v_addr = mac_control->stats_mem;
 862        mac_control->stats_info = tmp_v_addr;
 863        memset(tmp_v_addr, 0, size);
 864        DBG_PRINT(INIT_DBG, "%s: Ring Mem PHY: 0x%llx\n",
 865                dev_name(&nic->pdev->dev), (unsigned long long)tmp_p_addr);
 866        mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
 867        return SUCCESS;
 868}
 869
 870/**
 871 * free_shared_mem - Free the allocated Memory
 872 * @nic:  Device private variable.
 873 * Description: This function is to free all memory locations allocated by
 874 * the init_shared_mem() function and return it to the kernel.
 875 */
 876
 877static void free_shared_mem(struct s2io_nic *nic)
 878{
 879        int i, j, blk_cnt, size;
 880        void *tmp_v_addr;
 881        dma_addr_t tmp_p_addr;
 882        int lst_size, lst_per_page;
 883        struct net_device *dev;
 884        int page_num = 0;
 885        struct config_param *config;
 886        struct mac_info *mac_control;
 887        struct stat_block *stats;
 888        struct swStat *swstats;
 889
 890        if (!nic)
 891                return;
 892
 893        dev = nic->dev;
 894
 895        config = &nic->config;
 896        mac_control = &nic->mac_control;
 897        stats = mac_control->stats_info;
 898        swstats = &stats->sw_stat;
 899
 900        lst_size = sizeof(struct TxD) * config->max_txds;
 901        lst_per_page = PAGE_SIZE / lst_size;
 902
 903        for (i = 0; i < config->tx_fifo_num; i++) {
 904                struct fifo_info *fifo = &mac_control->fifos[i];
 905                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 906
 907                page_num = TXD_MEM_PAGE_CNT(tx_cfg->fifo_len, lst_per_page);
 908                for (j = 0; j < page_num; j++) {
 909                        int mem_blks = (j * lst_per_page);
 910                        struct list_info_hold *fli;
 911
 912                        if (!fifo->list_info)
 913                                return;
 914
 915                        fli = &fifo->list_info[mem_blks];
 916                        if (!fli->list_virt_addr)
 917                                break;
 918                        pci_free_consistent(nic->pdev, PAGE_SIZE,
 919                                            fli->list_virt_addr,
 920                                            fli->list_phy_addr);
 921                        swstats->mem_freed += PAGE_SIZE;
 922                }
 923                /* If we got a zero DMA address during allocation,
 924                 * free the page now
 925                 */
 926                if (mac_control->zerodma_virt_addr) {
 927                        pci_free_consistent(nic->pdev, PAGE_SIZE,
 928                                            mac_control->zerodma_virt_addr,
 929                                            (dma_addr_t)0);
 930                        DBG_PRINT(INIT_DBG,
 931                                  "%s: Freeing TxDL with zero DMA address. "
 932                                  "Virtual address %p\n",
 933                                  dev->name, mac_control->zerodma_virt_addr);
 934                        swstats->mem_freed += PAGE_SIZE;
 935                }
 936                kfree(fifo->list_info);
 937                swstats->mem_freed += tx_cfg->fifo_len *
 938                        sizeof(struct list_info_hold);
 939        }
 940
 941        size = SIZE_OF_BLOCK;
 942        for (i = 0; i < config->rx_ring_num; i++) {
 943                struct ring_info *ring = &mac_control->rings[i];
 944
 945                blk_cnt = ring->block_count;
 946                for (j = 0; j < blk_cnt; j++) {
 947                        tmp_v_addr = ring->rx_blocks[j].block_virt_addr;
 948                        tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
 949                        if (tmp_v_addr == NULL)
 950                                break;
 951                        pci_free_consistent(nic->pdev, size,
 952                                            tmp_v_addr, tmp_p_addr);
 953                        swstats->mem_freed += size;
 954                        kfree(ring->rx_blocks[j].rxds);
 955                        swstats->mem_freed += sizeof(struct rxd_info) *
 956                                rxd_count[nic->rxd_mode];
 957                }
 958        }
 959
 960        if (nic->rxd_mode == RXD_MODE_3B) {
 961                /* Freeing buffer storage addresses in 2BUFF mode. */
 962                for (i = 0; i < config->rx_ring_num; i++) {
 963                        struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
 964                        struct ring_info *ring = &mac_control->rings[i];
 965
 966                        blk_cnt = rx_cfg->num_rxd /
 967                                (rxd_count[nic->rxd_mode] + 1);
 968                        for (j = 0; j < blk_cnt; j++) {
 969                                int k = 0;
 970                                if (!ring->ba[j])
 971                                        continue;
 972                                while (k != rxd_count[nic->rxd_mode]) {
 973                                        struct buffAdd *ba = &ring->ba[j][k];
 974                                        kfree(ba->ba_0_org);
 975                                        swstats->mem_freed +=
 976                                                BUF0_LEN + ALIGN_SIZE;
 977                                        kfree(ba->ba_1_org);
 978                                        swstats->mem_freed +=
 979                                                BUF1_LEN + ALIGN_SIZE;
 980                                        k++;
 981                                }
 982                                kfree(ring->ba[j]);
 983                                swstats->mem_freed += sizeof(struct buffAdd) *
 984                                        (rxd_count[nic->rxd_mode] + 1);
 985                        }
 986                        kfree(ring->ba);
 987                        swstats->mem_freed += sizeof(struct buffAdd *) *
 988                                blk_cnt;
 989                }
 990        }
 991
 992        for (i = 0; i < nic->config.tx_fifo_num; i++) {
 993                struct fifo_info *fifo = &mac_control->fifos[i];
 994                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
 995
 996                if (fifo->ufo_in_band_v) {
 997                        swstats->mem_freed += tx_cfg->fifo_len *
 998                                sizeof(u64);
 999                        kfree(fifo->ufo_in_band_v);
1000                }
1001        }
1002
1003        if (mac_control->stats_mem) {
1004                swstats->mem_freed += mac_control->stats_mem_sz;
1005                pci_free_consistent(nic->pdev,
1006                                    mac_control->stats_mem_sz,
1007                                    mac_control->stats_mem,
1008                                    mac_control->stats_mem_phy);
1009        }
1010}
1011
1012/**
1013 * s2io_verify_pci_mode -
1014 */
1015
1016static int s2io_verify_pci_mode(struct s2io_nic *nic)
1017{
1018        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1019        register u64 val64 = 0;
1020        int     mode;
1021
1022        val64 = readq(&bar0->pci_mode);
1023        mode = (u8)GET_PCI_MODE(val64);
1024
1025        if (val64 & PCI_MODE_UNKNOWN_MODE)
1026                return -1;      /* Unknown PCI mode */
1027        return mode;
1028}
1029
1030#define NEC_VENID   0x1033
1031#define NEC_DEVID   0x0125
1032static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1033{
1034        struct pci_dev *tdev = NULL;
1035        for_each_pci_dev(tdev) {
1036                if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1037                        if (tdev->bus == s2io_pdev->bus->parent) {
1038                                pci_dev_put(tdev);
1039                                return 1;
1040                        }
1041                }
1042        }
1043        return 0;
1044}
1045
1046static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1047/**
1048 * s2io_print_pci_mode -
1049 */
1050static int s2io_print_pci_mode(struct s2io_nic *nic)
1051{
1052        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1053        register u64 val64 = 0;
1054        int     mode;
1055        struct config_param *config = &nic->config;
1056        const char *pcimode;
1057
1058        val64 = readq(&bar0->pci_mode);
1059        mode = (u8)GET_PCI_MODE(val64);
1060
1061        if (val64 & PCI_MODE_UNKNOWN_MODE)
1062                return -1;      /* Unknown PCI mode */
1063
1064        config->bus_speed = bus_speed[mode];
1065
1066        if (s2io_on_nec_bridge(nic->pdev)) {
1067                DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1068                          nic->dev->name);
1069                return mode;
1070        }
1071
1072        switch (mode) {
1073        case PCI_MODE_PCI_33:
1074                pcimode = "33MHz PCI bus";
1075                break;
1076        case PCI_MODE_PCI_66:
1077                pcimode = "66MHz PCI bus";
1078                break;
1079        case PCI_MODE_PCIX_M1_66:
1080                pcimode = "66MHz PCIX(M1) bus";
1081                break;
1082        case PCI_MODE_PCIX_M1_100:
1083                pcimode = "100MHz PCIX(M1) bus";
1084                break;
1085        case PCI_MODE_PCIX_M1_133:
1086                pcimode = "133MHz PCIX(M1) bus";
1087                break;
1088        case PCI_MODE_PCIX_M2_66:
1089                pcimode = "133MHz PCIX(M2) bus";
1090                break;
1091        case PCI_MODE_PCIX_M2_100:
1092                pcimode = "200MHz PCIX(M2) bus";
1093                break;
1094        case PCI_MODE_PCIX_M2_133:
1095                pcimode = "266MHz PCIX(M2) bus";
1096                break;
1097        default:
1098                pcimode = "unsupported bus!";
1099                mode = -1;
1100        }
1101
1102        DBG_PRINT(ERR_DBG, "%s: Device is on %d bit %s\n",
1103                  nic->dev->name, val64 & PCI_MODE_32_BITS ? 32 : 64, pcimode);
1104
1105        return mode;
1106}
1107
1108/**
1109 *  init_tti - Initialization transmit traffic interrupt scheme
1110 *  @nic: device private variable
1111 *  @link: link status (UP/DOWN) used to enable/disable continuous
1112 *  transmit interrupts
1113 *  Description: The function configures transmit traffic interrupts
1114 *  Return Value:  SUCCESS on success and
1115 *  '-1' on failure
1116 */
1117
1118static int init_tti(struct s2io_nic *nic, int link)
1119{
1120        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1121        register u64 val64 = 0;
1122        int i;
1123        struct config_param *config = &nic->config;
1124
1125        for (i = 0; i < config->tx_fifo_num; i++) {
1126                /*
1127                 * TTI Initialization. Default Tx timer gets us about
1128                 * 250 interrupts per sec. Continuous interrupts are enabled
1129                 * by default.
1130                 */
1131                if (nic->device_type == XFRAME_II_DEVICE) {
1132                        int count = (nic->config.bus_speed * 125)/2;
1133                        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1134                } else
1135                        val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1136
1137                val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1138                        TTI_DATA1_MEM_TX_URNG_B(0x10) |
1139                        TTI_DATA1_MEM_TX_URNG_C(0x30) |
1140                        TTI_DATA1_MEM_TX_TIMER_AC_EN;
1141                if (i == 0)
1142                        if (use_continuous_tx_intrs && (link == LINK_UP))
1143                                val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1144                writeq(val64, &bar0->tti_data1_mem);
1145
1146                if (nic->config.intr_type == MSI_X) {
1147                        val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1148                                TTI_DATA2_MEM_TX_UFC_B(0x100) |
1149                                TTI_DATA2_MEM_TX_UFC_C(0x200) |
1150                                TTI_DATA2_MEM_TX_UFC_D(0x300);
1151                } else {
1152                        if ((nic->config.tx_steering_type ==
1153                             TX_DEFAULT_STEERING) &&
1154                            (config->tx_fifo_num > 1) &&
1155                            (i >= nic->udp_fifo_idx) &&
1156                            (i < (nic->udp_fifo_idx +
1157                                  nic->total_udp_fifos)))
1158                                val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1159                                        TTI_DATA2_MEM_TX_UFC_B(0x80) |
1160                                        TTI_DATA2_MEM_TX_UFC_C(0x100) |
1161                                        TTI_DATA2_MEM_TX_UFC_D(0x120);
1162                        else
1163                                val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1164                                        TTI_DATA2_MEM_TX_UFC_B(0x20) |
1165                                        TTI_DATA2_MEM_TX_UFC_C(0x40) |
1166                                        TTI_DATA2_MEM_TX_UFC_D(0x80);
1167                }
1168
1169                writeq(val64, &bar0->tti_data2_mem);
1170
1171                val64 = TTI_CMD_MEM_WE |
1172                        TTI_CMD_MEM_STROBE_NEW_CMD |
1173                        TTI_CMD_MEM_OFFSET(i);
1174                writeq(val64, &bar0->tti_command_mem);
1175
1176                if (wait_for_cmd_complete(&bar0->tti_command_mem,
1177                                          TTI_CMD_MEM_STROBE_NEW_CMD,
1178                                          S2IO_BIT_RESET) != SUCCESS)
1179                        return FAILURE;
1180        }
1181
1182        return SUCCESS;
1183}
1184
1185/**
1186 *  init_nic - Initialization of hardware
1187 *  @nic: device private variable
1188 *  Description: The function sequentially configures every block
1189 *  of the H/W from their reset values.
1190 *  Return Value:  SUCCESS on success and
1191 *  '-1' on failure (endian settings incorrect).
1192 */
1193
1194static int init_nic(struct s2io_nic *nic)
1195{
1196        struct XENA_dev_config __iomem *bar0 = nic->bar0;
1197        struct net_device *dev = nic->dev;
1198        register u64 val64 = 0;
1199        void __iomem *add;
1200        u32 time;
1201        int i, j;
1202        int dtx_cnt = 0;
1203        unsigned long long mem_share;
1204        int mem_size;
1205        struct config_param *config = &nic->config;
1206        struct mac_info *mac_control = &nic->mac_control;
1207
1208        /* to set the swapper controle on the card */
1209        if (s2io_set_swapper(nic)) {
1210                DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
1211                return -EIO;
1212        }
1213
1214        /*
1215         * Herc requires EOI to be removed from reset before XGXS, so..
1216         */
1217        if (nic->device_type & XFRAME_II_DEVICE) {
1218                val64 = 0xA500000000ULL;
1219                writeq(val64, &bar0->sw_reset);
1220                msleep(500);
1221                val64 = readq(&bar0->sw_reset);
1222        }
1223
1224        /* Remove XGXS from reset state */
1225        val64 = 0;
1226        writeq(val64, &bar0->sw_reset);
1227        msleep(500);
1228        val64 = readq(&bar0->sw_reset);
1229
1230        /* Ensure that it's safe to access registers by checking
1231         * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1232         */
1233        if (nic->device_type == XFRAME_II_DEVICE) {
1234                for (i = 0; i < 50; i++) {
1235                        val64 = readq(&bar0->adapter_status);
1236                        if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1237                                break;
1238                        msleep(10);
1239                }
1240                if (i == 50)
1241                        return -ENODEV;
1242        }
1243
1244        /*  Enable Receiving broadcasts */
1245        add = &bar0->mac_cfg;
1246        val64 = readq(&bar0->mac_cfg);
1247        val64 |= MAC_RMAC_BCAST_ENABLE;
1248        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1249        writel((u32)val64, add);
1250        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1251        writel((u32) (val64 >> 32), (add + 4));
1252
1253        /* Read registers in all blocks */
1254        val64 = readq(&bar0->mac_int_mask);
1255        val64 = readq(&bar0->mc_int_mask);
1256        val64 = readq(&bar0->xgxs_int_mask);
1257
1258        /*  Set MTU */
1259        val64 = dev->mtu;
1260        writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1261
1262        if (nic->device_type & XFRAME_II_DEVICE) {
1263                while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1264                        SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1265                                          &bar0->dtx_control, UF);
1266                        if (dtx_cnt & 0x1)
1267                                msleep(1); /* Necessary!! */
1268                        dtx_cnt++;
1269                }
1270        } else {
1271                while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1272                        SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1273                                          &bar0->dtx_control, UF);
1274                        val64 = readq(&bar0->dtx_control);
1275                        dtx_cnt++;
1276                }
1277        }
1278
1279        /*  Tx DMA Initialization */
1280        val64 = 0;
1281        writeq(val64, &bar0->tx_fifo_partition_0);
1282        writeq(val64, &bar0->tx_fifo_partition_1);
1283        writeq(val64, &bar0->tx_fifo_partition_2);
1284        writeq(val64, &bar0->tx_fifo_partition_3);
1285
1286        for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1287                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
1288
1289                val64 |= vBIT(tx_cfg->fifo_len - 1, ((j * 32) + 19), 13) |
1290                        vBIT(tx_cfg->fifo_priority, ((j * 32) + 5), 3);
1291
1292                if (i == (config->tx_fifo_num - 1)) {
1293                        if (i % 2 == 0)
1294                                i++;
1295                }
1296
1297                switch (i) {
1298                case 1:
1299                        writeq(val64, &bar0->tx_fifo_partition_0);
1300                        val64 = 0;
1301                        j = 0;
1302                        break;
1303                case 3:
1304                        writeq(val64, &bar0->tx_fifo_partition_1);
1305                        val64 = 0;
1306                        j = 0;
1307                        break;
1308                case 5:
1309                        writeq(val64, &bar0->tx_fifo_partition_2);
1310                        val64 = 0;
1311                        j = 0;
1312                        break;
1313                case 7:
1314                        writeq(val64, &bar0->tx_fifo_partition_3);
1315                        val64 = 0;
1316                        j = 0;
1317                        break;
1318                default:
1319                        j++;
1320                        break;
1321                }
1322        }
1323
1324        /*
1325         * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1326         * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1327         */
1328        if ((nic->device_type == XFRAME_I_DEVICE) && (nic->pdev->revision < 4))
1329                writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1330
1331        val64 = readq(&bar0->tx_fifo_partition_0);
1332        DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1333                  &bar0->tx_fifo_partition_0, (unsigned long long)val64);
1334
1335        /*
1336         * Initialization of Tx_PA_CONFIG register to ignore packet
1337         * integrity checking.
1338         */
1339        val64 = readq(&bar0->tx_pa_cfg);
1340        val64 |= TX_PA_CFG_IGNORE_FRM_ERR |
1341                TX_PA_CFG_IGNORE_SNAP_OUI |
1342                TX_PA_CFG_IGNORE_LLC_CTRL |
1343                TX_PA_CFG_IGNORE_L2_ERR;
1344        writeq(val64, &bar0->tx_pa_cfg);
1345
1346        /* Rx DMA intialization. */
1347        val64 = 0;
1348        for (i = 0; i < config->rx_ring_num; i++) {
1349                struct rx_ring_config *rx_cfg = &config->rx_cfg[i];
1350
1351                val64 |= vBIT(rx_cfg->ring_priority, (5 + (i * 8)), 3);
1352        }
1353        writeq(val64, &bar0->rx_queue_priority);
1354
1355        /*
1356         * Allocating equal share of memory to all the
1357         * configured Rings.
1358         */
1359        val64 = 0;
1360        if (nic->device_type & XFRAME_II_DEVICE)
1361                mem_size = 32;
1362        else
1363                mem_size = 64;
1364
1365        for (i = 0; i < config->rx_ring_num; i++) {
1366                switch (i) {
1367                case 0:
1368                        mem_share = (mem_size / config->rx_ring_num +
1369                                     mem_size % config->rx_ring_num);
1370                        val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1371                        continue;
1372                case 1:
1373                        mem_share = (mem_size / config->rx_ring_num);
1374                        val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1375                        continue;
1376                case 2:
1377                        mem_share = (mem_size / config->rx_ring_num);
1378                        val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1379                        continue;
1380                case 3:
1381                        mem_share = (mem_size / config->rx_ring_num);
1382                        val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1383                        continue;
1384                case 4:
1385                        mem_share = (mem_size / config->rx_ring_num);
1386                        val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1387                        continue;
1388                case 5:
1389                        mem_share = (mem_size / config->rx_ring_num);
1390                        val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1391                        continue;
1392                case 6:
1393                        mem_share = (mem_size / config->rx_ring_num);
1394                        val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1395                        continue;
1396                case 7:
1397                        mem_share = (mem_size / config->rx_ring_num);
1398                        val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1399                        continue;
1400                }
1401        }
1402        writeq(val64, &bar0->rx_queue_cfg);
1403
1404        /*
1405         * Filling Tx round robin registers
1406         * as per the number of FIFOs for equal scheduling priority
1407         */
1408        switch (config->tx_fifo_num) {
1409        case 1:
1410                val64 = 0x0;
1411                writeq(val64, &bar0->tx_w_round_robin_0);
1412                writeq(val64, &bar0->tx_w_round_robin_1);
1413                writeq(val64, &bar0->tx_w_round_robin_2);
1414                writeq(val64, &bar0->tx_w_round_robin_3);
1415                writeq(val64, &bar0->tx_w_round_robin_4);
1416                break;
1417        case 2:
1418                val64 = 0x0001000100010001ULL;
1419                writeq(val64, &bar0->tx_w_round_robin_0);
1420                writeq(val64, &bar0->tx_w_round_robin_1);
1421                writeq(val64, &bar0->tx_w_round_robin_2);
1422                writeq(val64, &bar0->tx_w_round_robin_3);
1423                val64 = 0x0001000100000000ULL;
1424                writeq(val64, &bar0->tx_w_round_robin_4);
1425                break;
1426        case 3:
1427                val64 = 0x0001020001020001ULL;
1428                writeq(val64, &bar0->tx_w_round_robin_0);
1429                val64 = 0x0200010200010200ULL;
1430                writeq(val64, &bar0->tx_w_round_robin_1);
1431                val64 = 0x0102000102000102ULL;
1432                writeq(val64, &bar0->tx_w_round_robin_2);
1433                val64 = 0x0001020001020001ULL;
1434                writeq(val64, &bar0->tx_w_round_robin_3);
1435                val64 = 0x0200010200000000ULL;
1436                writeq(val64, &bar0->tx_w_round_robin_4);
1437                break;
1438        case 4:
1439                val64 = 0x0001020300010203ULL;
1440                writeq(val64, &bar0->tx_w_round_robin_0);
1441                writeq(val64, &bar0->tx_w_round_robin_1);
1442                writeq(val64, &bar0->tx_w_round_robin_2);
1443                writeq(val64, &bar0->tx_w_round_robin_3);
1444                val64 = 0x0001020300000000ULL;
1445                writeq(val64, &bar0->tx_w_round_robin_4);
1446                break;
1447        case 5:
1448                val64 = 0x0001020304000102ULL;
1449                writeq(val64, &bar0->tx_w_round_robin_0);
1450                val64 = 0x0304000102030400ULL;
1451                writeq(val64, &bar0->tx_w_round_robin_1);
1452                val64 = 0x0102030400010203ULL;
1453                writeq(val64, &bar0->tx_w_round_robin_2);
1454                val64 = 0x0400010203040001ULL;
1455                writeq(val64, &bar0->tx_w_round_robin_3);
1456                val64 = 0x0203040000000000ULL;
1457                writeq(val64, &bar0->tx_w_round_robin_4);
1458                break;
1459        case 6:
1460                val64 = 0x0001020304050001ULL;
1461                writeq(val64, &bar0->tx_w_round_robin_0);
1462                val64 = 0x0203040500010203ULL;
1463                writeq(val64, &bar0->tx_w_round_robin_1);
1464                val64 = 0x0405000102030405ULL;
1465                writeq(val64, &bar0->tx_w_round_robin_2);
1466                val64 = 0x0001020304050001ULL;
1467                writeq(val64, &bar0->tx_w_round_robin_3);
1468                val64 = 0x0203040500000000ULL;
1469                writeq(val64, &bar0->tx_w_round_robin_4);
1470                break;
1471        case 7:
1472                val64 = 0x0001020304050600ULL;
1473                writeq(val64, &bar0->tx_w_round_robin_0);
1474                val64 = 0x0102030405060001ULL;
1475                writeq(val64, &bar0->tx_w_round_robin_1);
1476                val64 = 0x0203040506000102ULL;
1477                writeq(val64, &bar0->tx_w_round_robin_2);
1478                val64 = 0x0304050600010203ULL;
1479                writeq(val64, &bar0->tx_w_round_robin_3);
1480                val64 = 0x0405060000000000ULL;
1481                writeq(val64, &bar0->tx_w_round_robin_4);
1482                break;
1483        case 8:
1484                val64 = 0x0001020304050607ULL;
1485                writeq(val64, &bar0->tx_w_round_robin_0);
1486                writeq(val64, &bar0->tx_w_round_robin_1);
1487                writeq(val64, &bar0->tx_w_round_robin_2);
1488                writeq(val64, &bar0->tx_w_round_robin_3);
1489                val64 = 0x0001020300000000ULL;
1490                writeq(val64, &bar0->tx_w_round_robin_4);
1491                break;
1492        }
1493
1494        /* Enable all configured Tx FIFO partitions */
1495        val64 = readq(&bar0->tx_fifo_partition_0);
1496        val64 |= (TX_FIFO_PARTITION_EN);
1497        writeq(val64, &bar0->tx_fifo_partition_0);
1498
1499        /* Filling the Rx round robin registers as per the
1500         * number of Rings and steering based on QoS with
1501         * equal priority.
1502         */
1503        switch (config->rx_ring_num) {
1504        case 1:
1505                val64 = 0x0;
1506                writeq(val64, &bar0->rx_w_round_robin_0);
1507                writeq(val64, &bar0->rx_w_round_robin_1);
1508                writeq(val64, &bar0->rx_w_round_robin_2);
1509                writeq(val64, &bar0->rx_w_round_robin_3);
1510                writeq(val64, &bar0->rx_w_round_robin_4);
1511
1512                val64 = 0x8080808080808080ULL;
1513                writeq(val64, &bar0->rts_qos_steering);
1514                break;
1515        case 2:
1516                val64 = 0x0001000100010001ULL;
1517                writeq(val64, &bar0->rx_w_round_robin_0);
1518                writeq(val64, &bar0->rx_w_round_robin_1);
1519                writeq(val64, &bar0->rx_w_round_robin_2);
1520                writeq(val64, &bar0->rx_w_round_robin_3);
1521                val64 = 0x0001000100000000ULL;
1522                writeq(val64, &bar0->rx_w_round_robin_4);
1523
1524                val64 = 0x8080808040404040ULL;
1525                writeq(val64, &bar0->rts_qos_steering);
1526                break;
1527        case 3:
1528                val64 = 0x0001020001020001ULL;
1529                writeq(val64, &bar0->rx_w_round_robin_0);
1530                val64 = 0x0200010200010200ULL;
1531                writeq(val64, &bar0->rx_w_round_robin_1);
1532                val64 = 0x0102000102000102ULL;
1533                writeq(val64, &bar0->rx_w_round_robin_2);
1534                val64 = 0x0001020001020001ULL;
1535                writeq(val64, &bar0->rx_w_round_robin_3);
1536                val64 = 0x0200010200000000ULL;
1537                writeq(val64, &bar0->rx_w_round_robin_4);
1538
1539                val64 = 0x8080804040402020ULL;
1540                writeq(val64, &bar0->rts_qos_steering);
1541                break;
1542        case 4:
1543                val64 = 0x0001020300010203ULL;
1544                writeq(val64, &bar0->rx_w_round_robin_0);
1545                writeq(val64, &bar0->rx_w_round_robin_1);
1546                writeq(val64, &bar0->rx_w_round_robin_2);
1547                writeq(val64, &bar0->rx_w_round_robin_3);
1548                val64 = 0x0001020300000000ULL;
1549                writeq(val64, &bar0->rx_w_round_robin_4);
1550
1551                val64 = 0x8080404020201010ULL;
1552                writeq(val64, &bar0->rts_qos_steering);
1553                break;
1554        case 5:
1555                val64 = 0x0001020304000102ULL;
1556                writeq(val64, &bar0->rx_w_round_robin_0);
1557                val64 = 0x0304000102030400ULL;
1558                writeq(val64, &bar0->rx_w_round_robin_1);
1559                val64 = 0x0102030400010203ULL;
1560                writeq(val64, &bar0->rx_w_round_robin_2);
1561                val64 = 0x0400010203040001ULL;
1562                writeq(val64, &bar0->rx_w_round_robin_3);
1563                val64 = 0x0203040000000000ULL;
1564                writeq(val64, &bar0->rx_w_round_robin_4);
1565
1566                val64 = 0x8080404020201008ULL;
1567                writeq(val64, &bar0->rts_qos_steering);
1568                break;
1569        case 6:
1570                val64 = 0x0001020304050001ULL;
1571                writeq(val64, &bar0->rx_w_round_robin_0);
1572                val64 = 0x0203040500010203ULL;
1573                writeq(val64, &bar0->rx_w_round_robin_1);
1574                val64 = 0x0405000102030405ULL;
1575                writeq(val64, &bar0->rx_w_round_robin_2);
1576                val64 = 0x0001020304050001ULL;
1577                writeq(val64, &bar0->rx_w_round_robin_3);
1578                val64 = 0x0203040500000000ULL;
1579                writeq(val64, &bar0->rx_w_round_robin_4);
1580
1581                val64 = 0x8080404020100804ULL;
1582                writeq(val64, &bar0->rts_qos_steering);
1583                break;
1584        case 7:
1585                val64 = 0x0001020304050600ULL;
1586                writeq(val64, &bar0->rx_w_round_robin_0);
1587                val64 = 0x0102030405060001ULL;
1588                writeq(val64, &bar0->rx_w_round_robin_1);
1589                val64 = 0x0203040506000102ULL;
1590                writeq(val64, &bar0->rx_w_round_robin_2);
1591                val64 = 0x0304050600010203ULL;
1592                writeq(val64, &bar0->rx_w_round_robin_3);
1593                val64 = 0x0405060000000000ULL;
1594                writeq(val64, &bar0->rx_w_round_robin_4);
1595
1596                val64 = 0x8080402010080402ULL;
1597                writeq(val64, &bar0->rts_qos_steering);
1598                break;
1599        case 8:
1600                val64 = 0x0001020304050607ULL;
1601                writeq(val64, &bar0->rx_w_round_robin_0);
1602                writeq(val64, &bar0->rx_w_round_robin_1);
1603                writeq(val64, &bar0->rx_w_round_robin_2);
1604                writeq(val64, &bar0->rx_w_round_robin_3);
1605                val64 = 0x0001020300000000ULL;
1606                writeq(val64, &bar0->rx_w_round_robin_4);
1607
1608                val64 = 0x8040201008040201ULL;
1609                writeq(val64, &bar0->rts_qos_steering);
1610                break;
1611        }
1612
1613        /* UDP Fix */
1614        val64 = 0;
1615        for (i = 0; i < 8; i++)
1616                writeq(val64, &bar0->rts_frm_len_n[i]);
1617
1618        /* Set the default rts frame length for the rings configured */
1619        val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1620        for (i = 0 ; i < config->rx_ring_num ; i++)
1621                writeq(val64, &bar0->rts_frm_len_n[i]);
1622
1623        /* Set the frame length for the configured rings
1624         * desired by the user
1625         */
1626        for (i = 0; i < config->rx_ring_num; i++) {
1627                /* If rts_frm_len[i] == 0 then it is assumed that user not
1628                 * specified frame length steering.
1629                 * If the user provides the frame length then program
1630                 * the rts_frm_len register for those values or else
1631                 * leave it as it is.
1632                 */
1633                if (rts_frm_len[i] != 0) {
1634                        writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1635                               &bar0->rts_frm_len_n[i]);
1636                }
1637        }
1638
1639        /* Disable differentiated services steering logic */
1640        for (i = 0; i < 64; i++) {
1641                if (rts_ds_steer(nic, i, 0) == FAILURE) {
1642                        DBG_PRINT(ERR_DBG,
1643                                  "%s: rts_ds_steer failed on codepoint %d\n",
1644                                  dev->name, i);
1645                        return -ENODEV;
1646                }
1647        }
1648
1649        /* Program statistics memory */
1650        writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1651
1652        if (nic->device_type == XFRAME_II_DEVICE) {
1653                val64 = STAT_BC(0x320);
1654                writeq(val64, &bar0->stat_byte_cnt);
1655        }
1656
1657        /*
1658         * Initializing the sampling rate for the device to calculate the
1659         * bandwidth utilization.
1660         */
1661        val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1662                MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1663        writeq(val64, &bar0->mac_link_util);
1664
1665        /*
1666         * Initializing the Transmit and Receive Traffic Interrupt
1667         * Scheme.
1668         */
1669
1670        /* Initialize TTI */
1671        if (SUCCESS != init_tti(nic, nic->last_link_state))
1672                return -ENODEV;
1673
1674        /* RTI Initialization */
1675        if (nic->device_type == XFRAME_II_DEVICE) {
1676                /*
1677                 * Programmed to generate Apprx 500 Intrs per
1678                 * second
1679                 */
1680                int count = (nic->config.bus_speed * 125)/4;
1681                val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1682        } else
1683                val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1684        val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1685                RTI_DATA1_MEM_RX_URNG_B(0x10) |
1686                RTI_DATA1_MEM_RX_URNG_C(0x30) |
1687                RTI_DATA1_MEM_RX_TIMER_AC_EN;
1688
1689        writeq(val64, &bar0->rti_data1_mem);
1690
1691        val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1692                RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1693        if (nic->config.intr_type == MSI_X)
1694                val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) |
1695                          RTI_DATA2_MEM_RX_UFC_D(0x40));
1696        else
1697                val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) |
1698                          RTI_DATA2_MEM_RX_UFC_D(0x80));
1699        writeq(val64, &bar0->rti_data2_mem);
1700
1701        for (i = 0; i < config->rx_ring_num; i++) {
1702                val64 = RTI_CMD_MEM_WE |
1703                        RTI_CMD_MEM_STROBE_NEW_CMD |
1704                        RTI_CMD_MEM_OFFSET(i);
1705                writeq(val64, &bar0->rti_command_mem);
1706
1707                /*
1708                 * Once the operation completes, the Strobe bit of the
1709                 * command register will be reset. We poll for this
1710                 * particular condition. We wait for a maximum of 500ms
1711                 * for the operation to complete, if it's not complete
1712                 * by then we return error.
1713                 */
1714                time = 0;
1715                while (true) {
1716                        val64 = readq(&bar0->rti_command_mem);
1717                        if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1718                                break;
1719
1720                        if (time > 10) {
1721                                DBG_PRINT(ERR_DBG, "%s: RTI init failed\n",
1722                                          dev->name);
1723                                return -ENODEV;
1724                        }
1725                        time++;
1726                        msleep(50);
1727                }
1728        }
1729
1730        /*
1731         * Initializing proper values as Pause threshold into all
1732         * the 8 Queues on Rx side.
1733         */
1734        writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1735        writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1736
1737        /* Disable RMAC PAD STRIPPING */
1738        add = &bar0->mac_cfg;
1739        val64 = readq(&bar0->mac_cfg);
1740        val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1741        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1742        writel((u32) (val64), add);
1743        writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1744        writel((u32) (val64 >> 32), (add + 4));
1745        val64 = readq(&bar0->mac_cfg);
1746
1747        /* Enable FCS stripping by adapter */
1748        add = &bar0->mac_cfg;
1749        val64 = readq(&bar0->mac_cfg);
1750        val64 |= MAC_CFG_RMAC_STRIP_FCS;
1751        if (nic->device_type == XFRAME_II_DEVICE)
1752                writeq(val64, &bar0->mac_cfg);
1753        else {
1754                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1755                writel((u32) (val64), add);
1756                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1757                writel((u32) (val64 >> 32), (add + 4));
1758        }
1759
1760        /*
1761         * Set the time value to be inserted in the pause frame
1762         * generated by xena.
1763         */
1764        val64 = readq(&bar0->rmac_pause_cfg);
1765        val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1766        val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1767        writeq(val64, &bar0->rmac_pause_cfg);
1768
1769        /*
1770         * Set the Threshold Limit for Generating the pause frame
1771         * If the amount of data in any Queue exceeds ratio of
1772         * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1773         * pause frame is generated
1774         */
1775        val64 = 0;
1776        for (i = 0; i < 4; i++) {
1777                val64 |= (((u64)0xFF00 |
1778                           nic->mac_control.mc_pause_threshold_q0q3)
1779                          << (i * 2 * 8));
1780        }
1781        writeq(val64, &bar0->mc_pause_thresh_q0q3);
1782
1783        val64 = 0;
1784        for (i = 0; i < 4; i++) {
1785                val64 |= (((u64)0xFF00 |
1786                           nic->mac_control.mc_pause_threshold_q4q7)
1787                          << (i * 2 * 8));
1788        }
1789        writeq(val64, &bar0->mc_pause_thresh_q4q7);
1790
1791        /*
1792         * TxDMA will stop Read request if the number of read split has
1793         * exceeded the limit pointed by shared_splits
1794         */
1795        val64 = readq(&bar0->pic_control);
1796        val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1797        writeq(val64, &bar0->pic_control);
1798
1799        if (nic->config.bus_speed == 266) {
1800                writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1801                writeq(0x0, &bar0->read_retry_delay);
1802                writeq(0x0, &bar0->write_retry_delay);
1803        }
1804
1805        /*
1806         * Programming the Herc to split every write transaction
1807         * that does not start on an ADB to reduce disconnects.
1808         */
1809        if (nic->device_type == XFRAME_II_DEVICE) {
1810                val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1811                        MISC_LINK_STABILITY_PRD(3);
1812                writeq(val64, &bar0->misc_control);
1813                val64 = readq(&bar0->pic_control2);
1814                val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1815                writeq(val64, &bar0->pic_control2);
1816        }
1817        if (strstr(nic->product_name, "CX4")) {
1818                val64 = TMAC_AVG_IPG(0x17);
1819                writeq(val64, &bar0->tmac_avg_ipg);
1820        }
1821
1822        return SUCCESS;
1823}
1824#define LINK_UP_DOWN_INTERRUPT          1
1825#define MAC_RMAC_ERR_TIMER              2
1826
1827static int s2io_link_fault_indication(struct s2io_nic *nic)
1828{
1829        if (nic->device_type == XFRAME_II_DEVICE)
1830                return LINK_UP_DOWN_INTERRUPT;
1831        else
1832                return MAC_RMAC_ERR_TIMER;
1833}
1834
1835/**
1836 *  do_s2io_write_bits -  update alarm bits in alarm register
1837 *  @value: alarm bits
1838 *  @flag: interrupt status
1839 *  @addr: address value
1840 *  Description: update alarm bits in alarm register
1841 *  Return Value:
1842 *  NONE.
1843 */
1844static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1845{
1846        u64 temp64;
1847
1848        temp64 = readq(addr);
1849
1850        if (flag == ENABLE_INTRS)
1851                temp64 &= ~((u64)value);
1852        else
1853                temp64 |= ((u64)value);
1854        writeq(temp64, addr);
1855}
1856
/**
 *  en_dis_err_alarms - Enable or disable error alarm interrupt sources
 *  @nic: device private variable
 *  @mask: bitmask of interrupt blocks (TX_DMA_INTR, TX_MAC_INTR,
 *         TX_XGXS_INTR, RX_DMA_INTR, RX_MAC_INTR, RX_XGXS_INTR,
 *         MC_INTR) whose alarm sources should be updated
 *  @flag: ENABLE_INTRS to unmask the alarm sources, anything else
 *         masks them (see do_s2io_write_bits())
 *  Description: for each block selected in @mask, program that block's
 *  error-mask registers and accumulate the block's top-level bit into
 *  @nic->general_int_mask for later use by en_dis_able_nic_intrs().
 *  Return Value: NONE.
 */
static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 gen_int_mask = 0;
	u64 interruptible;

	/* Mask everything at the top level while reprogramming. */
	writeq(DISABLE_ALL_INTRS, &bar0->general_int_mask);
	if (mask & TX_DMA_INTR) {
		gen_int_mask |= TXDMA_INT_M;

		do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
				   TXDMA_PCC_INT | TXDMA_TTI_INT |
				   TXDMA_LSO_INT | TXDMA_TPA_INT |
				   TXDMA_SM_INT, flag, &bar0->txdma_int_mask);

		do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
				   PFC_MISC_0_ERR | PFC_MISC_1_ERR |
				   PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
				   &bar0->pfc_err_mask);

		do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
				   TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
				   TDA_PCIX_ERR, flag, &bar0->tda_err_mask);

		do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
				   PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
				   PCC_N_SERR | PCC_6_COF_OV_ERR |
				   PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
				   PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
				   PCC_TXB_ECC_SG_ERR,
				   flag, &bar0->pcc_err_mask);

		do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
				   TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);

		do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
				   LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
				   LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
				   flag, &bar0->lso_err_mask);

		do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
				   flag, &bar0->tpa_err_mask);

		do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
	}

	if (mask & TX_MAC_INTR) {
		gen_int_mask |= TXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
				   &bar0->mac_int_mask);
		do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
				   TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
				   TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
				   flag, &bar0->mac_tmac_err_mask);
	}

	if (mask & TX_XGXS_INTR) {
		gen_int_mask |= TXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
				   TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
				   flag, &bar0->xgxs_txgxs_err_mask);
	}

	if (mask & RX_DMA_INTR) {
		gen_int_mask |= RXDMA_INT_M;
		do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
				   RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
				   flag, &bar0->rxdma_int_mask);
		do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
				   RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
				   RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
				   RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
		do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
				   PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
				   PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
				   &bar0->prc_pcix_err_mask);
		do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
				   RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
				   &bar0->rpa_err_mask);
		do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
				   RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
				   RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
				   RDA_FRM_ECC_SG_ERR |
				   RDA_MISC_ERR|RDA_PCIX_ERR,
				   flag, &bar0->rda_err_mask);
		do_s2io_write_bits(RTI_SM_ERR_ALARM |
				   RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
				   flag, &bar0->rti_err_mask);
	}

	if (mask & RX_MAC_INTR) {
		gen_int_mask |= RXMAC_INT_M;
		do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
				   &bar0->mac_int_mask);
		interruptible = (RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
				 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
				 RMAC_DOUBLE_ECC_ERR);
		/* Only timer-based link-fault adapters take the link state
		 * change as an RMAC error alarm.
		 */
		if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER)
			interruptible |= RMAC_LINK_STATE_CHANGE_INT;
		do_s2io_write_bits(interruptible,
				   flag, &bar0->mac_rmac_err_mask);
	}

	if (mask & RX_XGXS_INTR) {
		gen_int_mask |= RXXGXS_INT_M;
		do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
				   &bar0->xgxs_int_mask);
		do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
				   &bar0->xgxs_rxgxs_err_mask);
	}

	if (mask & MC_INTR) {
		gen_int_mask |= MC_INT_M;
		do_s2io_write_bits(MC_INT_MASK_MC_INT,
				   flag, &bar0->mc_int_mask);
		do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
				   MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
				   &bar0->mc_err_mask);
	}
	nic->general_int_mask = gen_int_mask;

	/* Remove this line when alarm interrupts are enabled */
	nic->general_int_mask = 0;
}
1983
/**
 *  en_dis_able_nic_intrs - Enable or Disable the interrupts
 *  @nic: device private variable
 *  @mask: A mask indicating which Intr block must be modified
 *         (TX_PIC_INTR, TX_TRAFFIC_INTR, RX_TRAFFIC_INTR)
 *  @flag: ENABLE_INTRS or DISABLE_INTRS
 *  Description: This function will either disable or enable the interrupts
 *  depending on the flag argument. The mask argument can be used to
 *  enable/disable any Intr block. The selected blocks' top-level bits
 *  are combined with @nic->general_int_mask (set by en_dis_err_alarms())
 *  and written to the general interrupt mask register.
 *  Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask accumulated by en_dis_err_alarms(). */
	intr_mask = nic->general_int_mask;

	/*  Top level interrupt classification */
	/*  PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/*  Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
			    LINK_UP_DOWN_INTERRUPT) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						   &bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						   &bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/*  Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/*  Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/* Clearing a bit in the general mask enables that interrupt;
	 * on disable everything is masked, not just @mask's blocks.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64)intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the mask actually programmed into the hardware. */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2073
2074/**
2075 *  verify_pcc_quiescent- Checks for PCC quiescent state
2076 *  Return: 1 If PCC is quiescence
2077 *          0 If PCC is not quiescence
2078 */
2079static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2080{
2081        int ret = 0, herc;
2082        struct XENA_dev_config __iomem *bar0 = sp->bar0;
2083        u64 val64 = readq(&bar0->adapter_status);
2084
2085        herc = (sp->device_type == XFRAME_II_DEVICE);
2086
2087        if (flag == false) {
2088                if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2089                        if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2090                                ret = 1;
2091                } else {
2092                        if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2093                                ret = 1;
2094                }
2095        } else {
2096                if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2097                        if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2098                             ADAPTER_STATUS_RMAC_PCC_IDLE))
2099                                ret = 1;
2100                } else {
2101                        if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2102                             ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2103                                ret = 1;
2104                }
2105        }
2106
2107        return ret;
2108}
/**
 *  verify_xena_quiescence - Checks whether the H/W is ready
 *  @sp: device private variable
 *  Description: Reads the adapter status register and verifies that
 *  every hardware block (TDMA, RDMA, PFC, TMAC buffer, PIC, MC DRAM
 *  and queues, the PLLs and the RC PRC engines) reports ready or
 *  quiescent, logging the first block that is not.
 *  Return: 1 If xena is quiescence
 *          0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int  mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	/* Bail out on the first subsystem that is not ready. */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "TDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "RDMA is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "PFC is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "TMAC BUF is not empty!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "PIC is not QUIESCENT!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "MC_DRAM is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "MC_QUEUES is not ready!\n");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "M_PLL is not locked!\n");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE &&
	    mode != PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "P_PLL is not locked!\n");
		return 0;
	}
	/* All RC PRC engines must be quiescent, not just some. */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "RC_PRC is not QUIESCENT!\n");
		return 0;
	}
	return 1;
}
2177
2178/**
2179 * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2180 * @sp: Pointer to device specifc structure
2181 * Description :
2182 * New procedure to clear mac address reading  problems on Alpha platforms
2183 *
2184 */
2185
2186static void fix_mac_address(struct s2io_nic *sp)
2187{
2188        struct XENA_dev_config __iomem *bar0 = sp->bar0;
2189        int i = 0;
2190
2191        while (fix_mac[i] != END_SIGN) {
2192                writeq(fix_mac[i++], &bar0->gpio_control);
2193                udelay(10);
2194                (void) readq(&bar0->gpio_control);
2195        }
2196}
2197
/**
 *  start_nic - Turns the device on
 *  @nic : device private variable.
 *  Description:
 *  This function actually turns the device on. Before this  function is
 *  called,all Registers are configured from their reset states
 *  and shared memory is allocated but the NIC is still quiescent. On
 *  calling this function, the device interrupts are cleared and the NIC is
 *  literally switched on by writing into the adapter control register.
 *  Return Value:
 *  SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		/* Point the PRC at the first Rx block of this ring */
		writeq((u64)ring->rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace (not accumulate) the RxD backoff interval field */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the vlan_tag_strip module parameter */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		nic->vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the code CLEARS ADAPTER_ECC_EN here although the
	 * comment says "enabling" - presumably the register bit is an
	 * active-low disable; confirm against the Xframe register spec.
	 */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, "
			  "Adapter status reads: 0x%llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
/**
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: fifo information of the transmit fifo the descriptor
 *	list belongs to.
 * @txdlp: pointer to the first TxD of the descriptor list.
 * @get_off: descriptor offset; not referenced by this function.
 *
 * Unmaps every DMA buffer attached to the descriptor list (the optional
 * in-band UFO descriptor, the linear part, and all page fragments),
 * zeroes the descriptors, and returns the skb stored in Host_Control,
 * or NULL if no skb was attached.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
					struct TxD *txdlp, int get_off)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;
	struct TxD *txds;
	u16 j, frg_cnt;

	txds = txdlp;
	/* Skip (and unmap) the in-band UFO descriptor, if present */
	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
				 sizeof(u64), PCI_DMA_TODEVICE);
		txds++;
	}

	skb = (struct sk_buff *)((unsigned long)txds->Host_Control);
	if (!skb) {
		/* No skb attached: just clear the whole descriptor list */
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
		return NULL;
	}
	/* Unmap the linear (head) part of the skb */
	pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	if (frg_cnt) {
		txds++;
		/* One TxD per page fragment; stop at the first unused TxD */
		for (j = 0; j < frg_cnt; j++, txds++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
				break;
			pci_unmap_page(nic->pdev,
				       (dma_addr_t)txds->Buffer_Pointer,
				       skb_frag_size(frag), PCI_DMA_TODEVICE);
		}
	}
	memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	return skb;
}
2355
2356/**
2357 *  free_tx_buffers - Free all queued Tx buffers
2358 *  @nic : device private variable.
2359 *  Description:
2360 *  Free all queued Tx buffers.
2361 *  Return Value: void
2362 */
2363
2364static void free_tx_buffers(struct s2io_nic *nic)
2365{
2366        struct net_device *dev = nic->dev;
2367        struct sk_buff *skb;
2368        struct TxD *txdp;
2369        int i, j;
2370        int cnt = 0;
2371        struct config_param *config = &nic->config;
2372        struct mac_info *mac_control = &nic->mac_control;
2373        struct stat_block *stats = mac_control->stats_info;
2374        struct swStat *swstats = &stats->sw_stat;
2375
2376        for (i = 0; i < config->tx_fifo_num; i++) {
2377                struct tx_fifo_config *tx_cfg = &config->tx_cfg[i];
2378                struct fifo_info *fifo = &mac_control->fifos[i];
2379                unsigned long flags;
2380
2381                spin_lock_irqsave(&fifo->tx_lock, flags);
2382                for (j = 0; j < tx_cfg->fifo_len; j++) {
2383                        txdp = fifo->list_info[j].list_virt_addr;
2384                        skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2385                        if (skb) {
2386                                swstats->mem_freed += skb->truesize;
2387                                dev_kfree_skb(skb);
2388                                cnt++;
2389                        }
2390                }
2391                DBG_PRINT(INTR_DBG,
2392                          "%s: forcibly freeing %d skbs on FIFO%d\n",
2393                          dev->name, cnt, i);
2394                fifo->tx_curr_get_info.offset = 0;
2395                fifo->tx_curr_put_info.offset = 0;
2396                spin_unlock_irqrestore(&fifo->tx_lock, flags);
2397        }
2398}
2399
2400/**
2401 *   stop_nic -  To stop the nic
2402 *   @nic ; device private variable.
2403 *   Description:
2404 *   This function does exactly the opposite of what the start_nic()
2405 *   function does. This function is called to stop the device.
2406 *   Return Value:
2407 *   void.
2408 */
2409
2410static void stop_nic(struct s2io_nic *nic)
2411{
2412        struct XENA_dev_config __iomem *bar0 = nic->bar0;
2413        register u64 val64 = 0;
2414        u16 interruptible;
2415
2416        /*  Disable all interrupts */
2417        en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2418        interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2419        interruptible |= TX_PIC_INTR;
2420        en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2421
2422        /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2423        val64 = readq(&bar0->adapter_control);
2424        val64 &= ~(ADAPTER_CNTL_EN);
2425        writeq(val64, &bar0->adapter_control);
2426}
2427
/**
 *  fill_rx_buffers - Allocates the Rx side skbs
 *  @nic: device private variable.
 *  @ring: per ring structure
 *  @from_card_up: If this is true, we will map the buffer to get
 *     the dma address for buf0 and buf1 to give it to the card.
 *     Else we will sync the already mapped buffer to give it to the card.
 *  Description:
 *  The function allocates Rx side skbs and puts the physical
 *  address of these buffers into the RxD buffer pointers, so that the NIC
 *  can DMA the received frame into these locations.
 *  The NIC supports 3 receive modes, viz
 *  1. single buffer,
 *  2. three buffer and
 *  3. Five buffer modes.
 *  Each mode defines how many fragments the received frame will be split
 *  up into by the NIC. The frame is split into L3 header, L4 Header,
 *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 *  is split into 3 fragments. As of now only single buffer mode is
 *  supported.
 *   Return Value:
 *  SUCCESS on success or an appropriate -ve value on failure.
 */
static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
			   int from_card_up)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *swstats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of empty RxD slots that need refilling on this ring */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Put index has caught up with get and the RxD still holds
		 * an skb: the ring is full, nothing more to refill.
		 */
		if ((block_no == block_no1) &&
		    (off == ring->rx_curr_get_info.offset) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put info equated\n",
				  ring->dev->name);
			goto end;
		}
		/* End of block reached: wrap to the next (possibly first)
		 * block in the ring.
		 */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
			    ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor is still owned by the NIC - stop refilling */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((ring->rxd_mode == RXD_MODE_3B) &&
		     (rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu +
			HEADER_ETHERNET_II_802_3_SIZE +
			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = netdev_alloc_skb(nic->dev, size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Could not allocate skb\n",
				  ring->dev->name);
			/* Hand any already-batched descriptors to the NIC
			 * before bailing out so they are not stranded.
			 */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			swstats->mem_alloc_fail_cnt++;

			return -ENOMEM ;
		}
		swstats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1 *)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr =
				pci_map_single(ring->pdev, skb->data,
					       size - NET_IP_ALIGN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(nic->pdev,
						  rxdp1->Buffer0_ptr))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long)skb;
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3 *)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary
			 * (ALIGN_SIZE is presumably 2^n - 1 for the mask to
			 * work - confirm against its definition).
			 */
			tmp = (u64)(unsigned long)skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (from_card_up) {
				rxdp3->Buffer0_ptr =
					pci_map_single(ring->pdev, ba->ba_0,
						       BUF0_LEN,
						       PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer0_ptr))
					goto pci_map_failed;
			} else
				pci_dma_sync_single_for_device(ring->pdev,
							       (dma_addr_t)rxdp3->Buffer0_ptr,
							       BUF0_LEN,
							       PCI_DMA_FROMDEVICE);

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
								    skb->data,
								    ring->mtu + 4,
								    PCI_DMA_FROMDEVICE);

				if (pci_dma_mapping_error(nic->pdev,
							  rxdp3->Buffer2_ptr))
					goto pci_map_failed;

				if (from_card_up) {
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
							       ba->ba_1,
							       BUF1_LEN,
							       PCI_DMA_FROMDEVICE);

					if (pci_dma_mapping_error(nic->pdev,
								  rxdp3->Buffer1_ptr)) {
						/* Undo the Buffer2 mapping
						 * before bailing out.
						 */
						pci_unmap_single(ring->pdev,
								 (dma_addr_t)(unsigned long)
								 skb->data,
								 ring->mtu + 4,
								 PCI_DMA_FROMDEVICE);
						goto pci_map_failed;
					}
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
					(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Every 2^rxsync_frequency descriptors, transfer ownership
		 * of the previous batch leader to the NIC in one go; keep
		 * the current descriptor back as the next batch leader.
		 */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;

pci_map_failed:
	swstats->pci_map_fail_cnt++;
	swstats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2664
2665static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2666{
2667        struct net_device *dev = sp->dev;
2668        int j;
2669        struct sk_buff *skb;
2670        struct RxD_t *rxdp;
2671        struct RxD1 *rxdp1;
2672        struct RxD3 *rxdp3;
2673        struct mac_info *mac_control = &sp->mac_control;
2674        struct stat_block *stats = mac_control->stats_info;
2675        struct swStat *swstats = &stats->sw_stat;
2676
2677        for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2678                rxdp = mac_control->rings[ring_no].
2679                        rx_blocks[blk].rxds[j].virt_addr;
2680                skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
2681                if (!skb)
2682                        continue;
2683                if (sp->rxd_mode == RXD_MODE_1) {
2684                        rxdp1 = (struct RxD1 *)rxdp;
2685                        pci_unmap_single(sp->pdev,
2686                                         (dma_addr_t)rxdp1->Buffer0_ptr,
2687                                         dev->mtu +
2688                                         HEADER_ETHERNET_II_802_3_SIZE +
2689                                         HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
2690                                         PCI_DMA_FROMDEVICE);
2691                        memset(rxdp, 0, sizeof(struct RxD1));
2692                } else if (sp->rxd_mode == RXD_MODE_3B) {
2693                        rxdp3 = (struct RxD3 *)rxdp;
2694                        pci_unmap_single(sp->pdev,
2695                                         (dma_addr_t)rxdp3->Buffer0_ptr,
2696                                         BUF0_LEN,
2697                                         PCI_DMA_FROMDEVICE);
2698                        pci_unmap_single(sp->pdev,
2699                                         (dma_addr_t)rxdp3->Buffer1_ptr,
2700                                         BUF1_LEN,
2701                                         PCI_DMA_FROMDEVICE);
2702                        pci_unmap_single(sp->pdev,
2703                                         (dma_addr_t)rxdp3->Buffer2_ptr,
2704                                         dev->mtu + 4,
2705                                         PCI_DMA_FROMDEVICE);
2706                        memset(rxdp, 0, sizeof(struct RxD3));
2707                }
2708                swstats->mem_freed += skb->truesize;
2709                dev_kfree_skb(skb);
2710                mac_control->rings[ring_no].rx_bufs_left -= 1;
2711        }
2712}
2713
2714/**
2715 *  free_rx_buffers - Frees all Rx buffers
2716 *  @sp: device private variable.
2717 *  Description:
2718 *  This function will free all Rx buffers allocated by host.
2719 *  Return Value:
2720 *  NONE.
2721 */
2722
2723static void free_rx_buffers(struct s2io_nic *sp)
2724{
2725        struct net_device *dev = sp->dev;
2726        int i, blk = 0, buf_cnt = 0;
2727        struct config_param *config = &sp->config;
2728        struct mac_info *mac_control = &sp->mac_control;
2729
2730        for (i = 0; i < config->rx_ring_num; i++) {
2731                struct ring_info *ring = &mac_control->rings[i];
2732
2733                for (blk = 0; blk < rx_ring_sz[i]; blk++)
2734                        free_rxd_blk(sp, i, blk);
2735
2736                ring->rx_curr_put_info.block_index = 0;
2737                ring->rx_curr_get_info.block_index = 0;
2738                ring->rx_curr_put_info.offset = 0;
2739                ring->rx_curr_get_info.offset = 0;
2740                ring->rx_bufs_left = 0;
2741                DBG_PRINT(INIT_DBG, "%s: Freed 0x%x Rx Buffers on ring%d\n",
2742                          dev->name, buf_cnt, i);
2743        }
2744}
2745
2746static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
2747{
2748        if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
2749                DBG_PRINT(INFO_DBG, "%s: Out of memory in Rx Intr!!\n",
2750                          ring->dev->name);
2751        }
2752        return 0;
2753}
2754
/**
 * s2io_poll_msix - per-ring Rx NAPI poll routine (MSI-X mode)
 * @napi : pointer to the napi structure (embedded in a ring_info).
 * @budget : The number of packets that were budgeted to be processed
 * during  one pass through the 'Poll" function.
 * Description:
 * Comes into picture only if NAPI support has been incorporated. It does
 * the same thing that rx_intr_handler does, but not in a interrupt context
 * also It will process only a given number of packets.
 * Return value:
 * Number of packets processed during this poll.
 */

static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	struct net_device *dev = ring->dev;
	int pkts_processed = 0;
	u8 __iomem *addr = NULL;
	u8 val8 = 0;
	struct s2io_nic *nic = netdev_priv(dev);
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int budget_org = budget;

	if (unlikely(!is_s2io_card_up(nic)))
		return 0;

	pkts_processed = rx_intr_handler(ring, budget);
	/* Replenish the Rx descriptors consumed above */
	s2io_chk_rx_buffers(nic, ring);

	/* Budget not exhausted: no more work, finish polling and
	 * re-enable this ring's interrupt.
	 */
	if (pkts_processed < budget_org) {
		napi_complete(napi);
		/*Re Enable MSI-Rx Vector*/
		/* NOTE(review): each ring's mask appears to live in its own
		 * byte of xmsi_mask_reg (hence the 7 - ring_no byte offset);
		 * confirm the 0x3f/0xbf values against the register spec.
		 * The read-back flushes the posted write.
		 */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += 7 - ring->ring_no;
		val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
		writeb(val8, addr);
		val8 = readb(addr);
	}
	return pkts_processed;
}
2796
2797static int s2io_poll_inta(struct napi_struct *napi, int budget)
2798{
2799        struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2800        int pkts_processed = 0;
2801        int ring_pkts_processed, i;
2802        struct XENA_dev_config __iomem *bar0 = nic->bar0;
2803        int budget_org = budget;
2804        struct config_param *config = &nic->config;
2805        struct mac_info *mac_control = &nic->mac_control;
2806
2807        if (unlikely(!is_s2io_card_up(nic)))
2808                return 0;
2809
2810        for (i = 0; i < config->rx_ring_num; i++) {
2811                struct ring_info *ring = &mac_control->rings[i];
2812                ring_pkts_processed = rx_intr_handler(ring, budget);
2813                s2io_chk_rx_buffers(nic, ring);
2814                pkts_processed += ring_pkts_processed;
2815                budget -= ring_pkts_processed;
2816                if (budget <= 0)
2817                        break;
2818        }
2819        if (pkts_processed < budget_org) {
2820                napi_complete(napi);
2821                /* Re enable the Rx interrupts for the ring */
2822                writeq(0, &bar0->rx_traffic_mask);
2823                readl(&bar0->rx_traffic_mask);
2824        }
2825        return pkts_processed;
2826}
2827
2828#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * s2io_netpoll - netpoll event handler entry point
 * @dev : pointer to the device structure.
 * Description:
 *      This function will be called by upper layer to check for events on the
 * interface in situations where interrupts are disabled. It is used for
 * specific in-kernel networking tasks, such as remote consoles and kernel
 * debugging over the network (example netdump in RedHat).
 */
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = netdev_priv(dev);
	const int irq = nic->pdev->irq;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;
	struct config_param *config = &nic->config;
	struct mac_info *mac_control = &nic->mac_control;

	/* Nothing can be done if the PCI channel is offline */
	if (pci_channel_offline(nic->pdev))
		return;

	disable_irq(irq);

	/* Acknowledge all pending Rx/Tx traffic interrupts */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		rx_intr_handler(ring, 0);
	}

	/* Replenish the Rx descriptors consumed above */
	for (i = 0; i < config->rx_ring_num; i++) {
		struct ring_info *ring = &mac_control->rings[i];

		if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
			DBG_PRINT(INFO_DBG,
				  "%s: Out of memory in Rx Netpoll!!\n",
				  dev->name);
			break;
		}
	}
	enable_irq(irq);
}
2882#endif
2883
2884/**
2885 *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per ring structure.
2887 *  @budget: budget for napi processing.
2888 *  Description:
2889 *  If the interrupt is because of a received frame or if the
2890 *  receive ring contains fresh as yet un-processed frames,this function is
2891 *  called. It picks out the RxD at which place the last Rx processing had
2892 *  stopped and sends the skb to the OSM's Rx handler and then increments
2893 *  the offset.
2894 *  Return Value:
2895 *  No. of napi packets processed.
2896 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	/* No NAPI budget left: nothing to do. */
	if (budget <= 0)
		return napi_pkts;

	/* Snapshot the ring pointers: 'get' is where the last Rx pass
	 * stopped, 'put' is where the driver last posted an empty buffer.
	 */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk the descriptors the NIC has handed back to the host. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				  ring_data->dev->name);
			break;
		}
		/* Host_Control carries the skb pointer stored when the
		 * buffer was posted to the ring.
		 */
		skb = (struct sk_buff *)((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Rx Intr\n",
				  ring_data->dev->name);
			return 0;
		}
		/* Release the DMA mapping(s) made when the buffer was
		 * posted; the layout depends on the descriptor mode.
		 */
		if (ring_data->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1 *)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
					 rxdp1->Buffer0_ptr,
					 ring_data->mtu +
					 HEADER_ETHERNET_II_802_3_SIZE +
					 HEADER_802_2_SIZE +
					 HEADER_SNAP_SIZE,
					 PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			rxdp3 = (struct RxD3 *)rxdp;
			/* Buffer0 is only synced for CPU access (it stays
			 * mapped); Buffer2 (mtu + 4 bytes) is unmapped.
			 */
			pci_dma_sync_single_for_cpu(ring_data->pdev,
						    (dma_addr_t)rxdp3->Buffer0_ptr,
						    BUF0_LEN,
						    PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev,
					 (dma_addr_t)rxdp3->Buffer2_ptr,
					 ring_data->mtu + 4,
					 PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		/* Advance the get pointer, wrapping to the next block (and
		 * back to block 0) as needed, keeping the shared ring state
		 * updated as we go.
		 */
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
			rxds[get_info.offset].virt_addr;
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* Under NAPI, stop once the budget is exhausted. */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		/* Module parameter cap on packets processed per call. */
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i = 0; i < MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return napi_pkts;
}
2993
2994/**
2995 *  tx_intr_handler - Transmit interrupt handler
2996 *  @nic : device private variable
2997 *  Description:
2998 *  If an interrupt was raised to indicate DMA complete of the
2999 *  Tx packet, this function is called. It identifies the last TxD
3000 *  whose buffer was freed and frees all skbs whose data have already
3001 *  DMA'ed into the NICs internal memory.
3002 *  Return Value:
3003 *  NONE
3004 */
3005
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;
	struct stat_block *stats = nic->mac_control.stats_info;
	struct swStat *swstats = &stats->sw_stat;

	/* Trylock only: if the xmit path holds the lock, bail out and let
	 * a later interrupt reclaim the descriptors.
	 */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
	/* Reclaim TxDs the NIC has released (ownership bit cleared) until
	 * we catch up with the put pointer.
	 */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				swstats->parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch (err_mask) {
			case 2:
				swstats->tx_buf_abort_cnt++;
				break;

			case 3:
				swstats->tx_desc_abort_cnt++;
				break;

			case 7:
				swstats->tx_parity_err_cnt++;
				break;

			case 10:
				swstats->tx_link_loss_cnt++;
				break;

			case 15:
				swstats->tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmap the TxD buffers and retrieve the skb to free. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: NULL skb in Tx Free Intr\n",
				  __func__);
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		swstats->mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance and wrap the get offset (fifo_len + 1 entries). */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = fifo_data->list_info[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset = get_info.offset;
	}

	/* Restart the queue if we freed room for more packets. */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3084
3085/**
3086 *  s2io_mdio_write - Function to write in to MDIO registers
3087 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3088 *  @addr     : address value
3089 *  @value    : data value
3090 *  @dev      : pointer to net_device structure
3091 *  Description:
3092 *  This function is used to write values to the MDIO registers
3093 *  NONE
3094 */
3095static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value,
3096                            struct net_device *dev)
3097{
3098        u64 val64;
3099        struct s2io_nic *sp = netdev_priv(dev);
3100        struct XENA_dev_config __iomem *bar0 = sp->bar0;
3101
3102        /* address transaction */
3103        val64 = MDIO_MMD_INDX_ADDR(addr) |
3104                MDIO_MMD_DEV_ADDR(mmd_type) |
3105                MDIO_MMS_PRT_ADDR(0x0);
3106        writeq(val64, &bar0->mdio_control);
3107        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3108        writeq(val64, &bar0->mdio_control);
3109        udelay(100);
3110
3111        /* Data transaction */
3112        val64 = MDIO_MMD_INDX_ADDR(addr) |
3113                MDIO_MMD_DEV_ADDR(mmd_type) |
3114                MDIO_MMS_PRT_ADDR(0x0) |
3115                MDIO_MDIO_DATA(value) |
3116                MDIO_OP(MDIO_OP_WRITE_TRANS);
3117        writeq(val64, &bar0->mdio_control);
3118        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3119        writeq(val64, &bar0->mdio_control);
3120        udelay(100);
3121
3122        val64 = MDIO_MMD_INDX_ADDR(addr) |
3123                MDIO_MMD_DEV_ADDR(mmd_type) |
3124                MDIO_MMS_PRT_ADDR(0x0) |
3125                MDIO_OP(MDIO_OP_READ_TRANS);
3126        writeq(val64, &bar0->mdio_control);
3127        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3128        writeq(val64, &bar0->mdio_control);
3129        udelay(100);
3130}
3131
3132/**
3133 *  s2io_mdio_read - Function to write in to MDIO registers
3134 *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3135 *  @addr     : address value
3136 *  @dev      : pointer to net_device structure
3137 *  Description:
3138 *  This function is used to read values to the MDIO registers
3139 *  NONE
3140 */
3141static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3142{
3143        u64 val64 = 0x0;
3144        u64 rval64 = 0x0;
3145        struct s2io_nic *sp = netdev_priv(dev);
3146        struct XENA_dev_config __iomem *bar0 = sp->bar0;
3147
3148        /* address transaction */
3149        val64 = val64 | (MDIO_MMD_INDX_ADDR(addr)
3150                         | MDIO_MMD_DEV_ADDR(mmd_type)
3151                         | MDIO_MMS_PRT_ADDR(0x0));
3152        writeq(val64, &bar0->mdio_control);
3153        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3154        writeq(val64, &bar0->mdio_control);
3155        udelay(100);
3156
3157        /* Data transaction */
3158        val64 = MDIO_MMD_INDX_ADDR(addr) |
3159                MDIO_MMD_DEV_ADDR(mmd_type) |
3160                MDIO_MMS_PRT_ADDR(0x0) |
3161                MDIO_OP(MDIO_OP_READ_TRANS);
3162        writeq(val64, &bar0->mdio_control);
3163        val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3164        writeq(val64, &bar0->mdio_control);
3165        udelay(100);
3166
3167        /* Read the value from regs */
3168        rval64 = readq(&bar0->mdio_control);
3169        rval64 = rval64 & 0xFFFF0000;
3170        rval64 = rval64 >> 16;
3171        return rval64;
3172}
3173
3174/**
3175 *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
3176 *  @counter      : counter value to be updated
3177 *  @flag         : flag to indicate the status
3178 *  @type         : counter type
3179 *  Description:
3180 *  This function is to check the status of the xpak counters value
3181 *  NONE
3182 */
3183
3184static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index,
3185                                  u16 flag, u16 type)
3186{
3187        u64 mask = 0x3;
3188        u64 val64;
3189        int i;
3190        for (i = 0; i < index; i++)
3191                mask = mask << 0x2;
3192
3193        if (flag > 0) {
3194                *counter = *counter + 1;
3195                val64 = *regs_stat & mask;
3196                val64 = val64 >> (index * 0x2);
3197                val64 = val64 + 1;
3198                if (val64 == 3) {
3199                        switch (type) {
3200                        case 1:
3201                                DBG_PRINT(ERR_DBG,
3202                                          "Take Xframe NIC out of service.\n");
3203                                DBG_PRINT(ERR_DBG,
3204"Excessive temperatures may result in premature transceiver failure.\n");
3205                                break;
3206                        case 2:
3207                                DBG_PRINT(ERR_DBG,
3208                                          "Take Xframe NIC out of service.\n");
3209                                DBG_PRINT(ERR_DBG,
3210"Excessive bias currents may indicate imminent laser diode failure.\n");
3211                                break;
3212                        case 3:
3213                                DBG_PRINT(ERR_DBG,
3214                                          "Take Xframe NIC out of service.\n");
3215                                DBG_PRINT(ERR_DBG,
3216"Excessive laser output power may saturate far-end receiver.\n");
3217                                break;
3218                        default:
3219                                DBG_PRINT(ERR_DBG,
3220                                          "Incorrect XPAK Alarm type\n");
3221                        }
3222                        val64 = 0x0;
3223                }
3224                val64 = val64 << (index * 0x2);
3225                *regs_stat = (*regs_stat & (~mask)) | (val64);
3226
3227        } else {
3228                *regs_stat = *regs_stat & (~mask);
3229        }
3230}
3231
3232/**
3233 *  s2io_updt_xpak_counter - Function to update the xpak counters
3234 *  @dev         : pointer to net_device struct
3235 *  Description:
3236 *  This function is to upate the status of the xpak counters value
3237 *  NONE
3238 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = netdev_priv(dev);
	struct stat_block *stats = sp->mac_control.stats_info;
	struct xpakStat *xstats = &stats->xpak_stat;

	/* Check the communication with the MDIO slave */
	addr = MDIO_CTRL1;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);
	/* All-ones or all-zeroes means the slave did not respond. */
	if ((val64 == 0xFFFF) || (val64 == 0x0000)) {
		DBG_PRINT(ERR_DBG,
			  "ERR: MDIO slave access failed - Returned %llx\n",
			  (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of control reg 1 */
	if (val64 != MDIO_CTRL1_SPEED10G) {
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - "
			  "Returned: %llx- Expected: 0x%x\n",
			  (unsigned long long)val64, MDIO_CTRL1_SPEED10G);
		return;
	}

	/* Loading the DOM register to MDIO register */
	/* NOTE(review): write of val16 (0) appears to trigger a DOM
	 * refresh; result of the follow-up read is discarded - confirm
	 * against XPAK module documentation.
	 */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMAPMD, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Bit 7: transceiver temperature high - tracked as a 2-bit
	 * event count (field 0) via s2io_chk_xpak_counter().
	 */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&xstats->alarm_transceiver_temp_high,
			      &xstats->xpak_regs_stat,
			      0x0, flag, type);

	if (CHECKBIT(val64, 0x6))
		xstats->alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high (field 1, i.e. bit offset 0x2). */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&xstats->alarm_laser_bias_current_high,
			      &xstats->xpak_regs_stat,
			      0x2, flag, type);

	if (CHECKBIT(val64, 0x2))
		xstats->alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high (field 2, bit offset 0x4). */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&xstats->alarm_laser_output_power_high,
			      &xstats->xpak_regs_stat,
			      0x4, flag, type);

	if (CHECKBIT(val64, 0x0))
		xstats->alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMAPMD, addr, dev);

	/* Warnings are simply counted; same bit layout as the alarms. */
	if (CHECKBIT(val64, 0x7))
		xstats->warn_transceiver_temp_high++;

	if (CHECKBIT(val64, 0x6))
		xstats->warn_transceiver_temp_low++;

	if (CHECKBIT(val64, 0x3))
		xstats->warn_laser_bias_current_high++;

	if (CHECKBIT(val64, 0x2))
		xstats->warn_laser_bias_current_low++;

	if (CHECKBIT(val64, 0x1))
		xstats->warn_laser_output_power_high++;

	if (CHECKBIT(val64, 0x0))
		xstats->warn_laser_output_power_low++;
}
3330
3331/**
3332 *  wait_for_cmd_complete - waits for a command to complete.
3333 *  @sp : private member of the device structure, which is a pointer to the
3334 *  s2io_nic structure.
3335 *  Description: Function that waits for a command to Write into RMAC
3336 *  ADDR DATA registers to be completed and returns either success or
3337 *  error depending on whether the command was complete or not.
3338 *  Return value:
3339 *   SUCCESS on success and FAILURE on failure.
3340 */
3341
3342static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3343                                 int bit_state)
3344{
3345        int ret = FAILURE, cnt = 0, delay = 1;
3346        u64 val64;
3347
3348        if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3349                return FAILURE;
3350
3351        do {
3352                val64 = readq(addr);
3353                if (bit_state == S2IO_BIT_RESET) {
3354                        if (!(val64 & busy_bit)) {
3355                                ret = SUCCESS;
3356                                break;
3357                        }
3358                } else {
3359                        if (val64 & busy_bit) {
3360                                ret = SUCCESS;
3361                                break;
3362                        }
3363                }
3364
3365                if (in_interrupt())
3366                        mdelay(delay);
3367                else
3368                        msleep(delay);
3369
3370                if (++cnt >= 10)
3371                        delay = 50;
3372        } while (cnt < 20);
3373        return ret;
3374}
3375/**
3376 * check_pci_device_id - Checks if the device id is supported
3377 * @id : device id
3378 * Description: Function to check if the pci device id is supported by driver.
3379 * Return value: Actual device id if supported else PCI_ANY_ID
3380 */
3381static u16 check_pci_device_id(u16 id)
3382{
3383        switch (id) {
3384        case PCI_DEVICE_ID_HERC_WIN:
3385        case PCI_DEVICE_ID_HERC_UNI:
3386                return XFRAME_II_DEVICE;
3387        case PCI_DEVICE_ID_S2IO_UNI:
3388        case PCI_DEVICE_ID_S2IO_WIN:
3389                return XFRAME_I_DEVICE;
3390        default:
3391                return PCI_ANY_ID;
3392        }
3393}
3394
3395/**
3396 *  s2io_reset - Resets the card.
3397 *  @sp : private member of the device structure.
3398 *  Description: Function to Reset the card. This function then also
3399 *  restores the previously saved PCI configuration space registers as
3400 *  the card reset also resets the configuration space.
3401 *  Return value:
3402 *  void.
3403 */
3404
static void s2io_reset(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
	struct stat_block *stats;
	struct swStat *swstats;

	DBG_PRINT(INIT_DBG, "%s: Resetting XFrame card %s\n",
		  __func__, pci_name(sp->pdev));

	/* Back up  the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Trigger the global software reset. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after reset. */
	if (strstr(sp->product_name, "CX4"))
		msleep(750);
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		pci_save_state(sp->pdev);
		/* Config offset 0x2 is the PCI device ID; a recognized
		 * value means the device has come back from reset.
		 */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID)
		DBG_PRINT(ERR_DBG, "%s SW_Reset failed!\n", __func__);

	/* Reinstate the PCI-X command register saved above. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof(struct net_device_stats));

	stats = sp->mac_control.stats_info;
	swstats = &stats->sw_stat;

	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	up_cnt = swstats->link_up_cnt;
	down_cnt = swstats->link_down_cnt;
	up_time = swstats->link_up_time;
	down_time = swstats->link_down_time;
	reset_cnt = swstats->soft_reset_cnt;
	mem_alloc_cnt = swstats->mem_allocated;
	mem_free_cnt = swstats->mem_freed;
	watchdog_cnt = swstats->watchdog_timer_cnt;

	/* Zero the whole stats block, then restore the counters that
	 * must survive a reset.
	 */
	memset(stats, 0, sizeof(struct stat_block));

	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	swstats->link_up_cnt = up_cnt;
	swstats->link_down_cnt = down_cnt;
	swstats->link_up_time = up_time;
	swstats->link_down_time = down_time;
	swstats->soft_reset_cnt = reset_cnt;
	swstats->mem_allocated = mem_alloc_cnt;
	swstats->mem_freed = mem_free_cnt;
	swstats->watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occurred on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = false;
}
3517
3518/**
3519 *  s2io_set_swapper - to set the swapper controle on the card
3520 *  @sp : private member of the device structure,
3521 *  pointer to the s2io_nic structure.
3522 *  Description: Function to set the swapper control on the card
3523 *  correctly depending on the 'endianness' of the system.
3524 *  Return value:
3525 *  SUCCESS on success and FAILURE on failure.
3526 */
3527
static int s2io_set_swapper(struct s2io_nic *sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Feedback pattern wrong: probe the four FE/SE swapper
		 * combinations until reads come back correctly.
		 */
		int i = 0;
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, "
				  "feedback read %llx\n",
				  dev->name, (unsigned long long)val64);
			return FAILURE;
		}
		/* Remember the read-side setting that worked. */
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Now verify the write path via a write/read-back of a known
	 * pattern to the xmsi_address register.
	 */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if (val64 != valt) {
		/* Probe the write-side FE/SE combinations, keeping the
		 * working read-side bits (valr) in place.
		 */
		int i = 0;
		static const u64 value[] = {
			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
			0x0081810000818100ULL,	/* FE=1, SE=0 */
			0x0042420000424200ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};

		while (i < 4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if (val64 == valt)
				break;
			i++;
		}
		if (i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG,
				  "Write failed, Xmsi_addr reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the high 16 bits (the probed FE/SE settings) and
	 * program the final per-path swapper enables below.
	 */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		  SWAPPER_CTRL_TXP_SE |
		  SWAPPER_CTRL_TXD_R_FE |
		  SWAPPER_CTRL_TXD_R_SE |
		  SWAPPER_CTRL_TXD_W_FE |
		  SWAPPER_CTRL_TXD_W_SE |
		  SWAPPER_CTRL_TXF_R_FE |
		  SWAPPER_CTRL_RXD_R_FE |
		  SWAPPER_CTRL_RXD_R_SE |
		  SWAPPER_CTRL_RXD_W_FE |
		  SWAPPER_CTRL_RXD_W_SE |
		  SWAPPER_CTRL_RXF_W_FE |
		  SWAPPER_CTRL_XMSI_FE |
		  SWAPPER_CTRL_STATS_FE |
		  SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG,
			  "%s: Endian settings are wrong, feedback read %llx\n",
			  dev->name, (unsigned long long)val64);
		return FAILURE;
	}

	return SUCCESS;
}
3659
3660static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3661{
3662        struct XENA_dev_config __iomem *bar0 = nic->bar0;
3663        u64 val64;
3664        int ret = 0, cnt = 0;
3665
3666        do {
3667                val64 = readq(&bar0->xmsi_access);
3668                if (!(val64 & s2BIT(15)))
3669                        break;
3670                mdelay(1);
3671                cnt++;
3672        } while (cnt < 5);
3673        if (cnt == 5) {
3674                DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3675                ret = 1;
3676        }
3677
3678        return ret;
3679}
3680
3681static void restore_xmsi_data(struct s2io_nic *nic)
3682{
3683        struct XENA_dev_config __iomem *bar0 = nic->bar0;
3684        u64 val64;
3685        int i, msix_index;
3686
3687        if (nic->device_type == XFRAME_I_DEVICE)
3688                return;
3689
3690        for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3691                msix_index = (i) ? ((i-1) * 8 + 1) : 0;
3692                writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3693                writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3694                val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
3695                writeq(val64, &bar0->xmsi_access);
3696                if (wait_for_msix_trans(nic, msix_index)) {
3697                        DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
3698                                  __func__, msix_index);
3699                        continue;
3700                }
3701        }
3702}
3703
/*
 * store_xmsi_data - snapshot the NIC's XMSI address/data table
 * @nic: device private structure
 *
 * Reads each MSI-X address/data pair out of the adapter through the
 * xmsi_access register protocol and caches it in nic->msix_info[], so
 * that restore_xmsi_data() can write it back after a reset.
 * No-op on Xframe-I.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 val64, addr, data;
        int i, msix_index;

        if (nic->device_type == XFRAME_I_DEVICE)
                return;

        /* Store and display */
        for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
                /* Same vector layout as s2io_enable_msi_x(): entry 0 is the
                 * alarm vector, ring vectors are at 1, 9, 17, ... */
                msix_index = (i) ? ((i-1) * 8 + 1) : 0;
                /* s2BIT(15) starts the access for the selected entry; no
                 * s2BIT(7) here, so this presumably requests a read —
                 * compare restore_xmsi_data(). */
                val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
                writeq(val64, &bar0->xmsi_access);
                if (wait_for_msix_trans(nic, msix_index)) {
                        DBG_PRINT(ERR_DBG, "%s: index: %d failed\n",
                                  __func__, msix_index);
                        continue;
                }
                addr = readq(&bar0->xmsi_address);
                data = readq(&bar0->xmsi_data);
                /* Only cache entries the hardware actually populated. */
                if (addr && data) {
                        nic->msix_info[i].addr = addr;
                        nic->msix_info[i].data = data;
                }
        }
}
3731
/*
 * s2io_enable_msi_x - allocate and enable MSI-X vectors
 * @nic: device private structure
 *
 * Allocates the msix_entry/s2io_msix_entry tables, assigns vector 0 to
 * the alarm/fifo path and one vector per Rx ring, programs the Rx
 * steering register (rx_mat), and enables MSI-X on the PCI device.
 *
 * Returns 0 on success, -ENOMEM on allocation or vector-enable failure
 * (all partially allocated resources are released first).
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 rx_mat;
        u16 msi_control; /* Temp variable */
        int ret, i, j, msix_indx = 1;
        int size;
        struct stat_block *stats = nic->mac_control.stats_info;
        struct swStat *swstats = &stats->sw_stat;

        size = nic->num_entries * sizeof(struct msix_entry);
        nic->entries = kzalloc(size, GFP_KERNEL);
        if (!nic->entries) {
                DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
                          __func__);
                swstats->mem_alloc_fail_cnt++;
                return -ENOMEM;
        }
        swstats->mem_allocated += size;

        size = nic->num_entries * sizeof(struct s2io_msix_entry);
        nic->s2io_entries = kzalloc(size, GFP_KERNEL);
        if (!nic->s2io_entries) {
                DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
                          __func__);
                swstats->mem_alloc_fail_cnt++;
                kfree(nic->entries);
                swstats->mem_freed
                        += (nic->num_entries * sizeof(struct msix_entry));
                return -ENOMEM;
        }
        swstats->mem_allocated += size;

        /* Vector 0: alarm interrupt, shared by all Tx fifos. */
        nic->entries[0].entry = 0;
        nic->s2io_entries[0].entry = 0;
        nic->s2io_entries[0].in_use = MSIX_FLG;
        nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
        nic->s2io_entries[0].arg = &nic->mac_control.fifos;

        /* Remaining vectors use the sparse table indices 1, 9, 17, ...
         * ((i-1)*8 + 1); store_xmsi_data()/restore_xmsi_data() rely on
         * this same layout. */
        for (i = 1; i < nic->num_entries; i++) {
                nic->entries[i].entry = ((i - 1) * 8) + 1;
                nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
                nic->s2io_entries[i].arg = NULL;
                nic->s2io_entries[i].in_use = 0;
        }

        /* Steer each Rx ring's interrupt to its own vector and bind the
         * ring as the handler argument. */
        rx_mat = readq(&bar0->rx_mat);
        for (j = 0; j < nic->config.rx_ring_num; j++) {
                rx_mat |= RX_MAT_SET(j, msix_indx);
                nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
                nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
                nic->s2io_entries[j+1].in_use = MSIX_FLG;
                msix_indx += 8;
        }
        writeq(rx_mat, &bar0->rx_mat);
        readq(&bar0->rx_mat);

        /* min == max == num_entries: all-or-nothing vector allocation. */
        ret = pci_enable_msix_range(nic->pdev, nic->entries,
                                    nic->num_entries, nic->num_entries);
        /* We fail init if error or we get less vectors than min required */
        if (ret < 0) {
                DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
                kfree(nic->entries);
                swstats->mem_freed += nic->num_entries *
                        sizeof(struct msix_entry);
                kfree(nic->s2io_entries);
                swstats->mem_freed += nic->num_entries *
                        sizeof(struct s2io_msix_entry);
                nic->entries = NULL;
                nic->s2io_entries = NULL;
                return -ENOMEM;
        }

        /*
         * To enable MSI-X, MSI also needs to be enabled, due to a bug
         * in the herc NIC. (Temp change, needs to be removed later)
         */
        pci_read_config_word(nic->pdev, 0x42, &msi_control);
        msi_control |= 0x1; /* Enable MSI */
        pci_write_config_word(nic->pdev, 0x42, msi_control);

        return 0;
}
3815
3816/* Handle software interrupt used during MSI(X) test */
3817static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3818{
3819        struct s2io_nic *sp = dev_id;
3820
3821        sp->msi_detected = 1;
3822        wake_up(&sp->msi_wait);
3823
3824        return IRQ_HANDLED;
3825}
3826
/* Test interrupt path by forcing a software IRQ */
/*
 * s2io_test_msi - verify that the MSI-X interrupt path actually works
 * @sp: device private structure
 *
 * Registers a throwaway handler on vector 1, arms the adapter's
 * scheduled-interrupt timer routed to that vector, and waits up to
 * HZ/10 for the handler to fire.  The scheduled_int_ctrl register is
 * restored on exit regardless of outcome.
 *
 * Returns 0 on success, request_irq()'s error on registration failure,
 * or -EOPNOTSUPP if no interrupt was observed (caller falls back to
 * INTx).
 */
static int s2io_test_msi(struct s2io_nic *sp)
{
        struct pci_dev *pdev = sp->pdev;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int err;
        u64 val64, saved64;

        err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
                          sp->name, sp);
        if (err) {
                DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
                          sp->dev->name, pci_name(pdev), pdev->irq);
                return err;
        }

        init_waitqueue_head(&sp->msi_wait);
        sp->msi_detected = 0;

        /* Save the register so it can be restored below, then arm a
         * one-shot timer interrupt steered to MSI vector 1. */
        saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
        val64 |= SCHED_INT_CTRL_ONE_SHOT;
        val64 |= SCHED_INT_CTRL_TIMER_EN;
        val64 |= SCHED_INT_CTRL_INT2MSI(1);
        writeq(val64, &bar0->scheduled_int_ctrl);

        wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

        if (!sp->msi_detected) {
                /* MSI(X) test failed, go back to INTx mode */
                DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
                          "using MSI(X) during test\n",
                          sp->dev->name, pci_name(pdev));

                err = -EOPNOTSUPP;
        }

        free_irq(sp->entries[1].vector, sp);

        /* Undo the one-shot/timer/INT2MSI bits set above. */
        writeq(saved64, &bar0->scheduled_int_ctrl);

        return err;
}
3869
3870static void remove_msix_isr(struct s2io_nic *sp)
3871{
3872        int i;
3873        u16 msi_control;
3874
3875        for (i = 0; i < sp->num_entries; i++) {
3876                if (sp->s2io_entries[i].in_use == MSIX_REGISTERED_SUCCESS) {
3877                        int vector = sp->entries[i].vector;
3878                        void *arg = sp->s2io_entries[i].arg;
3879                        free_irq(vector, arg);
3880                }
3881        }
3882
3883        kfree(sp->entries);
3884        kfree(sp->s2io_entries);
3885        sp->entries = NULL;
3886        sp->s2io_entries = NULL;
3887
3888        pci_read_config_word(sp->pdev, 0x42, &msi_control);
3889        msi_control &= 0xFFFE; /* Disable MSI */
3890        pci_write_config_word(sp->pdev, 0x42, msi_control);
3891
3892        pci_disable_msix(sp->pdev);
3893}
3894
/* Release the legacy INTx interrupt line registered for this device. */
static void remove_inta_isr(struct s2io_nic *sp)
{
        free_irq(sp->pdev->irq, sp->dev);
}
3899
3900/* ********************************************************* *
3901 * Functions defined below concern the OS part of the driver *
3902 * ********************************************************* */
3903
3904/**
3905 *  s2io_open - open entry point of the driver
3906 *  @dev : pointer to the device structure.
3907 *  Description:
3908 *  This function is the open entry point of the driver. It mainly calls a
3909 *  function to allocate Rx buffers and inserts them into the buffer
3910 *  descriptors and then enables the Rx part of the NIC.
3911 *  Return value:
3912 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3913 *   file on failure.
3914 */
3915
3916static int s2io_open(struct net_device *dev)
3917{
3918        struct s2io_nic *sp = netdev_priv(dev);
3919        struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
3920        int err = 0;
3921
3922        /*
3923         * Make sure you have link off by default every time
3924         * Nic is initialized
3925         */
3926        netif_carrier_off(dev);
3927        sp->last_link_state = 0;
3928
3929        /* Initialize H/W and enable interrupts */
3930        err = s2io_card_up(sp);
3931        if (err) {
3932                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3933                          dev->name);
3934                goto hw_init_failed;
3935        }
3936
3937        if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
3938                DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3939                s2io_card_down(sp);
3940                err = -ENODEV;
3941                goto hw_init_failed;
3942        }
3943        s2io_start_all_tx_queue(sp);
3944        return 0;
3945
3946hw_init_failed:
3947        if (sp->config.intr_type == MSI_X) {
3948                if (sp->entries) {
3949                        kfree(sp->entries);
3950                        swstats->mem_freed += sp->num_entries *
3951                                sizeof(struct msix_entry);
3952                }
3953                if (sp->s2io_entries) {
3954                        kfree(sp->s2io_entries);
3955                        swstats->mem_freed += sp->num_entries *
3956                                sizeof(struct s2io_msix_entry);
3957                }
3958        }
3959        return err;
3960}
3961
3962/**
3963 *  s2io_close -close entry point of the driver
3964 *  @dev : device pointer.
3965 *  Description:
 *  This is the stop entry point of the driver. It needs to undo exactly
 *  whatever was done by the open entry point, thus it's usually referred to
 *  as the close function. Among other things this function mainly stops the
3969 *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3970 *  Return value:
3971 *  0 on success and an appropriate (-)ve integer as defined in errno.h
3972 *  file on failure.
3973 */
3974
3975static int s2io_close(struct net_device *dev)
3976{
3977        struct s2io_nic *sp = netdev_priv(dev);
3978        struct config_param *config = &sp->config;
3979        u64 tmp64;
3980        int offset;
3981
3982        /* Return if the device is already closed               *
3983         *  Can happen when s2io_card_up failed in change_mtu    *
3984         */
3985        if (!is_s2io_card_up(sp))
3986                return 0;
3987
3988        s2io_stop_all_tx_queue(sp);
3989        /* delete all populated mac entries */
3990        for (offset = 1; offset < config->max_mc_addr; offset++) {
3991                tmp64 = do_s2io_read_unicast_mc(sp, offset);
3992                if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
3993                        do_s2io_delete_unicast_mc(sp, tmp64);
3994        }
3995
3996        s2io_card_down(sp);
3997
3998        return 0;
3999}
4000
4001/**
 *  s2io_xmit - Tx entry point of the driver
4003 *  @skb : the socket buffer containing the Tx data.
4004 *  @dev : device pointer.
4005 *  Description :
4006 *  This function is the Tx entry point of the driver. S2IO NIC supports
4007 *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
 *  NOTE: when the device can't queue the packet, just the trans_start
 *  variable will not be updated.
4010 *  Return value:
4011 *  0 on success & 1 on failure.
4012 */
4013
static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct s2io_nic *sp = netdev_priv(dev);
        u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
        register u64 val64;
        struct TxD *txdp;
        struct TxFIFO_element __iomem *tx_fifo;
        unsigned long flags = 0;
        u16 vlan_tag = 0;
        struct fifo_info *fifo = NULL;
        int do_spin_lock = 1;
        int offload_type;
        int enable_per_list_interrupt = 0;
        struct config_param *config = &sp->config;
        struct mac_info *mac_control = &sp->mac_control;
        struct stat_block *stats = mac_control->stats_info;
        struct swStat *swstats = &stats->sw_stat;

        DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

        /* Drop zero-length skbs outright (skb->len is unsigned, so
         * <= 0 really means == 0). */
        if (unlikely(skb->len <= 0)) {
                DBG_PRINT(TX_DBG, "%s: Buffer has no data..\n", dev->name);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (!is_s2io_card_up(sp)) {
                DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
                          dev->name);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        queue = 0;
        if (vlan_tx_tag_present(skb))
                vlan_tag = vlan_tx_tag_get(skb);
        /* Fifo selection: hash TCP/UDP port pairs across the fifo set
         * (default steering) or map skb->priority (priority steering). */
        if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *ip;
                        struct tcphdr *th;
                        ip = ip_hdr(skb);

                        if (!ip_is_fragment(ip)) {
                                /* th is used for UDP too: source/dest
                                 * occupy the same first four bytes in
                                 * tcphdr and udphdr. */
                                th = (struct tcphdr *)(((unsigned char *)ip) +
                                                       ip->ihl*4);

                                if (ip->protocol == IPPROTO_TCP) {
                                        queue_len = sp->total_tcp_fifos;
                                        queue = (ntohs(th->source) +
                                                 ntohs(th->dest)) &
                                                sp->fifo_selector[queue_len - 1];
                                        if (queue >= queue_len)
                                                queue = queue_len - 1;
                                } else if (ip->protocol == IPPROTO_UDP) {
                                        queue_len = sp->total_udp_fifos;
                                        queue = (ntohs(th->source) +
                                                 ntohs(th->dest)) &
                                                sp->fifo_selector[queue_len - 1];
                                        if (queue >= queue_len)
                                                queue = queue_len - 1;
                                        queue += sp->udp_fifo_idx;
                                        if (skb->len > 1024)
                                                enable_per_list_interrupt = 1;
                                        /* UDP fifos use trylock below to
                                         * avoid spinning in softirq. */
                                        do_spin_lock = 0;
                                }
                        }
                }
        } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
                /* get fifo number based on skb->priority value */
                queue = config->fifo_mapping
                        [skb->priority & (MAX_TX_FIFOS - 1)];
        fifo = &mac_control->fifos[queue];

        if (do_spin_lock)
                spin_lock_irqsave(&fifo->tx_lock, flags);
        else {
                if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
                        return NETDEV_TX_LOCKED;
        }

        /* Back off if the selected queue is already stopped. */
        if (sp->config.multiq) {
                if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
                if (netif_queue_stopped(dev)) {
                        spin_unlock_irqrestore(&fifo->tx_lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }

        put_off = (u16)fifo->tx_curr_put_info.offset;
        get_off = (u16)fifo->tx_curr_get_info.offset;
        txdp = fifo->list_info[put_off].list_virt_addr;

        queue_len = fifo->tx_curr_put_info.fifo_len + 1;
        /* Avoid "put" pointer going beyond "get" pointer */
        if (txdp->Host_Control ||
            ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
                s2io_stop_tx_queue(sp, fifo->fifo_no);
                dev_kfree_skb_any(skb);
                spin_unlock_irqrestore(&fifo->tx_lock, flags);
                return NETDEV_TX_OK;
        }

        /* Program offload flags (LSO / checksum) into the first TxD. */
        offload_type = s2io_offload_type(skb);
        if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                txdp->Control_1 |= TXD_TCP_LSO_EN;
                txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
        }
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txdp->Control_2 |= (TXD_TX_CKO_IPV4_EN |
                                    TXD_TX_CKO_TCP_EN |
                                    TXD_TX_CKO_UDP_EN);
        }
        txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
        txdp->Control_1 |= TXD_LIST_OWN_XENA;
        txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
        if (enable_per_list_interrupt)
                if (put_off & (queue_len >> 5))
                        txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
        if (vlan_tag) {
                txdp->Control_2 |= TXD_VLAN_ENABLE;
                txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
        }

        frg_len = skb_headlen(skb);
        /* UFO: TxD0 carries an 8-byte in-band header (the IPv6 frag id)
         * that the hardware consumes; the payload starts at the next TxD. */
        if (offload_type == SKB_GSO_UDP) {
                int ufo_size;

                ufo_size = s2io_udp_mss(skb);
                ufo_size &= ~7;
                txdp->Control_1 |= TXD_UFO_EN;
                txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
                txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
                /* both variants do cpu_to_be64(be32_to_cpu(...)) */
                fifo->ufo_in_band_v[put_off] =
                        (__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
                fifo->ufo_in_band_v[put_off] =
                        (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
                txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                                      fifo->ufo_in_band_v,
                                                      sizeof(u64),
                                                      PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                        goto pci_map_failed;
                txdp++;
        }

        /* Map the linear part of the skb. */
        txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
                                              frg_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                goto pci_map_failed;

        /* Host_Control carries the skb pointer so the completion path
         * can free it. */
        txdp->Host_Control = (unsigned long)skb;
        txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
        if (offload_type == SKB_GSO_UDP)
                txdp->Control_1 |= TXD_UFO_EN;

        frg_cnt = skb_shinfo(skb)->nr_frags;
        /* For fragmented SKB. */
        for (i = 0; i < frg_cnt; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                /* A '0' length fragment will be ignored */
                if (!skb_frag_size(frag))
                        continue;
                txdp++;
                txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev,
                                                             frag, 0,
                                                             skb_frag_size(frag),
                                                             DMA_TO_DEVICE);
                txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag));
                if (offload_type == SKB_GSO_UDP)
                        txdp->Control_1 |= TXD_UFO_EN;
        }
        txdp->Control_1 |= TXD_GATHER_CODE_LAST;

        if (offload_type == SKB_GSO_UDP)
                frg_cnt++; /* as Txd0 was used for inband header */

        /* Ring the doorbell: point the fifo at the descriptor list and
         * kick List_Control. */
        tx_fifo = mac_control->tx_FIFO_start[queue];
        val64 = fifo->list_info[put_off].list_phy_addr;
        writeq(val64, &tx_fifo->TxDL_Pointer);

        val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
                 TX_FIFO_LAST_LIST);
        if (offload_type)
                val64 |= TX_FIFO_SPECIAL_FUNC;

        writeq(val64, &tx_fifo->List_Control);

        mmiowb();

        put_off++;
        if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
                put_off = 0;
        fifo->tx_curr_put_info.offset = put_off;

        /* Avoid "put" pointer going beyond "get" pointer */
        if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
                swstats->fifo_full_cnt++;
                DBG_PRINT(TX_DBG,
                          "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
                          put_off, get_off);
                s2io_stop_tx_queue(sp, fifo->fifo_no);
        }
        swstats->mem_allocated += skb->truesize;
        spin_unlock_irqrestore(&fifo->tx_lock, flags);

        /* Under MSI-X, reap completions inline; presumably this keeps
         * the fifo from filling between ring interrupts — confirm
         * against the completion path before changing. */
        if (sp->config.intr_type == MSI_X)
                tx_intr_handler(fifo);

        return NETDEV_TX_OK;

pci_map_failed:
        swstats->pci_map_fail_cnt++;
        s2io_stop_tx_queue(sp, fifo->fifo_no);
        swstats->mem_freed += skb->truesize;
        dev_kfree_skb_any(skb);
        spin_unlock_irqrestore(&fifo->tx_lock, flags);
        return NETDEV_TX_OK;
}
4242
4243static void
4244s2io_alarm_handle(unsigned long data)
4245{
4246        struct s2io_nic *sp = (struct s2io_nic *)data;
4247        struct net_device *dev = sp->dev;
4248
4249        s2io_handle_errors(dev);
4250        mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4251}
4252
/*
 * s2io_msix_ring_handle - per-Rx-ring MSI-X interrupt handler
 * @irq: interrupt number (unused)
 * @dev_id: the ring_info this vector was registered with
 *
 * NAPI mode: writes this ring's byte in xmsi_mask_reg (presumably to
 * mask the vector until the poll routine re-enables it — confirm
 * against the unmask path) and schedules the ring's NAPI context.
 * Non-NAPI mode: processes Rx completions and refills buffers inline.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
        struct ring_info *ring = (struct ring_info *)dev_id;
        struct s2io_nic *sp = ring->nic;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;

        /* Ignore interrupts that race with card down/reset. */
        if (unlikely(!is_s2io_card_up(sp)))
                return IRQ_HANDLED;

        if (sp->config.napi) {
                u8 __iomem *addr = NULL;
                u8 val8 = 0;

                /* Byte address within the 64-bit mask register for this
                 * ring; ring 0 shares its byte with the alarm bit, hence
                 * 0x7f instead of 0xff. */
                addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
                addr += (7 - ring->ring_no);
                val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
                writeb(val8, addr);
                /* Read back — presumably to flush the posted write
                 * before scheduling NAPI; result intentionally unused. */
                val8 = readb(addr);
                napi_schedule(&ring->napi);
        } else {
                rx_intr_handler(ring, 0);
                s2io_chk_rx_buffers(sp, ring);
        }

        return IRQ_HANDLED;
}
4279
/*
 * s2io_msix_fifo_handle - MSI-X alarm/Tx vector handler
 * @irq: interrupt number (unused)
 * @dev_id: the fifo_info array this vector was registered with
 *
 * Handles PIC alarms and Tx completions for all fifos.  Interrupts are
 * masked (general_int_mask set to all-ones) for the duration of the
 * handler and the saved mask is restored before returning.
 *
 * Returns IRQ_HANDLED when the interrupt was ours, IRQ_NONE otherwise.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
        int i;
        struct fifo_info *fifos = (struct fifo_info *)dev_id;
        struct s2io_nic *sp = fifos->nic;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        struct config_param *config  = &sp->config;
        u64 reason;

        if (unlikely(!is_s2io_card_up(sp)))
                return IRQ_NONE;

        reason = readq(&bar0->general_int_status);
        /* All-ones indicates the device is inaccessible (e.g. surprise
         * removal); nothing useful can be done. */
        if (unlikely(reason == S2IO_MINUS_ONE))
                /* Nothing much can be done. Get out */
                return IRQ_HANDLED;

        if (reason & (GEN_INTR_TXPIC | GEN_INTR_TXTRAFFIC)) {
                /* Mask everything while servicing. */
                writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

                if (reason & GEN_INTR_TXPIC)
                        s2io_txpic_intr_handle(sp);

                if (reason & GEN_INTR_TXTRAFFIC)
                        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

                for (i = 0; i < config->tx_fifo_num; i++)
                        tx_intr_handler(&fifos[i]);

                /* Restore the saved mask and flush with a read. */
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);
                return IRQ_HANDLED;
        }
        /* The interrupt was not raised by us */
        return IRQ_NONE;
}
4316
/*
 * s2io_txpic_intr_handle - service PIC/GPIO interrupts (link changes)
 * @sp: device private structure
 *
 * Decodes GPIO link-up/link-down events: on link-up, enables the
 * adapter and LED and reports LINK_UP; on link-down, reports LINK_DOWN
 * and turns the LED off.  In either stable case the opposite-direction
 * interrupt is unmasked and the same-direction one masked, so only the
 * next transition fires.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;

        val64 = readq(&bar0->pic_int_status);
        if (val64 & PIC_INT_GPIO) {
                val64 = readq(&bar0->gpio_int_reg);
                if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
                    (val64 & GPIO_INT_REG_LINK_UP)) {
                        /*
                         * This is unstable state so clear both up/down
                         * interrupt and adapter to re-evaluate the link state.
                         */
                        val64 |= GPIO_INT_REG_LINK_DOWN;
                        val64 |= GPIO_INT_REG_LINK_UP;
                        writeq(val64, &bar0->gpio_int_reg);
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~(GPIO_INT_MASK_LINK_UP |
                                   GPIO_INT_MASK_LINK_DOWN);
                        writeq(val64, &bar0->gpio_int_mask);
                } else if (val64 & GPIO_INT_REG_LINK_UP) {
                        val64 = readq(&bar0->adapter_status);
                        /* Enable Adapter */
                        val64 = readq(&bar0->adapter_control);
                        val64 |= ADAPTER_CNTL_EN;
                        writeq(val64, &bar0->adapter_control);
                        val64 |= ADAPTER_LED_ON;
                        writeq(val64, &bar0->adapter_control);
                        if (!sp->device_enabled_once)
                                sp->device_enabled_once = 1;

                        s2io_link(sp, LINK_UP);
                        /*
                         * unmask link down interrupt and mask link-up
                         * intr
                         */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_DOWN;
                        val64 |= GPIO_INT_MASK_LINK_UP;
                        writeq(val64, &bar0->gpio_int_mask);

                } else if (val64 & GPIO_INT_REG_LINK_DOWN) {
                        val64 = readq(&bar0->adapter_status);
                        s2io_link(sp, LINK_DOWN);
                        /* Link is down so unmask link up interrupt */
                        val64 = readq(&bar0->gpio_int_mask);
                        val64 &= ~GPIO_INT_MASK_LINK_UP;
                        val64 |= GPIO_INT_MASK_LINK_DOWN;
                        writeq(val64, &bar0->gpio_int_mask);

                        /* turn off LED */
                        val64 = readq(&bar0->adapter_control);
                        val64 = val64 & (~ADAPTER_LED_ON);
                        writeq(val64, &bar0->adapter_control);
                }
        }
        /* Final read — result unused; presumably flushes the posted
         * mask writes above. TODO(review): confirm it is required. */
        val64 = readq(&bar0->gpio_int_mask);
}
4376
4377/**
 *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4379 *  @value: alarm bits
4380 *  @addr: address value
4381 *  @cnt: counter variable
4382 *  Description: Check for alarm and increment the counter
4383 *  Return Value:
4384 *  1 - if alarm bit set
4385 *  0 - if alarm bit is not set
4386 */
4387static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
4388                                 unsigned long long *cnt)
4389{
4390        u64 val64;
4391        val64 = readq(addr);
4392        if (val64 & value) {
4393                writeq(val64, addr);
4394                (*cnt)++;
4395                return 1;
4396        }
4397        return 0;
4398
4399}
4400
4401/**
4402 *  s2io_handle_errors - Xframe error indication handler
4403 *  @nic: device private variable
4404 *  Description: Handle alarms such as loss of link, single or
4405 *  double ECC errors, critical and serious errors.
4406 *  Return Value:
4407 *  NONE
4408 */
4409static void s2io_handle_errors(void *dev_id)
4410{
4411        struct net_device *dev = (struct net_device *)dev_id;
4412        struct s2io_nic *sp = netdev_priv(dev);
4413        struct XENA_dev_config __iomem *bar0 = sp->bar0;
4414        u64 temp64 = 0, val64 = 0;
4415        int i = 0;
4416
4417        struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4418        struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4419
4420        if (!is_s2io_card_up(sp))
4421                return;
4422
4423        if (pci_channel_offline(sp->pdev))
4424                return;
4425
4426        memset(&sw_stat->ring_full_cnt, 0,
4427               sizeof(sw_stat->ring_full_cnt));
4428
4429        /* Handling the XPAK counters update */
4430        if (stats->xpak_timer_count < 72000) {
4431                /* waiting for an hour */
4432                stats->xpak_timer_count++;
4433        } else {
4434                s2io_updt_xpak_counter(dev);
4435                /* reset the count to zero */
4436                stats->xpak_timer_count = 0;
4437        }
4438
4439        /* Handling link status change error Intr */
4440        if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4441                val64 = readq(&bar0->mac_rmac_err_reg);
4442                writeq(val64, &bar0->mac_rmac_err_reg);
4443                if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4444                        schedule_work(&sp->set_link_task);
4445        }
4446
4447        /* In case of a serious error, the device will be Reset. */
4448        if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4449                                  &sw_stat->serious_err_cnt))
4450                goto reset;
4451
4452        /* Check for data parity error */
4453        if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4454                                  &sw_stat->parity_err_cnt))
4455                goto reset;
4456
4457        /* Check for ring full counter */
4458        if (sp->device_type == XFRAME_II_DEVICE) {
4459                val64 = readq(&bar0->ring_bump_counter1);
4460                for (i = 0; i < 4; i++) {
4461                        temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4462                        temp64 >>= 64 - ((i+1)*16);
4463                        sw_stat->ring_full_cnt[i] += temp64;
4464                }
4465
4466                val64 = readq(&bar0->ring_bump_counter2);
4467                for (i = 0; i < 4; i++) {
4468                        temp64 = (val64 & vBIT(0xFFFF, (i*16), 16));
4469                        temp64 >>= 64 - ((i+1)*16);
4470                        sw_stat->ring_full_cnt[i+4] += temp64;
4471                }
4472        }
4473
4474        val64 = readq(&bar0->txdma_int_status);
4475        /*check for pfc_err*/
4476        if (val64 & TXDMA_PFC_INT) {
4477                if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
4478                                          PFC_MISC_0_ERR | PFC_MISC_1_ERR |
4479                                          PFC_PCIX_ERR,
4480                                          &bar0->pfc_err_reg,
4481                                          &sw_stat->pfc_err_cnt))
4482                        goto reset;
4483                do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR,
4484                                      &bar0->pfc_err_reg,
4485                                      &sw_stat->pfc_err_cnt);
4486        }
4487
4488        /*check for tda_err*/
4489        if (val64 & TXDMA_TDA_INT) {
4490                if (do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR |
4491                                          TDA_SM0_ERR_ALARM |
4492                                          TDA_SM1_ERR_ALARM,
4493                                          &bar0->tda_err_reg,
4494                                          &sw_stat->tda_err_cnt))
4495                        goto reset;
4496                do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4497                                      &bar0->tda_err_reg,
4498                                      &sw_stat->tda_err_cnt);
4499        }
4500        /*check for pcc_err*/
4501        if (val64 & TXDMA_PCC_INT) {
4502                if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
4503                                          PCC_N_SERR | PCC_6_COF_OV_ERR |
4504                                          PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
4505                                          PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR |
4506                                          PCC_TXB_ECC_DB_ERR,
4507                                          &bar0->pcc_err_reg,
4508                                          &sw_stat->pcc_err_cnt))
4509                        goto reset;
4510                do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4511                                      &bar0->pcc_err_reg,
4512                                      &sw_stat->pcc_err_cnt);
4513        }
4514
4515        /*check for tti_err*/
4516        if (val64 & TXDMA_TTI_INT) {
4517                if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM,
4518                                          &bar0->tti_err_reg,
4519                                          &sw_stat->tti_err_cnt))
4520                        goto reset;
4521                do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4522                                      &bar0->tti_err_reg,
4523                                      &sw_stat->tti_err_cnt);
4524        }
4525
4526        /*check for lso_err*/
4527        if (val64 & TXDMA_LSO_INT) {
4528                if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT |
4529                                          LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4530                                          &bar0->lso_err_reg,
4531                                          &sw_stat->lso_err_cnt))
4532                        goto reset;
4533                do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4534                                      &bar0->lso_err_reg,
4535                                      &sw_stat->lso_err_cnt);
4536        }
4537
4538        /*check for tpa_err*/
4539        if (val64 & TXDMA_TPA_INT) {
4540                if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM,
4541                                          &bar0->tpa_err_reg,
4542                                          &sw_stat->tpa_err_cnt))
4543                        goto reset;
4544                do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP,
4545                                      &bar0->tpa_err_reg,
4546                                      &sw_stat->tpa_err_cnt);
4547        }
4548
4549        /*check for sm_err*/
4550        if (val64 & TXDMA_SM_INT) {
4551                if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM,
4552                                          &bar0->sm_err_reg,
4553                                          &sw_stat->sm_err_cnt))
4554                        goto reset;
4555        }
4556
4557        val64 = readq(&bar0->mac_int_status);
4558        if (val64 & MAC_INT_STATUS_TMAC_INT) {
4559                if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4560                                          &bar0->mac_tmac_err_reg,
4561                                          &sw_stat->mac_tmac_err_cnt))
4562                        goto reset;
4563                do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
4564                                      TMAC_DESC_ECC_SG_ERR |
4565                                      TMAC_DESC_ECC_DB_ERR,
4566                                      &bar0->mac_tmac_err_reg,
4567                                      &sw_stat->mac_tmac_err_cnt);
4568        }
4569
4570        val64 = readq(&bar0->xgxs_int_status);
4571        if (val64 & XGXS_INT_STATUS_TXGXS) {
4572                if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4573                                          &bar0->xgxs_txgxs_err_reg,
4574                                          &sw_stat->xgxs_txgxs_err_cnt))
4575                        goto reset;
4576                do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4577                                      &bar0->xgxs_txgxs_err_reg,
4578                                      &sw_stat->xgxs_txgxs_err_cnt);
4579        }
4580
4581        val64 = readq(&bar0->rxdma_int_status);
4582        if (val64 & RXDMA_INT_RC_INT_M) {
4583                if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR |
4584                                          RC_FTC_ECC_DB_ERR |
4585                                          RC_PRCn_SM_ERR_ALARM |
4586                                          RC_FTC_SM_ERR_ALARM,
4587                                          &bar0->rc_err_reg,
4588                                          &sw_stat->rc_err_cnt))
4589                        goto reset;
4590                do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR |
4591                                      RC_FTC_ECC_SG_ERR |
4592                                      RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4593                                      &sw_stat->rc_err_cnt);
4594                if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn |
4595                                          PRC_PCI_AB_WR_Rn |
4596                                          PRC_PCI_AB_F_WR_Rn,
4597                                          &bar0->prc_pcix_err_reg,
4598                                          &sw_stat->prc_pcix_err_cnt))
4599                        goto reset;
4600                do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn |
4601                                      PRC_PCI_DP_WR_Rn |
4602                                      PRC_PCI_DP_F_WR_Rn,
4603                                      &bar0->prc_pcix_err_reg,
4604                                      &sw_stat->prc_pcix_err_cnt);
4605        }
4606
4607        if (val64 & RXDMA_INT_RPA_INT_M) {
4608                if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4609                                          &bar0->rpa_err_reg,
4610                                          &sw_stat->rpa_err_cnt))
4611                        goto reset;
4612                do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4613                                      &bar0->rpa_err_reg,
4614                                      &sw_stat->rpa_err_cnt);
4615        }
4616
4617        if (val64 & RXDMA_INT_RDA_INT_M) {
4618                if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR |
4619                                          RDA_FRM_ECC_DB_N_AERR |
4620                                          RDA_SM1_ERR_ALARM |
4621                                          RDA_SM0_ERR_ALARM |
4622                                          RDA_RXD_ECC_DB_SERR,
4623                                          &bar0->rda_err_reg,
4624                                          &sw_stat->rda_err_cnt))
4625                        goto reset;
4626                do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR |
4627                                      RDA_FRM_ECC_SG_ERR |
4628                                      RDA_MISC_ERR |
4629                                      RDA_PCIX_ERR,
4630                                      &bar0->rda_err_reg,
4631                                      &sw_stat->rda_err_cnt);
4632        }
4633
4634        if (val64 & RXDMA_INT_RTI_INT_M) {
4635                if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM,
4636                                          &bar0->rti_err_reg,
4637                                          &sw_stat->rti_err_cnt))
4638                        goto reset;
4639                do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4640                                      &bar0->rti_err_reg,
4641                                      &sw_stat->rti_err_cnt);
4642        }
4643
4644        val64 = readq(&bar0->mac_int_status);
4645        if (val64 & MAC_INT_STATUS_RMAC_INT) {
4646                if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4647                                          &bar0->mac_rmac_err_reg,
4648                                          &sw_stat->mac_rmac_err_cnt))
4649                        goto reset;
4650                do_s2io_chk_alarm_bit(RMAC_UNUSED_INT |
4651                                      RMAC_SINGLE_ECC_ERR |
4652                                      RMAC_DOUBLE_ECC_ERR,
4653                                      &bar0->mac_rmac_err_reg,
4654                                      &sw_stat->mac_rmac_err_cnt);
4655        }
4656
4657        val64 = readq(&bar0->xgxs_int_status);
4658        if (val64 & XGXS_INT_STATUS_RXGXS) {
4659                if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4660                                          &bar0->xgxs_rxgxs_err_reg,
4661                                          &sw_stat->xgxs_rxgxs_err_cnt))
4662                        goto reset;
4663        }
4664
4665        val64 = readq(&bar0->mc_int_status);
4666        if (val64 & MC_INT_STATUS_MC_INT) {
4667                if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR,