linux/drivers/net/ethernet/sfc/efx.c
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "ef100.h"
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"

#include "mcdi_port_common.h"
#include "mcdi_pcol.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
MODULE_PARM_DESC(interrupt_mode,
                 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");

module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

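/* Illustrative usage only (assumes the driver module is loaded as "sfc"):
 *   modprobe sfc interrupt_mode=1 rss_cpus=4
 * would request MSI interrupts and four RSS channels.
 */
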
/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
                 "Use separate channels for TX and RX");

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings.  They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 *   512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

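/* Both defaults can be tuned at runtime with the standard ethtool
 * coalescing options, for example (illustrative):
 *   ethtool -C <iface> rx-usecs 60 tx-usecs 150 adaptive-rx on
 */
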
static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                         NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
                         NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
                         NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_remove_port(struct efx_nic *efx);
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
                        u32 flags);

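/* Assert that the RTNL lock (which serialises resets) is held whenever the
 * device is in a state where a reset could be scheduled.
 */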
#define EFX_ASSERT_RESET_SERIALISED(efx)                \
        do {                                            \
                if ((efx->state == STATE_READY) ||      \
                    (efx->state == STATE_RECOVERY) ||   \
                    (efx->state == STATE_DISABLED))     \
                        ASSERT_RTNL();                  \
        } while (0)

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

static void efx_fini_port(struct efx_nic *efx);

static int efx_probe_port(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "create port\n");

        if (phy_flash_cfg)
                efx->phy_mode = PHY_MODE_SPECIAL;

        /* Connect up MAC/PHY operations table */
        rc = efx->type->probe_port(efx);
        if (rc)
                return rc;

        /* Initialise MAC address to permanent address */
        ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

        return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, drv, efx->net_dev, "init port\n");

        mutex_lock(&efx->mac_lock);

        efx->port_initialized = true;

        /* Ensure the PHY advertises the correct flow control settings */
        rc = efx_mcdi_port_reconfigure(efx);
        if (rc && rc != -EPERM)
                goto fail;

        mutex_unlock(&efx->mac_lock);
        return 0;

fail:
        mutex_unlock(&efx->mac_lock);
        return rc;
}

static void efx_fini_port(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

        if (!efx->port_initialized)
                return;

        efx->port_initialized = false;

        efx->link_state.up = false;
        efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

        efx->type->remove_port(efx);
}

/**************************************************************************
 *
 * NIC handling
 *
 **************************************************************************/

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
        return left->type == right->type &&
                left->vpd_sn && right->vpd_sn &&
                !strcmp(left->vpd_sn, right->vpd_sn);
}

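/* Functions that report the same VPD serial number belong to the same
 * controller; the primary function keeps its secondaries on a list, and
 * functions probed before their primary wait on efx_unassociated_list.
 */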
static void efx_associate(struct efx_nic *efx)
{
        struct efx_nic *other, *next;

        if (efx->primary == efx) {
                /* Adding primary function; look for secondaries */

                netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
                list_add_tail(&efx->node, &efx_primary_list);

                list_for_each_entry_safe(other, next, &efx_unassociated_list,
                                         node) {
                        if (efx_same_controller(efx, other)) {
                                list_del(&other->node);
                                netif_dbg(other, probe, other->net_dev,
                                          "moving to secondary list of %s %s\n",
                                          pci_name(efx->pci_dev),
                                          efx->net_dev->name);
                                list_add_tail(&other->node,
                                              &efx->secondary_list);
                                other->primary = efx;
                        }
                }
        } else {
                /* Adding secondary function; look for primary */

                list_for_each_entry(other, &efx_primary_list, node) {
                        if (efx_same_controller(efx, other)) {
                                netif_dbg(efx, probe, efx->net_dev,
                                          "adding to secondary list of %s %s\n",
                                          pci_name(other->pci_dev),
                                          other->net_dev->name);
                                list_add_tail(&efx->node,
                                              &other->secondary_list);
                                efx->primary = other;
                                return;
                        }
                }

                netif_dbg(efx, probe, efx->net_dev,
                          "adding to unassociated list\n");
                list_add_tail(&efx->node, &efx_unassociated_list);
        }
}

static void efx_dissociate(struct efx_nic *efx)
{
        struct efx_nic *other, *next;

        list_del(&efx->node);
        efx->primary = NULL;

        list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
                list_del(&other->node);
                netif_dbg(other, probe, other->net_dev,
                          "moving to unassociated list\n");
                list_add_tail(&other->node, &efx_unassociated_list);
                other->primary = NULL;
        }
}

static int efx_probe_nic(struct efx_nic *efx)
{
        int rc;

        netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

        /* Carry out hardware-type specific initialisation */
        rc = efx->type->probe(efx);
        if (rc)
                return rc;

        do {
                if (!efx->max_channels || !efx->max_tx_channels) {
                        netif_err(efx, drv, efx->net_dev,
                                  "Insufficient resources to allocate"
                                  " any channels\n");
                        rc = -ENOSPC;
                        goto fail1;
                }

                /* Determine the number of channels and queues by trying
                 * to hook in MSI-X interrupts.
                 */
                rc = efx_probe_interrupts(efx);
                if (rc)
                        goto fail1;

                rc = efx_set_channels(efx);
                if (rc)
                        goto fail1;

                /* dimension_resources can fail with EAGAIN */
                rc = efx->type->dimension_resources(efx);
                if (rc != 0 && rc != -EAGAIN)
                        goto fail2;

                if (rc == -EAGAIN)
                        /* try again with new max_channels */
                        efx_remove_interrupts(efx);

        } while (rc == -EAGAIN);

        if (efx->n_channels > 1)
                netdev_rss_key_fill(efx->rss_context.rx_hash_key,
                                    sizeof(efx->rss_context.rx_hash_key));
        efx_set_default_rx_indir_table(efx, &efx->rss_context);

        /* Initialise the interrupt moderation settings */
        efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
                                true);

        return 0;

fail2:
        efx_remove_interrupts(efx);
fail1:
        efx->type->remove(efx);
        return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
        netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

        efx_remove_interrupts(efx);
        efx->type->remove(efx);
}

/**************************************************************************
 *
 * NIC startup/shutdown
 *
 *************************************************************************/

static int efx_probe_all(struct efx_nic *efx)
{
        int rc;

        rc = efx_probe_nic(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
                goto fail1;
        }

        rc = efx_probe_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev, "failed to create port\n");
                goto fail2;
        }

        BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
        if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
                rc = -EINVAL;
                goto fail3;
        }

#ifdef CONFIG_SFC_SRIOV
        rc = efx->type->vswitching_probe(efx);
        if (rc) /* not fatal; the PF will still work fine */
                netif_warn(efx, probe, efx->net_dev,
                           "failed to setup vswitching rc=%d;"
                           " VFs may not function\n", rc);
#endif

        rc = efx_probe_filters(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to create filter tables\n");
                goto fail4;
        }

        rc = efx_probe_channels(efx);
        if (rc)
                goto fail5;

        return 0;

 fail5:
        efx_remove_filters(efx);
 fail4:
#ifdef CONFIG_SFC_SRIOV
        efx->type->vswitching_remove(efx);
#endif
 fail3:
        efx_remove_port(efx);
 fail2:
        efx_remove_nic(efx);
 fail1:
        return rc;
}

static void efx_remove_all(struct efx_nic *efx)
{
        rtnl_lock();
        efx_xdp_setup_prog(efx, NULL);
        rtnl_unlock();

        efx_remove_channels(efx);
        efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
        efx->type->vswitching_remove(efx);
#endif
        efx_remove_port(efx);
        efx_remove_nic(efx);
}

/**************************************************************************
 *
 * Interrupt moderation
 *
 **************************************************************************/
unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
        if (usecs == 0)
                return 0;
        if (usecs * 1000 < efx->timer_quantum_ns)
                return 1; /* never round down to 0 */
        return usecs * 1000 / efx->timer_quantum_ns;
}

unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
{
        /* We must round up when converting ticks to microseconds
         * because we round down when converting the other way.
         */
        return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
}

/* Set interrupt moderation parameters */
int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                            unsigned int rx_usecs, bool rx_adaptive,
                            bool rx_may_override_tx)
{
        struct efx_channel *channel;
        unsigned int timer_max_us;

        EFX_ASSERT_RESET_SERIALISED(efx);

        timer_max_us = efx->timer_max_ns / 1000;

        if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
                return -EINVAL;

        if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
            !rx_may_override_tx) {
                netif_err(efx, drv, efx->net_dev, "Channels are shared. "
                          "RX and TX IRQ moderation must be equal\n");
                return -EINVAL;
        }

        efx->irq_rx_adaptive = rx_adaptive;
        efx->irq_rx_moderation_us = rx_usecs;
        efx_for_each_channel(channel, efx) {
                if (efx_channel_has_rx_queue(channel))
                        channel->irq_moderation_us = rx_usecs;
                else if (efx_channel_has_tx_queues(channel))
                        channel->irq_moderation_us = tx_usecs;
                else if (efx_channel_is_xdp_tx(channel))
                        channel->irq_moderation_us = tx_usecs;
        }

        return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive)
{
        *rx_adaptive = efx->irq_rx_adaptive;
        *rx_usecs = efx->irq_rx_moderation_us;

        /* If channels are shared between RX and TX, so is IRQ
         * moderation.  Otherwise, IRQ moderation is the same for all
         * TX channels and is not adaptive.
         */
        if (efx->tx_channel_offset == 0) {
                *tx_usecs = *rx_usecs;
        } else {
                struct efx_channel *tx_channel;

                tx_channel = efx->channel[efx->tx_channel_offset];
                *tx_usecs = tx_channel->irq_moderation_us;
        }
}

/**************************************************************************
 *
 * ioctls
 *
 *************************************************************************/

/* Net device ioctl
 * Context: process, rtnl_lock() held.
 */
static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct mii_ioctl_data *data = if_mii(ifr);

        if (cmd == SIOCSHWTSTAMP)
                return efx_ptp_set_ts_config(efx, ifr);
        if (cmd == SIOCGHWTSTAMP)
                return efx_ptp_get_ts_config(efx, ifr);

        /* Convert phy_id from older PRTAD/DEVAD format */
        if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
            (data->phy_id & 0xfc00) == 0x0400)
                data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

        return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
int efx_net_open(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc;

        netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());

        rc = efx_check_disabled(efx);
        if (rc)
                return rc;
        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;
        if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
                return -EIO;

        /* Notify the kernel of the link state polled during driver load,
         * before the monitor starts running */
        efx_link_status_changed(efx);

        efx_start_all(efx);
        if (efx->state == STATE_DISABLED || efx->reset_pending)
                netif_device_detach(efx->net_dev);
        efx_selftest_async_start(efx);
        return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
int efx_net_stop(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());

        /* Stop the device and flush all the channels */
        efx_stop_all(efx);

        return 0;
}

static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->type->vlan_rx_add_vid)
                return efx->type->vlan_rx_add_vid(efx, proto, vid);
        else
                return -EOPNOTSUPP;
}

static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        if (efx->type->vlan_rx_kill_vid)
                return efx->type->vlan_rx_kill_vid(efx, proto, vid);
        else
                return -EOPNOTSUPP;
}

static const struct net_device_ops efx_netdev_ops = {
        .ndo_open               = efx_net_open,
        .ndo_stop               = efx_net_stop,
        .ndo_get_stats64        = efx_net_stats,
        .ndo_tx_timeout         = efx_watchdog,
        .ndo_start_xmit         = efx_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = efx_ioctl,
        .ndo_change_mtu         = efx_change_mtu,
        .ndo_set_mac_address    = efx_set_mac_address,
        .ndo_set_rx_mode        = efx_set_rx_mode,
        .ndo_set_features       = efx_set_features,
        .ndo_features_check     = efx_features_check,
        .ndo_vlan_rx_add_vid    = efx_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
        .ndo_set_vf_mac         = efx_sriov_set_vf_mac,
        .ndo_set_vf_vlan        = efx_sriov_set_vf_vlan,
        .ndo_set_vf_spoofchk    = efx_sriov_set_vf_spoofchk,
        .ndo_get_vf_config      = efx_sriov_get_vf_config,
        .ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
#endif
        .ndo_get_phys_port_id   = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
        .ndo_setup_tc           = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
#endif
        .ndo_xdp_xmit           = efx_xdp_xmit,
        .ndo_bpf                = efx_xdp
};

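/* Install (or, with prog == NULL, remove) an XDP program.  Called with the
 * RTNL held, either via .ndo_bpf or from efx_remove_all().
 */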
static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
{
        struct bpf_prog *old_prog;

        if (efx->xdp_rxq_info_failed) {
                netif_err(efx, drv, efx->net_dev,
                          "Unable to bind XDP program due to previous failure of rxq_info\n");
                return -EINVAL;
        }

        if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
                netif_err(efx, drv, efx->net_dev,
                          "Unable to configure XDP with MTU of %d (max: %d)\n",
                          efx->net_dev->mtu, efx_xdp_max_mtu(efx));
                return -EINVAL;
        }

        old_prog = rtnl_dereference(efx->xdp_prog);
        rcu_assign_pointer(efx->xdp_prog, prog);
        /* Release the reference that was originally passed by the caller. */
        if (old_prog)
                bpf_prog_put(old_prog);

        return 0;
}

/* Context: process, rtnl_lock() held. */
static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
        struct efx_nic *efx = netdev_priv(dev);

        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return efx_xdp_setup_prog(efx, xdp->prog);
        default:
                return -EINVAL;
        }
}

static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
                        u32 flags)
{
        struct efx_nic *efx = netdev_priv(dev);

        if (!netif_running(dev))
                return -EINVAL;

        return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
}

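/* Keep the driver, MTD partition and channel names in sync with the netdev
 * name after a rename.
 */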
static void efx_update_name(struct efx_nic *efx)
{
        strcpy(efx->name, efx->net_dev->name);
        efx_mtd_rename(efx);
        efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
{
        struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);

        if ((net_dev->netdev_ops == &efx_netdev_ops) &&
            event == NETDEV_CHANGENAME)
                efx_update_name(netdev_priv(net_dev));

        return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
        .notifier_call = efx_netdev_event,
};

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct efx_nic *efx = dev_get_drvdata(dev);
        return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);

static int efx_register_netdev(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        struct efx_channel *channel;
        int rc;

        net_dev->watchdog_timeo = 5 * HZ;
        net_dev->irq = efx->pci_dev->irq;
        net_dev->netdev_ops = &efx_netdev_ops;
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                net_dev->priv_flags |= IFF_UNICAST_FLT;
        net_dev->ethtool_ops = &efx_ethtool_ops;
        net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
        net_dev->min_mtu = EFX_MIN_MTU;
        net_dev->max_mtu = EFX_MAX_MTU;

        rtnl_lock();

        /* Enable resets to be scheduled and check whether any were
         * already requested.  If so, the NIC is probably hosed so we
         * abort.
         */
        efx->state = STATE_READY;
        smp_mb(); /* ensure we change state before checking reset_pending */
        if (efx->reset_pending) {
                netif_err(efx, probe, efx->net_dev,
                          "aborting probe due to scheduled reset\n");
                rc = -EIO;
                goto fail_locked;
        }

        rc = dev_alloc_name(net_dev, net_dev->name);
        if (rc < 0)
                goto fail_locked;
        efx_update_name(efx);

        /* Always start with carrier off; PHY events will detect the link */
        netif_carrier_off(net_dev);

        rc = register_netdevice(net_dev);
        if (rc)
                goto fail_locked;

        efx_for_each_channel(channel, efx) {
                struct efx_tx_queue *tx_queue;
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_init_tx_queue_core_txq(tx_queue);
        }

        efx_associate(efx);

        rtnl_unlock();

        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
        if (rc) {
                netif_err(efx, drv, efx->net_dev,
                          "failed to init net dev attributes\n");
                goto fail_registered;
        }

        efx_init_mcdi_logging(efx);

        return 0;

fail_registered:
        rtnl_lock();
        efx_dissociate(efx);
        unregister_netdevice(net_dev);
fail_locked:
        efx->state = STATE_UNINIT;
        rtnl_unlock();
        netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
        return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
        if (!efx->net_dev)
                return;

        BUG_ON(netdev_priv(efx->net_dev) != efx);

        if (efx_dev_registered(efx)) {
                strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
                efx_fini_mcdi_logging(efx);
                device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
                unregister_netdev(efx->net_dev);
        }
}

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static const struct pci_device_id efx_pci_table[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),  /* SFC9020 */
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),  /* SFL9021 */
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),  /* SFC9140 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),  /* SFC9220 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
         .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
        {0}                     /* end of list */
};

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
{
        u64 n_rx_nodesc_trunc = 0;
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
        stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
        stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
}

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
        /* Flush reset_work. It can no longer be scheduled since we
         * are not READY.
         */
        BUG_ON(efx->state == STATE_READY);
        efx_flush_reset_workqueue(efx);

        efx_disable_interrupts(efx);
        efx_clear_interrupt_affinity(efx);
        efx_nic_fini_interrupt(efx);
        efx_fini_port(efx);
        efx->type->fini(efx);
        efx_fini_napi(efx);
        efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).  A PF can call
 * this on its VFs to ensure they are unbound first.
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
        struct efx_nic *efx;

        efx = pci_get_drvdata(pci_dev);
        if (!efx)
                return;

        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
        efx_dissociate(efx);
        dev_close(efx->net_dev);
        efx_disable_interrupts(efx);
        efx->state = STATE_UNINIT;
        rtnl_unlock();

        if (efx->type->sriov_fini)
                efx->type->sriov_fini(efx);

        efx_unregister_netdev(efx);

        efx_mtd_remove(efx);

        efx_pci_remove_main(efx);

        efx_fini_io(efx);
        netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

        efx_fini_struct(efx);
        free_netdev(efx->net_dev);

        pci_disable_pcie_error_reporting(pci_dev);
}

/* NIC VPD information
 * Called during probe to display the part number of the
 * installed NIC.  VPD is potentially very large but this should
 * always appear within the first 512 bytes.
 */
#define SFC_VPD_LEN 512
static void efx_probe_vpd_strings(struct efx_nic *efx)
{
        struct pci_dev *dev = efx->pci_dev;
        char vpd_data[SFC_VPD_LEN];
        ssize_t vpd_size;
        int ro_start, ro_size, i, j;

        /* Get the vpd data from the device */
        vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
        if (vpd_size <= 0) {
                netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
                return;
        }

        /* Get the Read only section */
        ro_start = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
        if (ro_start < 0) {
                netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
                return;
        }

        ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
        j = ro_size;
        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
        if (i + j > vpd_size)
                j = vpd_size - i;

        /* Get the Part number */
        i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
        if (i < 0) {
                netif_err(efx, drv, efx->net_dev, "Part number not found\n");
                return;
        }

        j = pci_vpd_info_field_size(&vpd_data[i]);
        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (i + j > vpd_size) {
                netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
                return;
        }

        netif_info(efx, drv, efx->net_dev,
                   "Part Number : %.*s\n", j, &vpd_data[i]);

        i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
        j = ro_size;
        i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
        if (i < 0) {
                netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
                return;
        }

        j = pci_vpd_info_field_size(&vpd_data[i]);
        i += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (i + j > vpd_size) {
                netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
                return;
        }

        efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
        if (!efx->vpd_sn)
                return;

        snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
}


/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
        int rc;

        /* Do start-of-day initialisation */
        rc = efx_probe_all(efx);
        if (rc)
                goto fail1;

        efx_init_napi(efx);

        down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
        up_write(&efx->filter_sem);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise NIC\n");
                goto fail3;
        }

        rc = efx_init_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise port\n");
                goto fail4;
        }

        rc = efx_nic_init_interrupt(efx);
        if (rc)
                goto fail5;

        efx_set_interrupt_affinity(efx);
        rc = efx_enable_interrupts(efx);
        if (rc)
                goto fail6;

        return 0;

 fail6:
        efx_clear_interrupt_affinity(efx);
        efx_nic_fini_interrupt(efx);
 fail5:
        efx_fini_port(efx);
 fail4:
        efx->type->fini(efx);
 fail3:
        efx_fini_napi(efx);
        efx_remove_all(efx);
 fail1:
        return rc;
}

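/* Parts of probe that run once the BAR mappings are in place: bring the NIC
 * up, choose the netdev feature set and register the net device.
 */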
static int efx_pci_probe_post_io(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        int rc = efx_pci_probe_main(efx);

        if (rc)
                return rc;

        if (efx->type->sriov_init) {
                rc = efx->type->sriov_init(efx);
                if (rc)
                        netif_err(efx, probe, efx->net_dev,
                                  "SR-IOV can't be enabled rc %d\n", rc);
        }

        /* Determine netdevice features */
        net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
                              NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
        if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
                net_dev->features |= NETIF_F_TSO6;
        /* Check whether device supports TSO */
        if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
                net_dev->features &= ~NETIF_F_ALL_TSO;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
                                   NETIF_F_RXCSUM);

        net_dev->hw_features |= net_dev->features & ~efx->fixed_features;

        /* Disable receiving frames with bad FCS, by default. */
        net_dev->features &= ~NETIF_F_RXALL;

        /* Disable VLAN filtering by default.  It may be enforced if
         * the feature is fixed (i.e. VLAN filters are required to
         * receive VLAN tagged packets due to vPort restrictions).
         */
        net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
        net_dev->features |= efx->fixed_features;

        rc = efx_register_netdev(efx);
        if (!rc)
                return 0;

        efx_pci_remove_main(efx);
        return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically).  It sets up PCI mappings, resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine.  It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int efx_pci_probe(struct pci_dev *pci_dev,
                         const struct pci_device_id *entry)
{
        struct net_device *net_dev;
        struct efx_nic *efx;
        int rc;

        /* Allocate and initialise a struct net_device and struct efx_nic */
        net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
                                     EFX_MAX_RX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
        efx = netdev_priv(net_dev);
        efx->type = (const struct efx_nic_type *) entry->driver_data;
        efx->fixed_features |= NETIF_F_HIGHDMA;

        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
        rc = efx_init_struct(efx, pci_dev, net_dev);
        if (rc)
                goto fail1;

        netif_info(efx, probe, efx->net_dev,
                   "Solarflare NIC detected\n");

        if (!efx->type->is_vf)
                efx_probe_vpd_strings(efx);

        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
                         efx->type->mem_map_size(efx));
        if (rc)
                goto fail2;

        rc = efx_pci_probe_post_io(efx);
        if (rc) {
                /* On failure, retry once immediately.
                 * If we aborted probe due to a scheduled reset, dismiss it.
                 */
                efx->reset_pending = 0;
                rc = efx_pci_probe_post_io(efx);
                if (rc) {
                        /* On another failure, retry once more
                         * after a 50-305ms delay.
                         */
                        unsigned char r;

                        get_random_bytes(&r, 1);
                        msleep((unsigned int)r + 50);
                        efx->reset_pending = 0;
                        rc = efx_pci_probe_post_io(efx);
                }
        }
        if (rc)
                goto fail3;

        netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

        /* Try to create MTDs, but allow this to fail */
        rtnl_lock();
        rc = efx_mtd_probe(efx);
        rtnl_unlock();
        if (rc && rc != -EPERM)
                netif_warn(efx, probe, efx->net_dev,
                           "failed to create MTDs (%d)\n", rc);

        (void)pci_enable_pcie_error_reporting(pci_dev);

        if (efx->type->udp_tnl_push_ports)
                efx->type->udp_tnl_push_ports(efx);

        return 0;

 fail3:
        efx_fini_io(efx);
 fail2:
        efx_fini_struct(efx);
 fail1:
        WARN_ON(rc > 0);
        netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
        return rc;
}

/* efx_pci_sriov_configure returns the actual number of Virtual Functions
 * enabled on success
 */
#ifdef CONFIG_SFC_SRIOV
static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
        int rc;
        struct efx_nic *efx = pci_get_drvdata(dev);

        if (efx->type->sriov_configure) {
                rc = efx->type->sriov_configure(efx, num_vfs);
                if (rc)
                        return rc;
                else
                        return num_vfs;
        } else
                return -EOPNOTSUPP;
}
#endif

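/* Power management: freeze quiesces the NIC (detach, stop channels and
 * interrupts) without releasing resources, so that thaw/resume can simply
 * restart it.
 */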
static int efx_pm_freeze(struct device *dev)
{
        struct efx_nic *efx = dev_get_drvdata(dev);

        rtnl_lock();

        if (efx->state != STATE_DISABLED) {
                efx->state = STATE_UNINIT;

                efx_device_detach_sync(efx);

                efx_stop_all(efx);
                efx_disable_interrupts(efx);
        }

        rtnl_unlock();

        return 0;
}

static int efx_pm_thaw(struct device *dev)
{
        int rc;
        struct efx_nic *efx = dev_get_drvdata(dev);

        rtnl_lock();

        if (efx->state != STATE_DISABLED) {
                rc = efx_enable_interrupts(efx);
                if (rc)
                        goto fail;

                mutex_lock(&efx->mac_lock);
                efx_mcdi_port_reconfigure(efx);
                mutex_unlock(&efx->mac_lock);

                efx_start_all(efx);

                efx_device_attach_if_not_resetting(efx);

                efx->state = STATE_READY;

                efx->type->resume_wol(efx);
        }

        rtnl_unlock();

        /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
        efx_queue_reset_work(efx);

        return 0;

fail:
        rtnl_unlock();

        return rc;
}

static int efx_pm_poweroff(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);

        efx->type->fini(efx);

        efx->reset_pending = 0;

        pci_save_state(pci_dev);
        return pci_set_power_state(pci_dev, PCI_D3hot);
}

/* Used for both resume and restore */
static int efx_pm_resume(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);
        int rc;

        rc = pci_set_power_state(pci_dev, PCI_D0);
        if (rc)
                return rc;
        pci_restore_state(pci_dev);
        rc = pci_enable_device(pci_dev);
        if (rc)
                return rc;
        pci_set_master(efx->pci_dev);
        rc = efx->type->reset(efx, RESET_TYPE_ALL);
        if (rc)
                return rc;
        down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
        up_write(&efx->filter_sem);
        if (rc)
                return rc;
        rc = efx_pm_thaw(dev);
        return rc;
}

static int efx_pm_suspend(struct device *dev)
{
        int rc;

        efx_pm_freeze(dev);
        rc = efx_pm_poweroff(dev);
        if (rc)
                efx_pm_resume(dev);
        return rc;
}

static const struct dev_pm_ops efx_pm_ops = {
        .suspend        = efx_pm_suspend,
        .resume         = efx_pm_resume,
        .freeze         = efx_pm_freeze,
        .thaw           = efx_pm_thaw,
        .poweroff       = efx_pm_poweroff,
        .restore        = efx_pm_resume,
};

static struct pci_driver efx_pci_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = efx_pci_table,
        .probe          = efx_pci_probe,
        .remove         = efx_pci_remove,
        .driver.pm      = &efx_pm_ops,
        .err_handler    = &efx_err_handlers,
#ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_pci_sriov_configure,
#endif
};

/**************************************************************************
 *
 * Kernel module interface
 *
 *************************************************************************/

static int __init efx_init_module(void)
{
        int rc;

        printk(KERN_INFO "Solarflare NET driver\n");

        rc = register_netdevice_notifier(&efx_netdev_notifier);
        if (rc)
                goto err_notifier;

#ifdef CONFIG_SFC_SRIOV
        rc = efx_init_sriov();
        if (rc)
                goto err_sriov;
#endif

        rc = efx_create_reset_workqueue();
        if (rc)
                goto err_reset;

        rc = pci_register_driver(&efx_pci_driver);
        if (rc < 0)
                goto err_pci;

        rc = pci_register_driver(&ef100_pci_driver);
        if (rc < 0)
                goto err_pci_ef100;

        return 0;

 err_pci_ef100:
        pci_unregister_driver(&efx_pci_driver);
 err_pci:
        efx_destroy_reset_workqueue();
 err_reset:
#ifdef CONFIG_SFC_SRIOV
        efx_fini_sriov();
 err_sriov:
#endif
        unregister_netdevice_notifier(&efx_netdev_notifier);
 err_notifier:
        return rc;
}

static void __exit efx_exit_module(void)
{
        printk(KERN_INFO "Solarflare NET driver unloading\n");

        pci_unregister_driver(&ef100_pci_driver);
        pci_unregister_driver(&efx_pci_driver);
        efx_destroy_reset_workqueue();
#ifdef CONFIG_SFC_SRIOV
        efx_fini_sriov();
#endif
        unregister_netdevice_notifier(&efx_netdev_notifier);

}

module_init(efx_init_module);
module_exit(efx_exit_module);

MODULE_AUTHOR("Solarflare Communications and "
              "Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);