linux/drivers/net/ethernet/intel/igc/igc_main.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c)  2018 Intel Corporation */
   3
   4#include <linux/module.h>
   5#include <linux/types.h>
   6#include <linux/if_vlan.h>
   7#include <linux/aer.h>
   8#include <linux/tcp.h>
   9#include <linux/udp.h>
  10#include <linux/ip.h>
  11#include <linux/pm_runtime.h>
  12#include <net/pkt_sched.h>
  13#include <linux/bpf_trace.h>
  14#include <net/xdp_sock_drv.h>
  15#include <net/ipv6.h>
  16
  17#include "igc.h"
  18#include "igc_hw.h"
  19#include "igc_tsn.h"
  20#include "igc_xdp.h"
  21
  22#define DRV_SUMMARY     "Intel(R) 2.5G Ethernet Linux Driver"
  23
  24#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
  25
  26#define IGC_XDP_PASS            0
  27#define IGC_XDP_CONSUMED        BIT(0)
  28#define IGC_XDP_TX              BIT(1)
  29#define IGC_XDP_REDIRECT        BIT(2)
  30
  31static int debug = -1;
  32
  33MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
  34MODULE_DESCRIPTION(DRV_SUMMARY);
  35MODULE_LICENSE("GPL v2");
  36module_param(debug, int, 0);
  37MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  38
  39char igc_driver_name[] = "igc";
  40static const char igc_driver_string[] = DRV_SUMMARY;
  41static const char igc_copyright[] =
  42        "Copyright(c) 2018 Intel Corporation.";
  43
  44static const struct igc_info *igc_info_tbl[] = {
  45        [board_base] = &igc_base_info,
  46};
  47
  48static const struct pci_device_id igc_pci_tbl[] = {
  49        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
  50        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
  51        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
  52        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
  53        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
  54        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
  55        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
  56        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
  57        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
  58        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
  59        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
  60        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
  61        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
  62        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
  63        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
  64        /* required last entry */
  65        {0, }
  66};
  67
  68MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
  69
  70enum latency_range {
  71        lowest_latency = 0,
  72        low_latency = 1,
  73        bulk_latency = 2,
  74        latency_invalid = 255
  75};
  76
  77void igc_reset(struct igc_adapter *adapter)
  78{
  79        struct net_device *dev = adapter->netdev;
  80        struct igc_hw *hw = &adapter->hw;
  81        struct igc_fc_info *fc = &hw->fc;
  82        u32 pba, hwm;
  83
  84        /* Repartition PBA for greater than 9k MTU if required */
  85        pba = IGC_PBA_34K;
  86
  87        /* flow control settings
  88         * The high water mark must be low enough to fit one full frame
  89         * after transmitting the pause frame.  As such we must have enough
  90         * space to allow for us to complete our current transmit and then
  91         * receive the frame that is in progress from the link partner.
  92         * Set it to:
  93         * - the full Rx FIFO size minus one full Tx frame and one full Rx frame
  94         */
  95        hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
  96
  97        fc->high_water = hwm & 0xFFFFFFF0;      /* 16-byte granularity */
  98        fc->low_water = fc->high_water - 16;
  99        fc->pause_time = 0xFFFF;
 100        fc->send_xon = 1;
 101        fc->current_mode = fc->requested_mode;
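            /* Editor's note: a worked example of the rounding above, using a
             * hypothetical hwm of 24078 bytes (the exact value depends on
             * adapter->max_frame_size and MAX_JUMBO_FRAME_SIZE):
             *
             *   hwm            = 24078 (0x5E0E)
             *   fc->high_water = 24078 & 0xFFFFFFF0 = 24064 (0x5E00)
             *   fc->low_water  = 24064 - 16 = 24048
             */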
 102
 103        hw->mac.ops.reset_hw(hw);
 104
 105        if (hw->mac.ops.init_hw(hw))
 106                netdev_err(dev, "Error on hardware initialization\n");
 107
 108        /* Re-establish EEE setting */
 109        igc_set_eee_i225(hw, true, true, true);
 110
 111        if (!netif_running(adapter->netdev))
 112                igc_power_down_phy_copper_base(&adapter->hw);
 113
 114        /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
 115        wr32(IGC_VET, ETH_P_8021Q);
 116
 117        /* Re-enable PTP, where applicable. */
 118        igc_ptp_reset(adapter);
 119
 120        /* Re-enable TSN offloading, where applicable. */
 121        igc_tsn_offload_apply(adapter);
 122
 123        igc_get_phy_info(hw);
 124}
 125
 126/**
 127 * igc_power_up_link - Power up the phy link
 128 * @adapter: address of board private structure
 129 */
 130static void igc_power_up_link(struct igc_adapter *adapter)
 131{
 132        igc_reset_phy(&adapter->hw);
 133
 134        igc_power_up_phy_copper(&adapter->hw);
 135
 136        igc_setup_link(&adapter->hw);
 137}
 138
 139/**
 140 * igc_release_hw_control - release control of the h/w to f/w
 141 * @adapter: address of board private structure
 142 *
 143 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 144 * For ASF and Pass Through versions of f/w this means that the
 145 * driver is no longer loaded.
 146 */
 147static void igc_release_hw_control(struct igc_adapter *adapter)
 148{
 149        struct igc_hw *hw = &adapter->hw;
 150        u32 ctrl_ext;
 151
 152        if (!pci_device_is_present(adapter->pdev))
 153                return;
 154
 155        /* Let firmware take over control of h/w */
 156        ctrl_ext = rd32(IGC_CTRL_EXT);
 157        wr32(IGC_CTRL_EXT,
 158             ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
 159}
 160
 161/**
 162 * igc_get_hw_control - get control of the h/w from f/w
 163 * @adapter: address of board private structure
 164 *
 165 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 166 * For ASF and Pass Through versions of f/w this means that
 167 * the driver is loaded.
 168 */
 169static void igc_get_hw_control(struct igc_adapter *adapter)
 170{
 171        struct igc_hw *hw = &adapter->hw;
 172        u32 ctrl_ext;
 173
 174        /* Let firmware know the driver has taken over */
 175        ctrl_ext = rd32(IGC_CTRL_EXT);
 176        wr32(IGC_CTRL_EXT,
 177             ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
 178}
 179
 180static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
 181{
 182        dma_unmap_single(dev, dma_unmap_addr(buf, dma),
 183                         dma_unmap_len(buf, len), DMA_TO_DEVICE);
 184
 185        dma_unmap_len_set(buf, len, 0);
 186}
 187
 188/**
 189 * igc_clean_tx_ring - Free Tx Buffers
 190 * @tx_ring: ring to be cleaned
 191 */
 192static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 193{
 194        u16 i = tx_ring->next_to_clean;
 195        struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 196        u32 xsk_frames = 0;
 197
 198        while (i != tx_ring->next_to_use) {
 199                union igc_adv_tx_desc *eop_desc, *tx_desc;
 200
 201                switch (tx_buffer->type) {
 202                case IGC_TX_BUFFER_TYPE_XSK:
 203                        xsk_frames++;
 204                        break;
 205                case IGC_TX_BUFFER_TYPE_XDP:
 206                        xdp_return_frame(tx_buffer->xdpf);
 207                        igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 208                        break;
 209                case IGC_TX_BUFFER_TYPE_SKB:
 210                        dev_kfree_skb_any(tx_buffer->skb);
 211                        igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 212                        break;
 213                default:
 214                        netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
 215                        break;
 216                }
 217
 218                /* check for eop_desc to determine the end of the packet */
 219                eop_desc = tx_buffer->next_to_watch;
 220                tx_desc = IGC_TX_DESC(tx_ring, i);
 221
 222                /* unmap remaining buffers */
 223                while (tx_desc != eop_desc) {
 224                        tx_buffer++;
 225                        tx_desc++;
 226                        i++;
 227                        if (unlikely(i == tx_ring->count)) {
 228                                i = 0;
 229                                tx_buffer = tx_ring->tx_buffer_info;
 230                                tx_desc = IGC_TX_DESC(tx_ring, 0);
 231                        }
 232
 233                        /* unmap any remaining paged data */
 234                        if (dma_unmap_len(tx_buffer, len))
 235                                igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
 236                }
 237
 238                tx_buffer->next_to_watch = NULL;
 239
 240                /* move us one more past the eop_desc for start of next pkt */
 241                tx_buffer++;
 242                i++;
 243                if (unlikely(i == tx_ring->count)) {
 244                        i = 0;
 245                        tx_buffer = tx_ring->tx_buffer_info;
 246                }
 247        }
 248
 249        if (tx_ring->xsk_pool && xsk_frames)
 250                xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
 251
 252        /* reset BQL for queue */
 253        netdev_tx_reset_queue(txring_txq(tx_ring));
 254
 255        /* reset next_to_use and next_to_clean */
 256        tx_ring->next_to_use = 0;
 257        tx_ring->next_to_clean = 0;
 258}
 259
 260/**
 261 * igc_free_tx_resources - Free Tx Resources per Queue
 262 * @tx_ring: Tx descriptor ring for a specific queue
 263 *
 264 * Free all transmit software resources
 265 */
 266void igc_free_tx_resources(struct igc_ring *tx_ring)
 267{
 268        igc_clean_tx_ring(tx_ring);
 269
 270        vfree(tx_ring->tx_buffer_info);
 271        tx_ring->tx_buffer_info = NULL;
 272
 273        /* if not set, then don't free */
 274        if (!tx_ring->desc)
 275                return;
 276
 277        dma_free_coherent(tx_ring->dev, tx_ring->size,
 278                          tx_ring->desc, tx_ring->dma);
 279
 280        tx_ring->desc = NULL;
 281}
 282
 283/**
 284 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 285 * @adapter: board private structure
 286 *
 287 * Free all transmit software resources
 288 */
 289static void igc_free_all_tx_resources(struct igc_adapter *adapter)
 290{
 291        int i;
 292
 293        for (i = 0; i < adapter->num_tx_queues; i++)
 294                igc_free_tx_resources(adapter->tx_ring[i]);
 295}
 296
 297/**
 298 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 299 * @adapter: board private structure
 300 */
 301static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
 302{
 303        int i;
 304
 305        for (i = 0; i < adapter->num_tx_queues; i++)
 306                if (adapter->tx_ring[i])
 307                        igc_clean_tx_ring(adapter->tx_ring[i]);
 308}
 309
 310/**
 311 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 312 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 313 *
 314 * Return 0 on success, negative on failure
 315 */
 316int igc_setup_tx_resources(struct igc_ring *tx_ring)
 317{
 318        struct net_device *ndev = tx_ring->netdev;
 319        struct device *dev = tx_ring->dev;
 320        int size;
 321
 322        size = sizeof(struct igc_tx_buffer) * tx_ring->count;
 323        tx_ring->tx_buffer_info = vzalloc(size);
 324        if (!tx_ring->tx_buffer_info)
 325                goto err;
 326
 327        /* round up to nearest 4K */
 328        tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
 329        tx_ring->size = ALIGN(tx_ring->size, 4096);
 330
 331        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
 332                                           &tx_ring->dma, GFP_KERNEL);
 333
 334        if (!tx_ring->desc)
 335                goto err;
 336
 337        tx_ring->next_to_use = 0;
 338        tx_ring->next_to_clean = 0;
 339
 340        return 0;
 341
 342err:
 343        vfree(tx_ring->tx_buffer_info);
 344        netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
 345        return -ENOMEM;
 346}
 347
 348/**
 349 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 350 * @adapter: board private structure
 351 *
 352 * Return 0 on success, negative on failure
 353 */
 354static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
 355{
 356        struct net_device *dev = adapter->netdev;
 357        int i, err = 0;
 358
 359        for (i = 0; i < adapter->num_tx_queues; i++) {
 360                err = igc_setup_tx_resources(adapter->tx_ring[i]);
 361                if (err) {
 362                        netdev_err(dev, "Error on Tx queue %u setup\n", i);
 363                        for (i--; i >= 0; i--)
 364                                igc_free_tx_resources(adapter->tx_ring[i]);
 365                        break;
 366                }
 367        }
 368
 369        return err;
 370}
 371
 372static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
 373{
 374        u16 i = rx_ring->next_to_clean;
 375
 376        dev_kfree_skb(rx_ring->skb);
 377        rx_ring->skb = NULL;
 378
 379        /* Free all the Rx ring sk_buffs */
 380        while (i != rx_ring->next_to_alloc) {
 381                struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
 382
 383                /* Invalidate cache lines that may have been written to by
 384                 * device so that we avoid corrupting memory.
 385                 */
 386                dma_sync_single_range_for_cpu(rx_ring->dev,
 387                                              buffer_info->dma,
 388                                              buffer_info->page_offset,
 389                                              igc_rx_bufsz(rx_ring),
 390                                              DMA_FROM_DEVICE);
 391
 392                /* free resources associated with mapping */
 393                dma_unmap_page_attrs(rx_ring->dev,
 394                                     buffer_info->dma,
 395                                     igc_rx_pg_size(rx_ring),
 396                                     DMA_FROM_DEVICE,
 397                                     IGC_RX_DMA_ATTR);
 398                __page_frag_cache_drain(buffer_info->page,
 399                                        buffer_info->pagecnt_bias);
 400
 401                i++;
 402                if (i == rx_ring->count)
 403                        i = 0;
 404        }
 405}
 406
 407static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
 408{
 409        struct igc_rx_buffer *bi;
 410        u16 i;
 411
 412        for (i = 0; i < ring->count; i++) {
 413                bi = &ring->rx_buffer_info[i];
 414                if (!bi->xdp)
 415                        continue;
 416
 417                xsk_buff_free(bi->xdp);
 418                bi->xdp = NULL;
 419        }
 420}
 421
 422/**
 423 * igc_clean_rx_ring - Free Rx Buffers per Queue
 424 * @ring: ring to free buffers from
 425 */
 426static void igc_clean_rx_ring(struct igc_ring *ring)
 427{
 428        if (ring->xsk_pool)
 429                igc_clean_rx_ring_xsk_pool(ring);
 430        else
 431                igc_clean_rx_ring_page_shared(ring);
 432
 433        clear_ring_uses_large_buffer(ring);
 434
 435        ring->next_to_alloc = 0;
 436        ring->next_to_clean = 0;
 437        ring->next_to_use = 0;
 438}
 439
 440/**
 441 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 442 * @adapter: board private structure
 443 */
 444static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
 445{
 446        int i;
 447
 448        for (i = 0; i < adapter->num_rx_queues; i++)
 449                if (adapter->rx_ring[i])
 450                        igc_clean_rx_ring(adapter->rx_ring[i]);
 451}
 452
 453/**
 454 * igc_free_rx_resources - Free Rx Resources
 455 * @rx_ring: ring to clean the resources from
 456 *
 457 * Free all receive software resources
 458 */
 459void igc_free_rx_resources(struct igc_ring *rx_ring)
 460{
 461        igc_clean_rx_ring(rx_ring);
 462
 463        xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 464
 465        vfree(rx_ring->rx_buffer_info);
 466        rx_ring->rx_buffer_info = NULL;
 467
 468        /* if not set, then don't free */
 469        if (!rx_ring->desc)
 470                return;
 471
 472        dma_free_coherent(rx_ring->dev, rx_ring->size,
 473                          rx_ring->desc, rx_ring->dma);
 474
 475        rx_ring->desc = NULL;
 476}
 477
 478/**
 479 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 480 * @adapter: board private structure
 481 *
 482 * Free all receive software resources
 483 */
 484static void igc_free_all_rx_resources(struct igc_adapter *adapter)
 485{
 486        int i;
 487
 488        for (i = 0; i < adapter->num_rx_queues; i++)
 489                igc_free_rx_resources(adapter->rx_ring[i]);
 490}
 491
 492/**
 493 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 494 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
 495 *
 496 * Returns 0 on success, negative on failure
 497 */
 498int igc_setup_rx_resources(struct igc_ring *rx_ring)
 499{
 500        struct net_device *ndev = rx_ring->netdev;
 501        struct device *dev = rx_ring->dev;
 502        u8 index = rx_ring->queue_index;
 503        int size, desc_len, res;
 504
 505        res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
 506                               rx_ring->q_vector->napi.napi_id);
 507        if (res < 0) {
 508                netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
 509                           index);
 510                return res;
 511        }
 512
 513        size = sizeof(struct igc_rx_buffer) * rx_ring->count;
 514        rx_ring->rx_buffer_info = vzalloc(size);
 515        if (!rx_ring->rx_buffer_info)
 516                goto err;
 517
 518        desc_len = sizeof(union igc_adv_rx_desc);
 519
 520        /* Round up to nearest 4K */
 521        rx_ring->size = rx_ring->count * desc_len;
 522        rx_ring->size = ALIGN(rx_ring->size, 4096);
 523
 524        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
 525                                           &rx_ring->dma, GFP_KERNEL);
 526
 527        if (!rx_ring->desc)
 528                goto err;
 529
 530        rx_ring->next_to_alloc = 0;
 531        rx_ring->next_to_clean = 0;
 532        rx_ring->next_to_use = 0;
 533
 534        return 0;
 535
 536err:
 537        xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 538        vfree(rx_ring->rx_buffer_info);
 539        rx_ring->rx_buffer_info = NULL;
 540        netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
 541        return -ENOMEM;
 542}
 543
 544/**
 545 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 546 *                                (Descriptors) for all queues
 547 * @adapter: board private structure
 548 *
 549 * Return 0 on success, negative on failure
 550 */
 551static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
 552{
 553        struct net_device *dev = adapter->netdev;
 554        int i, err = 0;
 555
 556        for (i = 0; i < adapter->num_rx_queues; i++) {
 557                err = igc_setup_rx_resources(adapter->rx_ring[i]);
 558                if (err) {
 559                        netdev_err(dev, "Error on Rx queue %u setup\n", i);
 560                        for (i--; i >= 0; i--)
 561                                igc_free_rx_resources(adapter->rx_ring[i]);
 562                        break;
 563                }
 564        }
 565
 566        return err;
 567}
 568
 569static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
 570                                              struct igc_ring *ring)
 571{
 572        if (!igc_xdp_is_enabled(adapter) ||
 573            !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
 574                return NULL;
 575
 576        return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
 577}
 578
 579/**
 580 * igc_configure_rx_ring - Configure a receive ring after Reset
 581 * @adapter: board private structure
 582 * @ring: receive ring to be configured
 583 *
 584 * Configure the Rx unit of the MAC after a reset.
 585 */
 586static void igc_configure_rx_ring(struct igc_adapter *adapter,
 587                                  struct igc_ring *ring)
 588{
 589        struct igc_hw *hw = &adapter->hw;
 590        union igc_adv_rx_desc *rx_desc;
 591        int reg_idx = ring->reg_idx;
 592        u32 srrctl = 0, rxdctl = 0;
 593        u64 rdba = ring->dma;
 594        u32 buf_size;
 595
 596        xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
 597        ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
 598        if (ring->xsk_pool) {
 599                WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 600                                                   MEM_TYPE_XSK_BUFF_POOL,
 601                                                   NULL));
 602                xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
 603        } else {
 604                WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 605                                                   MEM_TYPE_PAGE_SHARED,
 606                                                   NULL));
 607        }
 608
 609        if (igc_xdp_is_enabled(adapter))
 610                set_ring_uses_large_buffer(ring);
 611
 612        /* disable the queue */
 613        wr32(IGC_RXDCTL(reg_idx), 0);
 614
 615        /* Set DMA base address registers */
 616        wr32(IGC_RDBAL(reg_idx),
 617             rdba & 0x00000000ffffffffULL);
 618        wr32(IGC_RDBAH(reg_idx), rdba >> 32);
 619        wr32(IGC_RDLEN(reg_idx),
 620             ring->count * sizeof(union igc_adv_rx_desc));
 621
 622        /* initialize head and tail */
 623        ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
 624        wr32(IGC_RDH(reg_idx), 0);
 625        writel(0, ring->tail);
 626
 627        /* reset next-to-use/clean to place SW in sync with hardware */
 628        ring->next_to_clean = 0;
 629        ring->next_to_use = 0;
 630
 631        if (ring->xsk_pool)
 632                buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 633        else if (ring_uses_large_buffer(ring))
 634                buf_size = IGC_RXBUFFER_3072;
 635        else
 636                buf_size = IGC_RXBUFFER_2048;
 637
 638        srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
 639        srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
 640        srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
 641
 642        wr32(IGC_SRRCTL(reg_idx), srrctl);
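            /* Editor's note: SRRCTL encodes the packet buffer size in 1KB
             * units (assuming IGC_SRRCTL_BSIZEPKT_SHIFT is 10), so a
             * 2048-byte buffer programs the BSIZEPKT field as 2 and a
             * 3072-byte buffer as 3; the header size is written separately
             * through the BSIZEHDRSIZE field above.
             */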
 643
 644        rxdctl |= IGC_RX_PTHRESH;
 645        rxdctl |= IGC_RX_HTHRESH << 8;
 646        rxdctl |= IGC_RX_WTHRESH << 16;
 647
 648        /* initialize rx_buffer_info */
 649        memset(ring->rx_buffer_info, 0,
 650               sizeof(struct igc_rx_buffer) * ring->count);
 651
 652        /* initialize Rx descriptor 0 */
 653        rx_desc = IGC_RX_DESC(ring, 0);
 654        rx_desc->wb.upper.length = 0;
 655
 656        /* enable receive descriptor fetching */
 657        rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
 658
 659        wr32(IGC_RXDCTL(reg_idx), rxdctl);
 660}
 661
 662/**
 663 * igc_configure_rx - Configure receive Unit after Reset
 664 * @adapter: board private structure
 665 *
 666 * Configure the Rx unit of the MAC after a reset.
 667 */
 668static void igc_configure_rx(struct igc_adapter *adapter)
 669{
 670        int i;
 671
 672        /* Setup the HW Rx Head and Tail Descriptor Pointers and
 673         * the Base and Length of the Rx Descriptor Ring
 674         */
 675        for (i = 0; i < adapter->num_rx_queues; i++)
 676                igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
 677}
 678
 679/**
 680 * igc_configure_tx_ring - Configure transmit ring after Reset
 681 * @adapter: board private structure
 682 * @ring: tx ring to configure
 683 *
 684 * Configure a transmit ring after a reset.
 685 */
 686static void igc_configure_tx_ring(struct igc_adapter *adapter,
 687                                  struct igc_ring *ring)
 688{
 689        struct igc_hw *hw = &adapter->hw;
 690        int reg_idx = ring->reg_idx;
 691        u64 tdba = ring->dma;
 692        u32 txdctl = 0;
 693
 694        ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
 695
 696        /* disable the queue */
 697        wr32(IGC_TXDCTL(reg_idx), 0);
 698        wrfl();
 699        mdelay(10);
 700
 701        wr32(IGC_TDLEN(reg_idx),
 702             ring->count * sizeof(union igc_adv_tx_desc));
 703        wr32(IGC_TDBAL(reg_idx),
 704             tdba & 0x00000000ffffffffULL);
 705        wr32(IGC_TDBAH(reg_idx), tdba >> 32);
 706
 707        ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
 708        wr32(IGC_TDH(reg_idx), 0);
 709        writel(0, ring->tail);
 710
 711        txdctl |= IGC_TX_PTHRESH;
 712        txdctl |= IGC_TX_HTHRESH << 8;
 713        txdctl |= IGC_TX_WTHRESH << 16;
 714
 715        txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
 716        wr32(IGC_TXDCTL(reg_idx), txdctl);
 717}
 718
 719/**
 720 * igc_configure_tx - Configure transmit Unit after Reset
 721 * @adapter: board private structure
 722 *
 723 * Configure the Tx unit of the MAC after a reset.
 724 */
 725static void igc_configure_tx(struct igc_adapter *adapter)
 726{
 727        int i;
 728
 729        for (i = 0; i < adapter->num_tx_queues; i++)
 730                igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
 731}
 732
 733/**
 734 * igc_setup_mrqc - configure the multiple receive queue control registers
 735 * @adapter: Board private structure
 736 */
 737static void igc_setup_mrqc(struct igc_adapter *adapter)
 738{
 739        struct igc_hw *hw = &adapter->hw;
 740        u32 j, num_rx_queues;
 741        u32 mrqc, rxcsum;
 742        u32 rss_key[10];
 743
 744        netdev_rss_key_fill(rss_key, sizeof(rss_key));
 745        for (j = 0; j < 10; j++)
 746                wr32(IGC_RSSRK(j), rss_key[j]);
 747
 748        num_rx_queues = adapter->rss_queues;
 749
 750        if (adapter->rss_indir_tbl_init != num_rx_queues) {
 751                for (j = 0; j < IGC_RETA_SIZE; j++)
 752                        adapter->rss_indir_tbl[j] =
 753                        (j * num_rx_queues) / IGC_RETA_SIZE;
 754                adapter->rss_indir_tbl_init = num_rx_queues;
 755        }
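            /* Editor's note: assuming IGC_RETA_SIZE is 128, the loop above
             * spreads queues evenly across the indirection table; e.g. with
             * rss_queues = 4, entries 0-31 map to queue 0, 32-63 to queue 1,
             * 64-95 to queue 2 and 96-127 to queue 3
             * ((j * 4) / 128 == j / 32).
             */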
 756        igc_write_rss_indir_tbl(adapter);
 757
 758        /* Disable raw packet checksumming so that RSS hash is placed in
 759         * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
 760         * offloads as they are enabled by default
 761         */
 762        rxcsum = rd32(IGC_RXCSUM);
 763        rxcsum |= IGC_RXCSUM_PCSD;
 764
 765        /* Enable Receive Checksum Offload for SCTP */
 766        rxcsum |= IGC_RXCSUM_CRCOFL;
 767
 768        /* Don't need to set TUOFL or IPOFL, they default to 1 */
 769        wr32(IGC_RXCSUM, rxcsum);
 770
 771        /* Generate RSS hash based on packet types, TCP/UDP
 772         * port numbers and/or IPv4/v6 src and dst addresses
 773         */
 774        mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
 775               IGC_MRQC_RSS_FIELD_IPV4_TCP |
 776               IGC_MRQC_RSS_FIELD_IPV6 |
 777               IGC_MRQC_RSS_FIELD_IPV6_TCP |
 778               IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
 779
 780        if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
 781                mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
 782        if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
 783                mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
 784
 785        mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
 786
 787        wr32(IGC_MRQC, mrqc);
 788}
 789
 790/**
 791 * igc_setup_rctl - configure the receive control registers
 792 * @adapter: Board private structure
 793 */
 794static void igc_setup_rctl(struct igc_adapter *adapter)
 795{
 796        struct igc_hw *hw = &adapter->hw;
 797        u32 rctl;
 798
 799        rctl = rd32(IGC_RCTL);
 800
 801        rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
 802        rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
 803
 804        rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
 805                (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
 806
 807        /* enable stripping of CRC. Newer features require
 808         * that the HW strips the CRC.
 809         */
 810        rctl |= IGC_RCTL_SECRC;
 811
 812        /* disable store bad packets and clear size bits. */
 813        rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
 814
 815        /* enable LPE to allow for reception of jumbo frames */
 816        rctl |= IGC_RCTL_LPE;
 817
 818        /* disable queue 0 to prevent tail write w/o re-config */
 819        wr32(IGC_RXDCTL(0), 0);
 820
 821        /* This is useful for sniffing bad packets. */
 822        if (adapter->netdev->features & NETIF_F_RXALL) {
 823                /* UPE and MPE will be handled by normal PROMISC logic
 824                 * in set_rx_mode
 825                 */
 826                rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
 827                         IGC_RCTL_BAM | /* RX All Bcast Pkts */
 828                         IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
 829
 830                rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
 831                          IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
 832        }
 833
 834        wr32(IGC_RCTL, rctl);
 835}
 836
 837/**
 838 * igc_setup_tctl - configure the transmit control registers
 839 * @adapter: Board private structure
 840 */
 841static void igc_setup_tctl(struct igc_adapter *adapter)
 842{
 843        struct igc_hw *hw = &adapter->hw;
 844        u32 tctl;
 845
 846        /* disable queue 0 which could be enabled by default */
 847        wr32(IGC_TXDCTL(0), 0);
 848
 849        /* Program the Transmit Control Register */
 850        tctl = rd32(IGC_TCTL);
 851        tctl &= ~IGC_TCTL_CT;
 852        tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
 853                (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
 854
 855        /* Enable transmits */
 856        tctl |= IGC_TCTL_EN;
 857
 858        wr32(IGC_TCTL, tctl);
 859}
 860
 861/**
 862 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 863 * @adapter: Pointer to adapter where the filter should be set
 864 * @index: Filter index
 865 * @type: MAC address filter type (source or destination)
 866 * @addr: MAC address
 867 * @queue: If non-negative, queue assignment feature is enabled and frames
 868 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
 869 *         assignment is disabled.
 870 */
 871static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
 872                                  enum igc_mac_filter_type type,
 873                                  const u8 *addr, int queue)
 874{
 875        struct net_device *dev = adapter->netdev;
 876        struct igc_hw *hw = &adapter->hw;
 877        u32 ral, rah;
 878
 879        if (WARN_ON(index >= hw->mac.rar_entry_count))
 880                return;
 881
 882        ral = le32_to_cpup((__le32 *)(addr));
 883        rah = le16_to_cpup((__le16 *)(addr + 4));
 884
 885        if (type == IGC_MAC_FILTER_TYPE_SRC) {
 886                rah &= ~IGC_RAH_ASEL_MASK;
 887                rah |= IGC_RAH_ASEL_SRC_ADDR;
 888        }
 889
 890        if (queue >= 0) {
 891                rah &= ~IGC_RAH_QSEL_MASK;
 892                rah |= (queue << IGC_RAH_QSEL_SHIFT);
 893                rah |= IGC_RAH_QSEL_ENABLE;
 894        }
 895
 896        rah |= IGC_RAH_AV;
 897
 898        wr32(IGC_RAL(index), ral);
 899        wr32(IGC_RAH(index), rah);
 900
 901        netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
 902}
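    /* Editor's note: the address bytes land little-endian in the RAL/RAH
     * register pair.  For example, MAC address 00:11:22:33:44:55 yields
     * ral = 0x33221100 (addr[0]..addr[3]) and the low 16 bits of
     * rah = 0x5544 (addr[4]..addr[5]), before the ASEL/QSEL/AV bits are
     * OR-ed in above.
     */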
 903
 904/**
 905 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 906 * @adapter: Pointer to adapter where the filter should be cleared
 907 * @index: Filter index
 908 */
 909static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
 910{
 911        struct net_device *dev = adapter->netdev;
 912        struct igc_hw *hw = &adapter->hw;
 913
 914        if (WARN_ON(index >= hw->mac.rar_entry_count))
 915                return;
 916
 917        wr32(IGC_RAL(index), 0);
 918        wr32(IGC_RAH(index), 0);
 919
 920        netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
 921}
 922
 923/* Set default MAC address for the PF in the first RAR entry */
 924static void igc_set_default_mac_filter(struct igc_adapter *adapter)
 925{
 926        struct net_device *dev = adapter->netdev;
 927        u8 *addr = adapter->hw.mac.addr;
 928
 929        netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
 930
 931        igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
 932}
 933
 934/**
 935 * igc_set_mac - Change the Ethernet Address of the NIC
 936 * @netdev: network interface device structure
 937 * @p: pointer to an address structure
 938 *
 939 * Returns 0 on success, negative on failure
 940 */
 941static int igc_set_mac(struct net_device *netdev, void *p)
 942{
 943        struct igc_adapter *adapter = netdev_priv(netdev);
 944        struct igc_hw *hw = &adapter->hw;
 945        struct sockaddr *addr = p;
 946
 947        if (!is_valid_ether_addr(addr->sa_data))
 948                return -EADDRNOTAVAIL;
 949
 950        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
 951        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
 952
 953        /* set the correct pool for the new PF MAC address in entry 0 */
 954        igc_set_default_mac_filter(adapter);
 955
 956        return 0;
 957}
 958
 959/**
 960 *  igc_write_mc_addr_list - write multicast addresses to MTA
 961 *  @netdev: network interface device structure
 962 *
 963 *  Writes multicast address list to the MTA hash table.
 964 *  Returns: -ENOMEM on failure
 965 *           0 on no addresses written
 966 *           X on writing X addresses to MTA
 967 **/
 968static int igc_write_mc_addr_list(struct net_device *netdev)
 969{
 970        struct igc_adapter *adapter = netdev_priv(netdev);
 971        struct igc_hw *hw = &adapter->hw;
 972        struct netdev_hw_addr *ha;
 973        u8  *mta_list;
 974        int i;
 975
 976        if (netdev_mc_empty(netdev)) {
 977                /* nothing to program, so clear mc list */
 978                igc_update_mc_addr_list(hw, NULL, 0);
 979                return 0;
 980        }
 981
 982        mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
 983        if (!mta_list)
 984                return -ENOMEM;
 985
 986        /* The shared function expects a packed array of only addresses. */
 987        i = 0;
 988        netdev_for_each_mc_addr(ha, netdev)
 989                memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 990
 991        igc_update_mc_addr_list(hw, mta_list, i);
 992        kfree(mta_list);
 993
 994        return netdev_mc_count(netdev);
 995}
 996
 997static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
 998{
 999        ktime_t cycle_time = adapter->cycle_time;
1000        ktime_t base_time = adapter->base_time;
1001        u32 launchtime;
1002
1003        /* FIXME: when using ETF together with taprio, we may have a
1004         * case where 'delta' is larger than the cycle_time, this may
1005         * cause problems if we don't read the current value of
1006         * IGC_BASET, as the value written into the launchtime
1007         * descriptor field may be misinterpreted.
1008         */
1009        div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
1010
1011        return cpu_to_le32(launchtime);
1012}
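    /* Editor's note: igc_tx_launchtime() reduces the absolute Tx time to an
     * offset within the current cycle.  A sketch with hypothetical values:
     * base_time = 0, cycle_time = 1000000ns and txtime = 2500300ns leave a
     * remainder of 500300, i.e.
     *
     *   launchtime = (txtime - base_time) % cycle_time
     */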
1013
1014static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
1015                            struct igc_tx_buffer *first,
1016                            u32 vlan_macip_lens, u32 type_tucmd,
1017                            u32 mss_l4len_idx)
1018{
1019        struct igc_adv_tx_context_desc *context_desc;
1020        u16 i = tx_ring->next_to_use;
1021
1022        context_desc = IGC_TX_CTXTDESC(tx_ring, i);
1023
1024        i++;
1025        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1026
1027        /* set bits to identify this as an advanced context descriptor */
1028        type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
1029
1030        /* For i225, context index must be unique per ring. */
1031        if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
1032                mss_l4len_idx |= tx_ring->reg_idx << 4;
1033
1034        context_desc->vlan_macip_lens   = cpu_to_le32(vlan_macip_lens);
1035        context_desc->type_tucmd_mlhl   = cpu_to_le32(type_tucmd);
1036        context_desc->mss_l4len_idx     = cpu_to_le32(mss_l4len_idx);
1037
1038        /* We assume there is always a valid Tx time available. Invalid times
1039         * should have been handled by the upper layers.
1040         */
1041        if (tx_ring->launchtime_enable) {
1042                struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1043                ktime_t txtime = first->skb->tstamp;
1044
1045                skb_txtime_consumed(first->skb);
1046                context_desc->launch_time = igc_tx_launchtime(adapter,
1047                                                              txtime);
1048        } else {
1049                context_desc->launch_time = 0;
1050        }
1051}
1052
1053static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
1054{
1055        struct sk_buff *skb = first->skb;
1056        u32 vlan_macip_lens = 0;
1057        u32 type_tucmd = 0;
1058
1059        if (skb->ip_summed != CHECKSUM_PARTIAL) {
1060csum_failed:
1061                if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
1062                    !tx_ring->launchtime_enable)
1063                        return;
1064                goto no_csum;
1065        }
1066
1067        switch (skb->csum_offset) {
1068        case offsetof(struct tcphdr, check):
1069                type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1070                fallthrough;
1071        case offsetof(struct udphdr, check):
1072                break;
1073        case offsetof(struct sctphdr, checksum):
1074                /* validate that this is actually an SCTP request */
1075                if (skb_csum_is_sctp(skb)) {
1076                        type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
1077                        break;
1078                }
1079                fallthrough;
1080        default:
1081                skb_checksum_help(skb);
1082                goto csum_failed;
1083        }
1084
1085        /* update TX checksum flag */
1086        first->tx_flags |= IGC_TX_FLAGS_CSUM;
1087        vlan_macip_lens = skb_checksum_start_offset(skb) -
1088                          skb_network_offset(skb);
1089no_csum:
1090        vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
1091        vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1092
1093        igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
1094}
1095
1096static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1097{
1098        struct net_device *netdev = tx_ring->netdev;
1099
1100        netif_stop_subqueue(netdev, tx_ring->queue_index);
1101
1102        /* barrier to order the queue stop against the re-check below */
1103        smp_mb();
1104
1105        /* We need to check again in case another CPU has just
1106         * made room available.
1107         */
1108        if (igc_desc_unused(tx_ring) < size)
1109                return -EBUSY;
1110
1111        /* A reprieve! */
1112        netif_wake_subqueue(netdev, tx_ring->queue_index);
1113
1114        u64_stats_update_begin(&tx_ring->tx_syncp2);
1115        tx_ring->tx_stats.restart_queue2++;
1116        u64_stats_update_end(&tx_ring->tx_syncp2);
1117
1118        return 0;
1119}
1120
1121static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1122{
1123        if (igc_desc_unused(tx_ring) >= size)
1124                return 0;
1125        return __igc_maybe_stop_tx(tx_ring, size);
1126}
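    /* Editor's note: the pair above implements the usual lock-free
     * stop/wake protocol.  The fast path in igc_maybe_stop_tx() checks the
     * free-descriptor count without a barrier; only when the ring looks
     * full does __igc_maybe_stop_tx() stop the queue, issue smp_mb() and
     * re-check, so a completion running on another CPU either observes the
     * stopped queue and wakes it, or frees descriptors that the re-check
     * sees.
     */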
1127
1128#define IGC_SET_FLAG(_input, _flag, _result) \
1129        (((_flag) <= (_result)) ?                               \
1130         ((u32)((_input) & (_flag)) * ((_result) / (_flag))) :  \
1131         ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
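    /* Editor's note: IGC_SET_FLAG() moves a single flag bit in _input to
     * the bit position of _result without branching.  For example, in
     * igc_tx_cmd_type() below,
     * IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN, IGC_ADVTXD_DCMD_VLE)
     * scales the masked VLAN bit by (_result / _flag) (or divides by
     * (_flag / _result) when the flag sits above the result), yielding
     * IGC_ADVTXD_DCMD_VLE when the flag is set and 0 otherwise.
     */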
1132
1133static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
1134{
1135        /* set type for advanced descriptor with frame checksum insertion */
1136        u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
1137                       IGC_ADVTXD_DCMD_DEXT |
1138                       IGC_ADVTXD_DCMD_IFCS;
1139
1140        /* set HW vlan bit if vlan is present */
1141        cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
1142                                 IGC_ADVTXD_DCMD_VLE);
1143
1144        /* set segmentation bits for TSO */
1145        cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
1146                                 (IGC_ADVTXD_DCMD_TSE));
1147
1148        /* set timestamp bit if present */
1149        cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
1150                                 (IGC_ADVTXD_MAC_TSTAMP));
1151
1152        /* insert frame checksum */
1153        cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
1154
1155        return cmd_type;
1156}
1157
1158static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
1159                                 union igc_adv_tx_desc *tx_desc,
1160                                 u32 tx_flags, unsigned int paylen)
1161{
1162        u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
1163
1164        /* insert L4 checksum */
1165        olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
1166                          ((IGC_TXD_POPTS_TXSM << 8) /
1167                          IGC_TX_FLAGS_CSUM);
1168
1169        /* insert IPv4 checksum */
1170        olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
1171                          (((IGC_TXD_POPTS_IXSM << 8)) /
1172                          IGC_TX_FLAGS_IPV4);
1173
1174        tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1175}
1176
1177static int igc_tx_map(struct igc_ring *tx_ring,
1178                      struct igc_tx_buffer *first,
1179                      const u8 hdr_len)
1180{
1181        struct sk_buff *skb = first->skb;
1182        struct igc_tx_buffer *tx_buffer;
1183        union igc_adv_tx_desc *tx_desc;
1184        u32 tx_flags = first->tx_flags;
1185        skb_frag_t *frag;
1186        u16 i = tx_ring->next_to_use;
1187        unsigned int data_len, size;
1188        dma_addr_t dma;
1189        u32 cmd_type;
1190
1191        cmd_type = igc_tx_cmd_type(skb, tx_flags);
1192        tx_desc = IGC_TX_DESC(tx_ring, i);
1193
1194        igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
1195
1196        size = skb_headlen(skb);
1197        data_len = skb->data_len;
1198
1199        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1200
1201        tx_buffer = first;
1202
1203        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1204                if (dma_mapping_error(tx_ring->dev, dma))
1205                        goto dma_error;
1206
1207                /* record length, and DMA address */
1208                dma_unmap_len_set(tx_buffer, len, size);
1209                dma_unmap_addr_set(tx_buffer, dma, dma);
1210
1211                tx_desc->read.buffer_addr = cpu_to_le64(dma);
1212
1213                while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
1214                        tx_desc->read.cmd_type_len =
1215                                cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
1216
1217                        i++;
1218                        tx_desc++;
1219                        if (i == tx_ring->count) {
1220                                tx_desc = IGC_TX_DESC(tx_ring, 0);
1221                                i = 0;
1222                        }
1223                        tx_desc->read.olinfo_status = 0;
1224
1225                        dma += IGC_MAX_DATA_PER_TXD;
1226                        size -= IGC_MAX_DATA_PER_TXD;
1227
1228                        tx_desc->read.buffer_addr = cpu_to_le64(dma);
1229                }
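                    /* Editor's note: the loop above splits buffers larger
                     * than IGC_MAX_DATA_PER_TXD across descriptors.
                     * Assuming the usual 32KB limit (1 << 15), a
                     * 40000-byte mapping is posted as a 32768-byte
                     * descriptor followed by a 7232-byte one; only the
                     * frame's final descriptor gets the RS/EOP bits below.
                     */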
1230
1231                if (likely(!data_len))
1232                        break;
1233
1234                tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1235
1236                i++;
1237                tx_desc++;
1238                if (i == tx_ring->count) {
1239                        tx_desc = IGC_TX_DESC(tx_ring, 0);
1240                        i = 0;
1241                }
1242                tx_desc->read.olinfo_status = 0;
1243
1244                size = skb_frag_size(frag);
1245                data_len -= size;
1246
1247                dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
1248                                       size, DMA_TO_DEVICE);
1249
1250                tx_buffer = &tx_ring->tx_buffer_info[i];
1251        }
1252
1253        /* write last descriptor with RS and EOP bits */
1254        cmd_type |= size | IGC_TXD_DCMD;
1255        tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1256
1257        netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1258
1259        /* set the timestamp */
1260        first->time_stamp = jiffies;
1261
1262        skb_tx_timestamp(skb);
1263
1264        /* Force memory writes to complete before letting h/w know there
1265         * are new descriptors to fetch.  (Only applicable for weak-ordered
1266         * memory model archs, such as IA-64).
1267         *
1268         * We also need this memory barrier to make certain all of the
1269         * status bits have been updated before next_to_watch is written.
1270         */
1271        wmb();
1272
1273        /* set next_to_watch value indicating a packet is present */
1274        first->next_to_watch = tx_desc;
1275
1276        i++;
1277        if (i == tx_ring->count)
1278                i = 0;
1279
1280        tx_ring->next_to_use = i;
1281
1282        /* Make sure there is space in the ring for the next send. */
1283        igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
1284
1285        if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1286                writel(i, tx_ring->tail);
1288
1289        return 0;
1290dma_error:
1291        netdev_err(tx_ring->netdev, "TX DMA map failed\n");
1292        tx_buffer = &tx_ring->tx_buffer_info[i];
1293
1294        /* clear dma mappings for failed tx_buffer_info map */
1295        while (tx_buffer != first) {
1296                if (dma_unmap_len(tx_buffer, len))
1297                        igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1298
1299                if (i-- == 0)
1300                        i += tx_ring->count;
1301                tx_buffer = &tx_ring->tx_buffer_info[i];
1302        }
1303
1304        if (dma_unmap_len(tx_buffer, len))
1305                igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1306
1307        dev_kfree_skb_any(tx_buffer->skb);
1308        tx_buffer->skb = NULL;
1309
1310        tx_ring->next_to_use = i;
1311
1312        return -1;
1313}
1314
1315static int igc_tso(struct igc_ring *tx_ring,
1316                   struct igc_tx_buffer *first,
1317                   u8 *hdr_len)
1318{
1319        u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1320        struct sk_buff *skb = first->skb;
1321        union {
1322                struct iphdr *v4;
1323                struct ipv6hdr *v6;
1324                unsigned char *hdr;
1325        } ip;
1326        union {
1327                struct tcphdr *tcp;
1328                struct udphdr *udp;
1329                unsigned char *hdr;
1330        } l4;
1331        u32 paylen, l4_offset;
1332        int err;
1333
1334        if (skb->ip_summed != CHECKSUM_PARTIAL)
1335                return 0;
1336
1337        if (!skb_is_gso(skb))
1338                return 0;
1339
1340        err = skb_cow_head(skb, 0);
1341        if (err < 0)
1342                return err;
1343
1344        ip.hdr = skb_network_header(skb);
1345        l4.hdr = skb_checksum_start(skb);
1346
1347        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1348        type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1349
1350        /* initialize outer IP header fields */
1351        if (ip.v4->version == 4) {
1352                unsigned char *csum_start = skb_checksum_start(skb);
1353                unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1354
1355                /* IP header will have to cancel out any data that
1356                 * is not a part of the outer IP header
1357                 */
1358                ip.v4->check = csum_fold(csum_partial(trans_start,
1359                                                      csum_start - trans_start,
1360                                                      0));
1361                type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
1362
1363                ip.v4->tot_len = 0;
1364                first->tx_flags |= IGC_TX_FLAGS_TSO |
1365                                   IGC_TX_FLAGS_CSUM |
1366                                   IGC_TX_FLAGS_IPV4;
1367        } else {
1368                ip.v6->payload_len = 0;
1369                first->tx_flags |= IGC_TX_FLAGS_TSO |
1370                                   IGC_TX_FLAGS_CSUM;
1371        }
1372
1373        /* determine offset of inner transport header */
1374        l4_offset = l4.hdr - skb->data;
1375
1376        /* remove payload length from inner checksum */
1377        paylen = skb->len - l4_offset;
1378        if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
1379                /* compute length of segmentation header */
1380                *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1381                csum_replace_by_diff(&l4.tcp->check,
1382                                     (__force __wsum)htonl(paylen));
1383        } else {
1384                /* compute length of segmentation header */
1385                *hdr_len = sizeof(*l4.udp) + l4_offset;
1386                csum_replace_by_diff(&l4.udp->check,
1387                                     (__force __wsum)htonl(paylen));
1388        }
1389
1390        /* update gso size and bytecount with header size */
1391        first->gso_segs = skb_shinfo(skb)->gso_segs;
1392        first->bytecount += (first->gso_segs - 1) * *hdr_len;
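        /* Editor's note: a worked example with hypothetical numbers: for
         * gso_segs = 10 and a 66-byte header, the wire carries the header
         * ten times while skb->len counts it once, so bytecount grows by
         * 9 * 66 = 594 bytes to keep BQL accounting accurate.
         */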
1393
1394        /* MSS L4LEN IDX */
1395        mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
1396        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
1397
1398        /* VLAN MACLEN IPLEN */
1399        vlan_macip_lens = l4.hdr - ip.hdr;
1400        vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
1401        vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1402
1403        igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
1404                        type_tucmd, mss_l4len_idx);
1405
1406        return 1;
1407}
1408
1409static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
1410                                       struct igc_ring *tx_ring)
1411{
1412        u16 count = TXD_USE_COUNT(skb_headlen(skb));
1413        __be16 protocol = vlan_get_protocol(skb);
1414        struct igc_tx_buffer *first;
1415        u32 tx_flags = 0;
1416        unsigned short f;
1417        u8 hdr_len = 0;
1418        int tso = 0;
1419
1420        /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
1421         *      + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
1422         *      + 2 desc gap to keep tail from touching head,
1423         *      + 1 desc for context descriptor,
1424         * otherwise try next time
1425         */
1426        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1427                count += TXD_USE_COUNT(skb_frag_size(
1428                                                &skb_shinfo(skb)->frags[f]));
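        /* Editor's note: a worked example of the budget with hypothetical
         * sizes: a 1500-byte linear skb with two 4KB frags gives count = 3
         * (one descriptor each), and the check below reserves count + 3
         * slots to cover the context descriptor and the 2-descriptor gap
         * noted above.
         */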
1429
1430        if (igc_maybe_stop_tx(tx_ring, count + 3)) {
1431                /* this is a hard error */
1432                return NETDEV_TX_BUSY;
1433        }
1434
1435        /* record the location of the first descriptor for this packet */
1436        first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1437        first->type = IGC_TX_BUFFER_TYPE_SKB;
1438        first->skb = skb;
1439        first->bytecount = skb->len;
1440        first->gso_segs = 1;
1441
1442        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1443                struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1444
1445                /* FIXME: add support for retrieving timestamps from
1446                 * the other timer registers before skipping the
1447                 * timestamping request.
1448                 */
1449                if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
1450                    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
1451                                           &adapter->state)) {
1452                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1453                        tx_flags |= IGC_TX_FLAGS_TSTAMP;
1454
1455                        adapter->ptp_tx_skb = skb_get(skb);
1456                        adapter->ptp_tx_start = jiffies;
1457                } else {
1458                        adapter->tx_hwtstamp_skipped++;
1459                }
1460        }
1461
1462        if (skb_vlan_tag_present(skb)) {
1463                tx_flags |= IGC_TX_FLAGS_VLAN;
1464                tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
1465        }
1466
1467        /* record initial flags and protocol */
1468        first->tx_flags = tx_flags;
1469        first->protocol = protocol;
1470
1471        tso = igc_tso(tx_ring, first, &hdr_len);
1472        if (tso < 0)
1473                goto out_drop;
1474        else if (!tso)
1475                igc_tx_csum(tx_ring, first);
1476
1477        igc_tx_map(tx_ring, first, hdr_len);
1478
1479        return NETDEV_TX_OK;
1480
1481out_drop:
1482        dev_kfree_skb_any(first->skb);
1483        first->skb = NULL;
1484
1485        return NETDEV_TX_OK;
1486}
1487
1488static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
1489                                                    struct sk_buff *skb)
1490{
1491        unsigned int r_idx = skb->queue_mapping;
1492
1493        if (r_idx >= adapter->num_tx_queues)
1494                r_idx = r_idx % adapter->num_tx_queues;
1495
1496        return adapter->tx_ring[r_idx];
1497}
1498
1499static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
1500                                  struct net_device *netdev)
1501{
1502        struct igc_adapter *adapter = netdev_priv(netdev);
1503
1504        /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
1505         * in order to meet this minimum size requirement.
1506         */
1507        if (skb->len < 17) {
1508                if (skb_padto(skb, 17))
1509                        return NETDEV_TX_OK;
1510                skb->len = 17;
1511        }
1512
1513        return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
1514}
1515
1516static void igc_rx_checksum(struct igc_ring *ring,
1517                            union igc_adv_rx_desc *rx_desc,
1518                            struct sk_buff *skb)
1519{
1520        skb_checksum_none_assert(skb);
1521
1522        /* Ignore Checksum bit is set */
1523        if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
1524                return;
1525
1526        /* Rx checksum disabled via ethtool */
1527        if (!(ring->netdev->features & NETIF_F_RXCSUM))
1528                return;
1529
1530        /* TCP/UDP checksum error bit is set */
1531        if (igc_test_staterr(rx_desc,
1532                             IGC_RXDEXT_STATERR_L4E |
1533                             IGC_RXDEXT_STATERR_IPE)) {
1534                /* work around errata with sctp packets where the TCPE aka
1535                 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
1536                 * packets (aka let the stack check the crc32c)
1537                 */
1538                if (!(skb->len == 60 &&
1539                      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
1540                        u64_stats_update_begin(&ring->rx_syncp);
1541                        ring->rx_stats.csum_err++;
1542                        u64_stats_update_end(&ring->rx_syncp);
1543                }
1544                /* let the stack verify checksum errors */
1545                return;
1546        }
1547        /* It must be a TCP or UDP packet with a valid checksum */
1548        if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
1549                                      IGC_RXD_STAT_UDPCS))
1550                skb->ip_summed = CHECKSUM_UNNECESSARY;
1551
1552        netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
1553                   le32_to_cpu(rx_desc->wb.upper.status_error));
1554}
1555
1556static inline void igc_rx_hash(struct igc_ring *ring,
1557                               union igc_adv_rx_desc *rx_desc,
1558                               struct sk_buff *skb)
1559{
1560        if (ring->netdev->features & NETIF_F_RXHASH)
1561                skb_set_hash(skb,
1562                             le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1563                             PKT_HASH_TYPE_L3);
1564}
1565
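/* When the descriptor marks a loopbacked packet (IGC_RXDEXT_STATERR_LB) and
 * the ring carries IGC_RING_FLAG_RX_LB_VLAN_BSWAP, the VLAN tag was written
 * in network byte order and must be byte-swapped before it is passed up.
 */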
1566static void igc_rx_vlan(struct igc_ring *rx_ring,
1567                        union igc_adv_rx_desc *rx_desc,
1568                        struct sk_buff *skb)
1569{
1570        struct net_device *dev = rx_ring->netdev;
1571        u16 vid;
1572
1573        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1574            igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
1575                if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
1576                    test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
1577                        vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
1578                else
1579                        vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1580
1581                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1582        }
1583}
1584
1585/**
1586 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1587 * @rx_ring: rx descriptor ring packet is being transacted on
1588 * @rx_desc: pointer to the EOP Rx descriptor
1589 * @skb: pointer to current skb being populated
1590 *
1591 * This function checks the ring, descriptor, and packet information in order
1592 * to populate the hash, checksum, VLAN, protocol, and other fields within the
1593 * skb.
1594 */
1595static void igc_process_skb_fields(struct igc_ring *rx_ring,
1596                                   union igc_adv_rx_desc *rx_desc,
1597                                   struct sk_buff *skb)
1598{
1599        igc_rx_hash(rx_ring, rx_desc, skb);
1600
1601        igc_rx_checksum(rx_ring, rx_desc, skb);
1602
1603        igc_rx_vlan(rx_ring, rx_desc, skb);
1604
1605        skb_record_rx_queue(skb, rx_ring->queue_index);
1606
1607        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1608}
1609
1610static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
1611{
1612        bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1613        struct igc_adapter *adapter = netdev_priv(netdev);
1614        struct igc_hw *hw = &adapter->hw;
1615        u32 ctrl;
1616
1617        ctrl = rd32(IGC_CTRL);
1618
1619        if (enable) {
1620                /* enable VLAN tag insert/strip */
1621                ctrl |= IGC_CTRL_VME;
1622        } else {
1623                /* disable VLAN tag insert/strip */
1624                ctrl &= ~IGC_CTRL_VME;
1625        }
1626        wr32(IGC_CTRL, ctrl);
1627}
1628
1629static void igc_restore_vlan(struct igc_adapter *adapter)
1630{
1631        igc_vlan_mode(adapter->netdev, adapter->netdev->features);
1632}
1633
1634static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
1635                                               const unsigned int size,
1636                                               int *rx_buffer_pgcnt)
1637{
1638        struct igc_rx_buffer *rx_buffer;
1639
1640        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1641        *rx_buffer_pgcnt =
1642#if (PAGE_SIZE < 8192)
1643                page_count(rx_buffer->page);
1644#else
1645                0;
1646#endif
1647        prefetchw(rx_buffer->page);
1648
1649        /* we are reusing the buffer, so sync it for CPU use */
1650        dma_sync_single_range_for_cpu(rx_ring->dev,
1651                                      rx_buffer->dma,
1652                                      rx_buffer->page_offset,
1653                                      size,
1654                                      DMA_FROM_DEVICE);
1655
1656        rx_buffer->pagecnt_bias--;
1657
1658        return rx_buffer;
1659}
1660
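/* With PAGE_SIZE < 8192 each page is split into two half-page buffers and
 * the XOR toggles between them; e.g. with truesize == 2048 and no headroom,
 * page_offset alternates between 0 and 2048. On larger pages the buffers
 * are laid out consecutively instead.
 */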
1661static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
1662                               unsigned int truesize)
1663{
1664#if (PAGE_SIZE < 8192)
1665        buffer->page_offset ^= truesize;
1666#else
1667        buffer->page_offset += truesize;
1668#endif
1669}
1670
1671static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
1672                                              unsigned int size)
1673{
1674        unsigned int truesize;
1675
1676#if (PAGE_SIZE < 8192)
1677        truesize = igc_rx_pg_size(ring) / 2;
1678#else
1679        truesize = ring_uses_build_skb(ring) ?
1680                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1681                   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1682                   SKB_DATA_ALIGN(size);
1683#endif
1684        return truesize;
1685}
1686
1687/**
1688 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1689 * @rx_ring: rx descriptor ring to transact packets on
1690 * @rx_buffer: buffer containing page to add
1691 * @skb: sk_buff to place the data into
1692 * @size: size of buffer to be added
1693 *
1694 * This function will add the data contained in rx_buffer->page to the skb.
1695 */
1696static void igc_add_rx_frag(struct igc_ring *rx_ring,
1697                            struct igc_rx_buffer *rx_buffer,
1698                            struct sk_buff *skb,
1699                            unsigned int size)
1700{
1701        unsigned int truesize;
1702
1703#if (PAGE_SIZE < 8192)
1704        truesize = igc_rx_pg_size(rx_ring) / 2;
1705#else
1706        truesize = ring_uses_build_skb(rx_ring) ?
1707                   SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1708                   SKB_DATA_ALIGN(size);
1709#endif
1710        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1711                        rx_buffer->page_offset, size, truesize);
1712
1713        igc_rx_buffer_flip(rx_buffer, truesize);
1714}
1715
1716static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
1717                                     struct igc_rx_buffer *rx_buffer,
1718                                     union igc_adv_rx_desc *rx_desc,
1719                                     unsigned int size)
1720{
1721        void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1722        unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1723        struct sk_buff *skb;
1724
1725        /* prefetch first cache line of first page */
1726        net_prefetch(va);
1727
1728        /* build an skb around the page buffer */
1729        skb = build_skb(va - IGC_SKB_PAD, truesize);
1730        if (unlikely(!skb))
1731                return NULL;
1732
1733        /* update pointers within the skb to store the data */
1734        skb_reserve(skb, IGC_SKB_PAD);
1735        __skb_put(skb, size);
1736
1737        igc_rx_buffer_flip(rx_buffer, truesize);
1738        return skb;
1739}
1740
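/* Slower Rx path used when the ring is not set up for build_skb: allocate a
 * small skb, copy up to IGC_RX_HDR_LEN of headers into its linear area and
 * attach any remaining payload as a page fragment.
 */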
1741static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
1742                                         struct igc_rx_buffer *rx_buffer,
1743                                         struct xdp_buff *xdp,
1744                                         ktime_t timestamp)
1745{
1746        unsigned int size = xdp->data_end - xdp->data;
1747        unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1748        void *va = xdp->data;
1749        unsigned int headlen;
1750        struct sk_buff *skb;
1751
1752        /* prefetch first cache line of first page */
1753        net_prefetch(va);
1754
1755        /* allocate a skb to store the frags */
1756        skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
1757        if (unlikely(!skb))
1758                return NULL;
1759
1760        if (timestamp)
1761                skb_hwtstamps(skb)->hwtstamp = timestamp;
1762
1763        /* Determine how many header bytes to copy into the linear area */
1764        headlen = size;
1765        if (headlen > IGC_RX_HDR_LEN)
1766                headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
1767
1768        /* align pull length to size of long to optimize memcpy performance */
1769        memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1770
1771        /* update all of the pointers */
1772        size -= headlen;
1773        if (size) {
1774                skb_add_rx_frag(skb, 0, rx_buffer->page,
1775                                (va + headlen) - page_address(rx_buffer->page),
1776                                size, truesize);
1777                igc_rx_buffer_flip(rx_buffer, truesize);
1778        } else {
1779                rx_buffer->pagecnt_bias++;
1780        }
1781
1782        return skb;
1783}
1784
1785/**
1786 * igc_reuse_rx_page - page flip buffer and store it back on the ring
1787 * @rx_ring: rx descriptor ring to store buffers on
1788 * @old_buff: donor buffer to have page reused
1789 *
1790 * Synchronizes page for reuse by the adapter
1791 */
1792static void igc_reuse_rx_page(struct igc_ring *rx_ring,
1793                              struct igc_rx_buffer *old_buff)
1794{
1795        u16 nta = rx_ring->next_to_alloc;
1796        struct igc_rx_buffer *new_buff;
1797
1798        new_buff = &rx_ring->rx_buffer_info[nta];
1799
1800        /* update, and store next to alloc */
1801        nta++;
1802        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1803
1804        /* Transfer page from old buffer to new buffer.
1805         * Move each member individually to avoid possible store
1806         * forwarding stalls.
1807         */
1808        new_buff->dma           = old_buff->dma;
1809        new_buff->page          = old_buff->page;
1810        new_buff->page_offset   = old_buff->page_offset;
1811        new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
1812}
1813
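/* Page-reuse bookkeeping: igc_alloc_mapped_page() front-loads USHRT_MAX
 * references on the page and tracks the driver's share in pagecnt_bias, so
 * the hot path can hand buffers to the stack without atomic refcounting;
 * (page_count - pagecnt_bias) > 1 means the stack still owns a piece.
 */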
1814static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
1815                                  int rx_buffer_pgcnt)
1816{
1817        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1818        struct page *page = rx_buffer->page;
1819
1820        /* avoid re-using remote and pfmemalloc pages */
1821        if (!dev_page_is_reusable(page))
1822                return false;
1823
1824#if (PAGE_SIZE < 8192)
1825        /* if we are the only owner of the page we can reuse it */
1826        if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
1827                return false;
1828#else
1829#define IGC_LAST_OFFSET \
1830        (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
1831
1832        if (rx_buffer->page_offset > IGC_LAST_OFFSET)
1833                return false;
1834#endif
1835
1836        /* If we have drained the page fragment pool we need to update
1837         * the pagecnt_bias and page count so that we fully restock the
1838         * number of references the driver holds.
1839         */
1840        if (unlikely(pagecnt_bias == 1)) {
1841                page_ref_add(page, USHRT_MAX - 1);
1842                rx_buffer->pagecnt_bias = USHRT_MAX;
1843        }
1844
1845        return true;
1846}
1847
1848/**
1849 * igc_is_non_eop - process handling of non-EOP buffers
1850 * @rx_ring: Rx ring being processed
1851 * @rx_desc: Rx descriptor for current buffer
1852 *
1853 * This function updates next to clean.  If the buffer is an EOP buffer
1854 * this function exits returning false, otherwise it will place the
1855 * sk_buff in the next buffer to be chained and return true indicating
1856 * that this is in fact a non-EOP buffer.
1857 */
1858static bool igc_is_non_eop(struct igc_ring *rx_ring,
1859                           union igc_adv_rx_desc *rx_desc)
1860{
1861        u32 ntc = rx_ring->next_to_clean + 1;
1862
1863        /* fetch, update, and store next to clean */
1864        ntc = (ntc < rx_ring->count) ? ntc : 0;
1865        rx_ring->next_to_clean = ntc;
1866
1867        prefetch(IGC_RX_DESC(rx_ring, ntc));
1868
1869        if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
1870                return false;
1871
1872        return true;
1873}
1874
1875/**
1876 * igc_cleanup_headers - Correct corrupted or empty headers
1877 * @rx_ring: rx descriptor ring packet is being transacted on
1878 * @rx_desc: pointer to the EOP Rx descriptor
1879 * @skb: pointer to current skb being fixed
1880 *
1881 * Address the case where we are pulling data in on pages only
1882 * and as such no data is present in the skb header.
1883 *
1884 * In addition if skb is not at least 60 bytes we need to pad it so that
1885 * it is large enough to qualify as a valid Ethernet frame.
1886 *
1887 * Returns true if an error was encountered and skb was freed.
1888 */
1889static bool igc_cleanup_headers(struct igc_ring *rx_ring,
1890                                union igc_adv_rx_desc *rx_desc,
1891                                struct sk_buff *skb)
1892{
1893        /* XDP packets use error pointer so abort at this point */
1894        if (IS_ERR(skb))
1895                return true;
1896
1897        if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
1898                struct net_device *netdev = rx_ring->netdev;
1899
1900                if (!(netdev->features & NETIF_F_RXALL)) {
1901                        dev_kfree_skb_any(skb);
1902                        return true;
1903                }
1904        }
1905
1906        /* if eth_skb_pad returns an error the skb was freed */
1907        if (eth_skb_pad(skb))
1908                return true;
1909
1910        return false;
1911}
1912
1913static void igc_put_rx_buffer(struct igc_ring *rx_ring,
1914                              struct igc_rx_buffer *rx_buffer,
1915                              int rx_buffer_pgcnt)
1916{
1917        if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
1918                /* hand second half of page back to the ring */
1919                igc_reuse_rx_page(rx_ring, rx_buffer);
1920        } else {
1921                /* We are not reusing the buffer so unmap it and free
1922                 * any references we are holding to it
1923                 */
1924                dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1925                                     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1926                                     IGC_RX_DMA_ATTR);
1927                __page_frag_cache_drain(rx_buffer->page,
1928                                        rx_buffer->pagecnt_bias);
1929        }
1930
1931        /* clear contents of rx_buffer */
1932        rx_buffer->page = NULL;
1933}
1934
1935static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
1936{
1937        struct igc_adapter *adapter = rx_ring->q_vector->adapter;
1938
1939        if (ring_uses_build_skb(rx_ring))
1940                return IGC_SKB_PAD;
1941        if (igc_xdp_is_enabled(adapter))
1942                return XDP_PACKET_HEADROOM;
1943
1944        return 0;
1945}
1946
1947static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
1948                                  struct igc_rx_buffer *bi)
1949{
1950        struct page *page = bi->page;
1951        dma_addr_t dma;
1952
1953        /* since we are recycling buffers we should seldom need to alloc */
1954        if (likely(page))
1955                return true;
1956
1957        /* alloc new page for storage */
1958        page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
1959        if (unlikely(!page)) {
1960                rx_ring->rx_stats.alloc_failed++;
1961                return false;
1962        }
1963
1964        /* map page for use */
1965        dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1966                                 igc_rx_pg_size(rx_ring),
1967                                 DMA_FROM_DEVICE,
1968                                 IGC_RX_DMA_ATTR);
1969
1970        /* if the mapping failed, free the page back to the system since
1971         * there isn't much point in holding memory we can't use
1972         */
1973        if (dma_mapping_error(rx_ring->dev, dma)) {
1974                __free_page(page);
1975
1976                rx_ring->rx_stats.alloc_failed++;
1977                return false;
1978        }
1979
1980        bi->dma = dma;
1981        bi->page = page;
1982        bi->page_offset = igc_rx_offset(rx_ring);
1983        page_ref_add(page, USHRT_MAX - 1);
1984        bi->pagecnt_bias = USHRT_MAX;
1985
1986        return true;
1987}
1988
1989/**
1990 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
1991 * @rx_ring: rx descriptor ring
1992 * @cleaned_count: number of buffers to clean
1993 */
1994static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
1995{
1996        union igc_adv_rx_desc *rx_desc;
1997        u16 i = rx_ring->next_to_use;
1998        struct igc_rx_buffer *bi;
1999        u16 bufsz;
2000
2001        /* nothing to do */
2002        if (!cleaned_count)
2003                return;
2004
2005        rx_desc = IGC_RX_DESC(rx_ring, i);
2006        bi = &rx_ring->rx_buffer_info[i];
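        /* Bias the index negative so that "!i" in the loop below detects
         * ring wrap without comparing against rx_ring->count each pass.
         */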
2007        i -= rx_ring->count;
2008
2009        bufsz = igc_rx_bufsz(rx_ring);
2010
2011        do {
2012                if (!igc_alloc_mapped_page(rx_ring, bi))
2013                        break;
2014
2015                /* sync the buffer for use by the device */
2016                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
2017                                                 bi->page_offset, bufsz,
2018                                                 DMA_FROM_DEVICE);
2019
2020                /* Refresh the desc even if buffer_addrs didn't change
2021                 * because each write-back erases this info.
2022                 */
2023                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
2024
2025                rx_desc++;
2026                bi++;
2027                i++;
2028                if (unlikely(!i)) {
2029                        rx_desc = IGC_RX_DESC(rx_ring, 0);
2030                        bi = rx_ring->rx_buffer_info;
2031                        i -= rx_ring->count;
2032                }
2033
2034                /* clear the length for the next_to_use descriptor */
2035                rx_desc->wb.upper.length = 0;
2036
2037                cleaned_count--;
2038        } while (cleaned_count);
2039
2040        i += rx_ring->count;
2041
2042        if (rx_ring->next_to_use != i) {
2043                /* record the next descriptor to use */
2044                rx_ring->next_to_use = i;
2045
2046                /* update next to alloc since we have filled the ring */
2047                rx_ring->next_to_alloc = i;
2048
2049                /* Force memory writes to complete before letting h/w
2050                 * know there are new descriptors to fetch.  (Only
2051                 * applicable for weak-ordered memory model archs,
2052                 * such as IA-64).
2053                 */
2054                wmb();
2055                writel(i, rx_ring->tail);
2056        }
2057}
2058
2059static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2060{
2061        union igc_adv_rx_desc *desc;
2062        u16 i = ring->next_to_use;
2063        struct igc_rx_buffer *bi;
2064        dma_addr_t dma;
2065        bool ok = true;
2066
2067        if (!count)
2068                return ok;
2069
2070        desc = IGC_RX_DESC(ring, i);
2071        bi = &ring->rx_buffer_info[i];
2072        i -= ring->count;
2073
2074        do {
2075                bi->xdp = xsk_buff_alloc(ring->xsk_pool);
2076                if (!bi->xdp) {
2077                        ok = false;
2078                        break;
2079                }
2080
2081                dma = xsk_buff_xdp_get_dma(bi->xdp);
2082                desc->read.pkt_addr = cpu_to_le64(dma);
2083
2084                desc++;
2085                bi++;
2086                i++;
2087                if (unlikely(!i)) {
2088                        desc = IGC_RX_DESC(ring, 0);
2089                        bi = ring->rx_buffer_info;
2090                        i -= ring->count;
2091                }
2092
2093                /* Clear the length for the next_to_use descriptor. */
2094                desc->wb.upper.length = 0;
2095
2096                count--;
2097        } while (count);
2098
2099        i += ring->count;
2100
2101        if (ring->next_to_use != i) {
2102                ring->next_to_use = i;
2103
2104                /* Force memory writes to complete before letting h/w
2105                 * know there are new descriptors to fetch.  (Only
2106                 * applicable for weak-ordered memory model archs,
2107                 * such as IA-64).
2108                 */
2109                wmb();
2110                writel(i, ring->tail);
2111        }
2112
2113        return ok;
2114}
2115
2116static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
2117                                  struct xdp_frame *xdpf,
2118                                  struct igc_ring *ring)
2119{
2120        dma_addr_t dma;
2121
2122        dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
2123        if (dma_mapping_error(ring->dev, dma)) {
2124                netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
2125                return -ENOMEM;
2126        }
2127
2128        buffer->type = IGC_TX_BUFFER_TYPE_XDP;
2129        buffer->xdpf = xdpf;
2130        buffer->protocol = 0;
2131        buffer->bytecount = xdpf->len;
2132        buffer->gso_segs = 1;
2133        buffer->time_stamp = jiffies;
2134        dma_unmap_len_set(buffer, len, xdpf->len);
2135        dma_unmap_addr_set(buffer, dma, dma);
2136        return 0;
2137}
2138
2139/* This function requires that __netif_tx_lock be held by the caller. */
2140static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2141                                      struct xdp_frame *xdpf)
2142{
2143        struct igc_tx_buffer *buffer;
2144        union igc_adv_tx_desc *desc;
2145        u32 cmd_type, olinfo_status;
2146        int err;
2147
2148        if (!igc_desc_unused(ring))
2149                return -EBUSY;
2150
2151        buffer = &ring->tx_buffer_info[ring->next_to_use];
2152        err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
2153        if (err)
2154                return err;
2155
2156        cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2157                   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2158                   buffer->bytecount;
2159        olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2160
2161        desc = IGC_TX_DESC(ring, ring->next_to_use);
2162        desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2163        desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2164        desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
2165
2166        netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
2167
2168        buffer->next_to_watch = desc;
2169
2170        ring->next_to_use++;
2171        if (ring->next_to_use == ring->count)
2172                ring->next_to_use = 0;
2173
2174        return 0;
2175}
2176
2177static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
2178                                            int cpu)
2179{
2180        int index = cpu;
2181
2182        if (unlikely(index < 0))
2183                index = 0;
2184
2185        while (index >= adapter->num_tx_queues)
2186                index -= adapter->num_tx_queues;
2187
2188        return adapter->tx_ring[index];
2189}
2190
2191static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2192{
2193        struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2194        int cpu = smp_processor_id();
2195        struct netdev_queue *nq;
2196        struct igc_ring *ring;
2197        int res;
2198
2199        if (unlikely(!xdpf))
2200                return -EFAULT;
2201
2202        ring = igc_xdp_get_tx_ring(adapter, cpu);
2203        nq = txring_txq(ring);
2204
2205        __netif_tx_lock(nq, cpu);
2206        res = igc_xdp_init_tx_descriptor(ring, xdpf);
2207        __netif_tx_unlock(nq);
2208        return res;
2209}
2210
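/* Map the XDP verdict onto the driver's IGC_XDP_* status bits: XDP_PASS
 * builds an skb for the stack, XDP_TX bounces the frame back out of this
 * adapter, XDP_REDIRECT hands it to another device or an AF_XDP socket,
 * and XDP_DROP/XDP_ABORTED consume (recycle) the buffer.
 */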
2211/* This function assumes rcu_read_lock() is held by the caller. */
2212static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2213                              struct bpf_prog *prog,
2214                              struct xdp_buff *xdp)
2215{
2216        u32 act = bpf_prog_run_xdp(prog, xdp);
2217
2218        switch (act) {
2219        case XDP_PASS:
2220                return IGC_XDP_PASS;
2221        case XDP_TX:
2222                if (igc_xdp_xmit_back(adapter, xdp) < 0)
2223                        goto out_failure;
2224                return IGC_XDP_TX;
2225        case XDP_REDIRECT:
2226                if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
2227                        goto out_failure;
2228                return IGC_XDP_REDIRECT;
2230        default:
2231                bpf_warn_invalid_xdp_action(act);
2232                fallthrough;
2233        case XDP_ABORTED:
2234out_failure:
2235                trace_xdp_exception(adapter->netdev, prog, act);
2236                fallthrough;
2237        case XDP_DROP:
2238                return IGC_XDP_CONSUMED;
2239        }
2240}
2241
2242static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2243                                        struct xdp_buff *xdp)
2244{
2245        struct bpf_prog *prog;
2246        int res;
2247
2248        prog = READ_ONCE(adapter->xdp_prog);
2249        if (!prog) {
2250                res = IGC_XDP_PASS;
2251                goto out;
2252        }
2253
2254        res = __igc_xdp_run_prog(adapter, prog, xdp);
2255
2256out:
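        /* The verdict rides in the skb pointer: callers decode it with
         * IS_ERR()/-PTR_ERR(), e.g. IGC_XDP_TX (BIT(1), i.e. 2) comes back
         * as ERR_PTR(-2), while IGC_XDP_PASS (0) yields NULL ("build an skb").
         */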
2257        return ERR_PTR(-res);
2258}
2259
2260/* This function assumes __netif_tx_lock is held by the caller. */
2261static void igc_flush_tx_descriptors(struct igc_ring *ring)
2262{
2263        /* Once the tail pointer is updated, hardware can fetch the
2264         * descriptors at any time, so issue a write memory barrier here
2265         * to ensure all writes complete before the tail is updated.
2266         */
2267        wmb();
2268        writel(ring->next_to_use, ring->tail);
2269}
2270
2271static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2272{
2273        int cpu = smp_processor_id();
2274        struct netdev_queue *nq;
2275        struct igc_ring *ring;
2276
2277        if (status & IGC_XDP_TX) {
2278                ring = igc_xdp_get_tx_ring(adapter, cpu);
2279                nq = txring_txq(ring);
2280
2281                __netif_tx_lock(nq, cpu);
2282                igc_flush_tx_descriptors(ring);
2283                __netif_tx_unlock(nq);
2284        }
2285
2286        if (status & IGC_XDP_REDIRECT)
2287                xdp_do_flush();
2288}
2289
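/* Writers bump the per-ring counters inside a u64_stats section so that
 * 64-bit counters read tear-free on 32-bit machines; a reader (e.g. for
 * .ndo_get_stats64) would pair with it roughly like this (illustrative):
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
 *		packets = ring->rx_stats.packets;
 *		bytes = ring->rx_stats.bytes;
 *	} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
 */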
2290static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2291                                unsigned int packets, unsigned int bytes)
2292{
2293        struct igc_ring *ring = q_vector->rx.ring;
2294
2295        u64_stats_update_begin(&ring->rx_syncp);
2296        ring->rx_stats.packets += packets;
2297        ring->rx_stats.bytes += bytes;
2298        u64_stats_update_end(&ring->rx_syncp);
2299
2300        q_vector->rx.total_packets += packets;
2301        q_vector->rx.total_bytes += bytes;
2302}
2303
2304static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2305{
2306        unsigned int total_bytes = 0, total_packets = 0;
2307        struct igc_adapter *adapter = q_vector->adapter;
2308        struct igc_ring *rx_ring = q_vector->rx.ring;
2309        struct sk_buff *skb = rx_ring->skb;
2310        u16 cleaned_count = igc_desc_unused(rx_ring);
2311        int xdp_status = 0, rx_buffer_pgcnt;
2312
2313        while (likely(total_packets < budget)) {
2314                union igc_adv_rx_desc *rx_desc;
2315                struct igc_rx_buffer *rx_buffer;
2316                unsigned int size, truesize;
2317                ktime_t timestamp = 0;
2318                struct xdp_buff xdp;
2319                int pkt_offset = 0;
2320                void *pktbuf;
2321
2322                /* return some buffers to hardware; one at a time is too slow */
2323                if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
2324                        igc_alloc_rx_buffers(rx_ring, cleaned_count);
2325                        cleaned_count = 0;
2326                }
2327
2328                rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
2329                size = le16_to_cpu(rx_desc->wb.upper.length);
2330                if (!size)
2331                        break;
2332
2333                /* This memory barrier is needed to keep us from reading
2334                 * any other fields out of the rx_desc until we know the
2335                 * descriptor has been written back
2336                 */
2337                dma_rmb();
2338
2339                rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2340                truesize = igc_get_rx_frame_truesize(rx_ring, size);
2341
2342                pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
2343
2344                if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
2345                        timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2346                                                        pktbuf);
2347                        pkt_offset = IGC_TS_HDR_LEN;
2348                        size -= IGC_TS_HDR_LEN;
2349                }
2350
2351                if (!skb) {
2352                        xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
2353                        xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
2354                                         igc_rx_offset(rx_ring) + pkt_offset, size, false);
2355
2356                        skb = igc_xdp_run_prog(adapter, &xdp);
2357                }
2358
2359                if (IS_ERR(skb)) {
2360                        unsigned int xdp_res = -PTR_ERR(skb);
2361
2362                        switch (xdp_res) {
2363                        case IGC_XDP_CONSUMED:
2364                                rx_buffer->pagecnt_bias++;
2365                                break;
2366                        case IGC_XDP_TX:
2367                        case IGC_XDP_REDIRECT:
2368                                igc_rx_buffer_flip(rx_buffer, truesize);
2369                                xdp_status |= xdp_res;
2370                                break;
2371                        }
2372
2373                        total_packets++;
2374                        total_bytes += size;
2375                } else if (skb)
2376                        igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
2377                else if (ring_uses_build_skb(rx_ring))
2378                        skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
2379                else
2380                        skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
2381                                                timestamp);
2382
2383                /* exit if we failed to retrieve a buffer */
2384                if (!skb) {
2385                        rx_ring->rx_stats.alloc_failed++;
2386                        rx_buffer->pagecnt_bias++;
2387                        break;
2388                }
2389
2390                igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2391                cleaned_count++;
2392
2393                /* fetch next buffer in frame if non-eop */
2394                if (igc_is_non_eop(rx_ring, rx_desc))
2395                        continue;
2396
2397                /* verify the packet layout is correct */
2398                if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
2399                        skb = NULL;
2400                        continue;
2401                }
2402
2403                /* probably a little skewed due to removing CRC */
2404                total_bytes += skb->len;
2405
2406                /* populate checksum, VLAN, and protocol */
2407                igc_process_skb_fields(rx_ring, rx_desc, skb);
2408
2409                napi_gro_receive(&q_vector->napi, skb);
2410
2411                /* reset skb pointer */
2412                skb = NULL;
2413
2414                /* update budget accounting */
2415                total_packets++;
2416        }
2417
2418        if (xdp_status)
2419                igc_finalize_xdp(adapter, xdp_status);
2420
2421        /* place incomplete frames back on ring for completion */
2422        rx_ring->skb = skb;
2423
2424        igc_update_rx_stats(q_vector, total_packets, total_bytes);
2425
2426        if (cleaned_count)
2427                igc_alloc_rx_buffers(rx_ring, cleaned_count);
2428
2429        return total_packets;
2430}
2431
2432static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2433                                            struct xdp_buff *xdp)
2434{
2435        unsigned int metasize = xdp->data - xdp->data_meta;
2436        unsigned int datasize = xdp->data_end - xdp->data;
2437        unsigned int totalsize = metasize + datasize;
2438        struct sk_buff *skb;
2439
2440        skb = __napi_alloc_skb(&ring->q_vector->napi,
2441                               xdp->data_end - xdp->data_hard_start,
2442                               GFP_ATOMIC | __GFP_NOWARN);
2443        if (unlikely(!skb))
2444                return NULL;
2445
2446        skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
2447        memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
2448        if (metasize)
2449                skb_metadata_set(skb, metasize);
2450
2451        return skb;
2452}
2453
2454static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2455                                union igc_adv_rx_desc *desc,
2456                                struct xdp_buff *xdp,
2457                                ktime_t timestamp)
2458{
2459        struct igc_ring *ring = q_vector->rx.ring;
2460        struct sk_buff *skb;
2461
2462        skb = igc_construct_skb_zc(ring, xdp);
2463        if (!skb) {
2464                ring->rx_stats.alloc_failed++;
2465                return;
2466        }
2467
2468        if (timestamp)
2469                skb_hwtstamps(skb)->hwtstamp = timestamp;
2470
2471        if (igc_cleanup_headers(ring, desc, skb))
2472                return;
2473
2474        igc_process_skb_fields(ring, desc, skb);
2475        napi_gro_receive(&q_vector->napi, skb);
2476}
2477
2478static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2479{
2480        struct igc_adapter *adapter = q_vector->adapter;
2481        struct igc_ring *ring = q_vector->rx.ring;
2482        u16 cleaned_count = igc_desc_unused(ring);
2483        int total_bytes = 0, total_packets = 0;
2484        u16 ntc = ring->next_to_clean;
2485        struct bpf_prog *prog;
2486        bool failure = false;
2487        int xdp_status = 0;
2488
2489        rcu_read_lock();
2490
2491        prog = READ_ONCE(adapter->xdp_prog);
2492
2493        while (likely(total_packets < budget)) {
2494                union igc_adv_rx_desc *desc;
2495                struct igc_rx_buffer *bi;
2496                ktime_t timestamp = 0;
2497                unsigned int size;
2498                int res;
2499
2500                desc = IGC_RX_DESC(ring, ntc);
2501                size = le16_to_cpu(desc->wb.upper.length);
2502                if (!size)
2503                        break;
2504
2505                /* This memory barrier is needed to keep us from reading
2506                 * any other fields out of the rx_desc until we know the
2507                 * descriptor has been written back
2508                 */
2509                dma_rmb();
2510
2511                bi = &ring->rx_buffer_info[ntc];
2512
2513                if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2514                        timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2515                                                        bi->xdp->data);
2516
2517                        bi->xdp->data += IGC_TS_HDR_LEN;
2518
2519                        /* The HW timestamp was copied into the local variable;
2520                         * the metadata length seen by the XDP program must be 0.
2521                         */
2522                        bi->xdp->data_meta += IGC_TS_HDR_LEN;
2523                        size -= IGC_TS_HDR_LEN;
2524                }
2525
2526                bi->xdp->data_end = bi->xdp->data + size;
2527                xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2528
2529                res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2530                switch (res) {
2531                case IGC_XDP_PASS:
2532                        igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
2533                        fallthrough;
2534                case IGC_XDP_CONSUMED:
2535                        xsk_buff_free(bi->xdp);
2536                        break;
2537                case IGC_XDP_TX:
2538                case IGC_XDP_REDIRECT:
2539                        xdp_status |= res;
2540                        break;
2541                }
2542
2543                bi->xdp = NULL;
2544                total_bytes += size;
2545                total_packets++;
2546                cleaned_count++;
2547                ntc++;
2548                if (ntc == ring->count)
2549                        ntc = 0;
2550        }
2551
2552        ring->next_to_clean = ntc;
2553        rcu_read_unlock();
2554
2555        if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2556                failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2557
2558        if (xdp_status)
2559                igc_finalize_xdp(adapter, xdp_status);
2560
2561        igc_update_rx_stats(q_vector, total_packets, total_bytes);
2562
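        /* With the XDP_USE_NEED_WAKEUP bind flag, userspace only kicks the
         * kernel (e.g. via poll() or recvfrom() on the XSK) while this flag
         * is set, so set it when Rx has stalled and clear it otherwise.
         */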
2563        if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2564                if (failure || ring->next_to_clean == ring->next_to_use)
2565                        xsk_set_rx_need_wakeup(ring->xsk_pool);
2566                else
2567                        xsk_clear_rx_need_wakeup(ring->xsk_pool);
2568                return total_packets;
2569        }
2570
2571        return failure ? budget : total_packets;
2572}
2573
2574static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2575                                unsigned int packets, unsigned int bytes)
2576{
2577        struct igc_ring *ring = q_vector->tx.ring;
2578
2579        u64_stats_update_begin(&ring->tx_syncp);
2580        ring->tx_stats.bytes += bytes;
2581        ring->tx_stats.packets += packets;
2582        u64_stats_update_end(&ring->tx_syncp);
2583
2584        q_vector->tx.total_bytes += bytes;
2585        q_vector->tx.total_packets += packets;
2586}
2587
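/* Drain the AF_XDP Tx ring into hardware descriptors, at most one frame per
 * unused descriptor; completed frames are returned to the pool from
 * igc_clean_tx_irq() via xsk_tx_completed() once hardware reports DD.
 */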
2588static void igc_xdp_xmit_zc(struct igc_ring *ring)
2589{
2590        struct xsk_buff_pool *pool = ring->xsk_pool;
2591        struct netdev_queue *nq = txring_txq(ring);
2592        union igc_adv_tx_desc *tx_desc = NULL;
2593        int cpu = smp_processor_id();
2594        u16 ntu = ring->next_to_use;
2595        struct xdp_desc xdp_desc;
2596        u16 budget;
2597
2598        if (!netif_carrier_ok(ring->netdev))
2599                return;
2600
2601        __netif_tx_lock(nq, cpu);
2602
2603        budget = igc_desc_unused(ring);
2604
2605        while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
2606                u32 cmd_type, olinfo_status;
2607                struct igc_tx_buffer *bi;
2608                dma_addr_t dma;
2609
2610                cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2611                           IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2612                           xdp_desc.len;
2613                olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
2614
2615                dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2616                xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
2617
2618                tx_desc = IGC_TX_DESC(ring, ntu);
2619                tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2620                tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2621                tx_desc->read.buffer_addr = cpu_to_le64(dma);
2622
2623                bi = &ring->tx_buffer_info[ntu];
2624                bi->type = IGC_TX_BUFFER_TYPE_XSK;
2625                bi->protocol = 0;
2626                bi->bytecount = xdp_desc.len;
2627                bi->gso_segs = 1;
2628                bi->time_stamp = jiffies;
2629                bi->next_to_watch = tx_desc;
2630
2631                netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
2632
2633                ntu++;
2634                if (ntu == ring->count)
2635                        ntu = 0;
2636        }
2637
2638        ring->next_to_use = ntu;
2639        if (tx_desc) {
2640                igc_flush_tx_descriptors(ring);
2641                xsk_tx_release(pool);
2642        }
2643
2644        __netif_tx_unlock(nq);
2645}
2646
2647/**
2648 * igc_clean_tx_irq - Reclaim resources after transmit completes
2649 * @q_vector: pointer to q_vector containing needed info
2650 * @napi_budget: Used to determine if we are in netpoll
2651 *
2652 * returns true if ring is completely cleaned
2653 */
2654static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2655{
2656        struct igc_adapter *adapter = q_vector->adapter;
2657        unsigned int total_bytes = 0, total_packets = 0;
2658        unsigned int budget = q_vector->tx.work_limit;
2659        struct igc_ring *tx_ring = q_vector->tx.ring;
2660        unsigned int i = tx_ring->next_to_clean;
2661        struct igc_tx_buffer *tx_buffer;
2662        union igc_adv_tx_desc *tx_desc;
2663        u32 xsk_frames = 0;
2664
2665        if (test_bit(__IGC_DOWN, &adapter->state))
2666                return true;
2667
2668        tx_buffer = &tx_ring->tx_buffer_info[i];
2669        tx_desc = IGC_TX_DESC(tx_ring, i);
2670        i -= tx_ring->count;
2671
2672        do {
2673                union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2674
2675                /* if next_to_watch is not set then there is no work pending */
2676                if (!eop_desc)
2677                        break;
2678
2679                /* prevent any other reads prior to eop_desc */
2680                smp_rmb();
2681
2682                /* if DD is not set pending work has not been completed */
2683                if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2684                        break;
2685
2686                /* clear next_to_watch to prevent false hangs */
2687                tx_buffer->next_to_watch = NULL;
2688
2689                /* update the statistics for this packet */
2690                total_bytes += tx_buffer->bytecount;
2691                total_packets += tx_buffer->gso_segs;
2692
2693                switch (tx_buffer->type) {
2694                case IGC_TX_BUFFER_TYPE_XSK:
2695                        xsk_frames++;
2696                        break;
2697                case IGC_TX_BUFFER_TYPE_XDP:
2698                        xdp_return_frame(tx_buffer->xdpf);
2699                        igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2700                        break;
2701                case IGC_TX_BUFFER_TYPE_SKB:
2702                        napi_consume_skb(tx_buffer->skb, napi_budget);
2703                        igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2704                        break;
2705                default:
2706                        netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
2707                        break;
2708                }
2709
2710                /* clear last DMA location and unmap remaining buffers */
2711                while (tx_desc != eop_desc) {
2712                        tx_buffer++;
2713                        tx_desc++;
2714                        i++;
2715                        if (unlikely(!i)) {
2716                                i -= tx_ring->count;
2717                                tx_buffer = tx_ring->tx_buffer_info;
2718                                tx_desc = IGC_TX_DESC(tx_ring, 0);
2719                        }
2720
2721                        /* unmap any remaining paged data */
2722                        if (dma_unmap_len(tx_buffer, len))
2723                                igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2724                }
2725
2726                /* move us one more past the eop_desc for start of next pkt */
2727                tx_buffer++;
2728                tx_desc++;
2729                i++;
2730                if (unlikely(!i)) {
2731                        i -= tx_ring->count;
2732                        tx_buffer = tx_ring->tx_buffer_info;
2733                        tx_desc = IGC_TX_DESC(tx_ring, 0);
2734                }
2735
2736                /* issue prefetch for next Tx descriptor */
2737                prefetch(tx_desc);
2738
2739                /* update budget accounting */
2740                budget--;
2741        } while (likely(budget));
2742
2743        netdev_tx_completed_queue(txring_txq(tx_ring),
2744                                  total_packets, total_bytes);
2745
2746        i += tx_ring->count;
2747        tx_ring->next_to_clean = i;
2748
2749        igc_update_tx_stats(q_vector, total_packets, total_bytes);
2750
2751        if (tx_ring->xsk_pool) {
2752                if (xsk_frames)
2753                        xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
2754                if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
2755                        xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
2756                igc_xdp_xmit_zc(tx_ring);
2757        }
2758
2759        if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
2760                struct igc_hw *hw = &adapter->hw;
2761
2762                /* Detect a transmit hang in hardware; this serializes the
2763                 * check with the clearing of time_stamp and movement of i
2764                 */
2765                clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2766                if (tx_buffer->next_to_watch &&
2767                    time_after(jiffies, tx_buffer->time_stamp +
2768                    (adapter->tx_timeout_factor * HZ)) &&
2769                    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
2770                        /* detected Tx unit hang */
2771                        netdev_err(tx_ring->netdev,
2772                                   "Detected Tx Unit Hang\n"
2773                                   "  Tx Queue             <%d>\n"
2774                                   "  TDH                  <%x>\n"
2775                                   "  TDT                  <%x>\n"
2776                                   "  next_to_use          <%x>\n"
2777                                   "  next_to_clean        <%x>\n"
2778                                   "buffer_info[next_to_clean]\n"
2779                                   "  time_stamp           <%lx>\n"
2780                                   "  next_to_watch        <%p>\n"
2781                                   "  jiffies              <%lx>\n"
2782                                   "  desc.status          <%x>\n",
2783                                   tx_ring->queue_index,
2784                                   rd32(IGC_TDH(tx_ring->reg_idx)),
2785                                   readl(tx_ring->tail),
2786                                   tx_ring->next_to_use,
2787                                   tx_ring->next_to_clean,
2788                                   tx_buffer->time_stamp,
2789                                   tx_buffer->next_to_watch,
2790                                   jiffies,
2791                                   tx_buffer->next_to_watch->wb.status);
2792                        netif_stop_subqueue(tx_ring->netdev,
2793                                            tx_ring->queue_index);
2794
2795                        /* we are about to reset, no point in enabling stuff */
2796                        return true;
2797                }
2798        }
2799
2800#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
2801        if (unlikely(total_packets &&
2802                     netif_carrier_ok(tx_ring->netdev) &&
2803                     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
2804                /* Make sure that anybody stopping the queue after this
2805                 * sees the new next_to_clean.
2806                 */
2807                smp_mb();
2808                if (__netif_subqueue_stopped(tx_ring->netdev,
2809                                             tx_ring->queue_index) &&
2810                    !(test_bit(__IGC_DOWN, &adapter->state))) {
2811                        netif_wake_subqueue(tx_ring->netdev,
2812                                            tx_ring->queue_index);
2813
2814                        u64_stats_update_begin(&tx_ring->tx_syncp);
2815                        tx_ring->tx_stats.restart_queue++;
2816                        u64_stats_update_end(&tx_ring->tx_syncp);
2817                }
2818        }
2819
2820        return !!budget;
2821}
2822
2823static int igc_find_mac_filter(struct igc_adapter *adapter,
2824                               enum igc_mac_filter_type type, const u8 *addr)
2825{
2826        struct igc_hw *hw = &adapter->hw;
2827        int max_entries = hw->mac.rar_entry_count;
2828        u32 ral, rah;
2829        int i;
2830
2831        for (i = 0; i < max_entries; i++) {
2832                ral = rd32(IGC_RAL(i));
2833                rah = rd32(IGC_RAH(i));
2834
2835                if (!(rah & IGC_RAH_AV))
2836                        continue;
2837                if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
2838                        continue;
2839                if ((rah & IGC_RAH_RAH_MASK) !=
2840                    le16_to_cpup((__le16 *)(addr + 4)))
2841                        continue;
2842                if (ral != le32_to_cpup((__le32 *)(addr)))
2843                        continue;
2844
2845                return i;
2846        }
2847
2848        return -1;
2849}
2850
2851static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
2852{
2853        struct igc_hw *hw = &adapter->hw;
2854        int max_entries = hw->mac.rar_entry_count;
2855        u32 rah;
2856        int i;
2857
2858        for (i = 0; i < max_entries; i++) {
2859                rah = rd32(IGC_RAH(i));
2860
2861                if (!(rah & IGC_RAH_AV))
2862                        return i;
2863        }
2864
2865        return -1;
2866}
2867
2868/**
2869 * igc_add_mac_filter() - Add MAC address filter
2870 * @adapter: Pointer to adapter where the filter should be added
2871 * @type: MAC address filter type (source or destination)
2872 * @addr: MAC address
2873 * @queue: If non-negative, queue assignment feature is enabled and frames
2874 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
2875 *         assignment is disabled.
2876 *
2877 * Return: 0 in case of success, negative errno code otherwise.
2878 */
2879static int igc_add_mac_filter(struct igc_adapter *adapter,
2880                              enum igc_mac_filter_type type, const u8 *addr,
2881                              int queue)
2882{
2883        struct net_device *dev = adapter->netdev;
2884        int index;
2885
2886        index = igc_find_mac_filter(adapter, type, addr);
2887        if (index >= 0)
2888                goto update_filter;
2889
2890        index = igc_get_avail_mac_filter_slot(adapter);
2891        if (index < 0)
2892                return -ENOSPC;
2893
2894        netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
2895                   index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2896                   addr, queue);
2897
2898update_filter:
2899        igc_set_mac_filter_hw(adapter, index, type, addr, queue);
2900        return 0;
2901}
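
/* Illustrative usage (hypothetical values): steering frames whose
 * destination MAC matches 'addr' to Rx queue 1 would be
 *	igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, 1);
 * while passing queue == -1 adds the filter without queue assignment.
 */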
2902
2903/**
2904 * igc_del_mac_filter() - Delete MAC address filter
2905 * @adapter: Pointer to adapter where the filter should be deleted from
2906 * @type: MAC address filter type (source or destination)
2907 * @addr: MAC address
2908 */
2909static void igc_del_mac_filter(struct igc_adapter *adapter,
2910                               enum igc_mac_filter_type type, const u8 *addr)
2911{
2912        struct net_device *dev = adapter->netdev;
2913        int index;
2914
2915        index = igc_find_mac_filter(adapter, type, addr);
2916        if (index < 0)
2917                return;
2918
2919        if (index == 0) {
2920                /* If this is the default filter, we don't actually delete it.
2921                 * We just reset it to its default value, i.e. disable queue
2922                 * assignment.
2923                 */
2924                netdev_dbg(dev, "Disable default MAC filter queue assignment\n");
2925
2926                igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
2927        } else {
2928                netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
2929                           index,
2930                           type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2931                           addr);
2932
2933                igc_clear_mac_filter_hw(adapter, index);
2934        }
2935}
2936
2937/**
2938 * igc_add_vlan_prio_filter() - Add VLAN priority filter
2939 * @adapter: Pointer to adapter where the filter should be added
2940 * @prio: VLAN priority value
2941 * @queue: Queue number which matching frames are assigned to
2942 *
2943 * Return: 0 in case of success, negative errno code otherwise.
2944 */
2945static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
2946                                    int queue)
2947{
2948        struct net_device *dev = adapter->netdev;
2949        struct igc_hw *hw = &adapter->hw;
2950        u32 vlanpqf;
2951
2952        vlanpqf = rd32(IGC_VLANPQF);
2953
2954        if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
2955                netdev_dbg(dev, "VLAN priority filter already in use\n");
2956                return -EEXIST;
2957        }
2958
2959        vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
2960        vlanpqf |= IGC_VLANPQF_VALID(prio);
2961
2962        wr32(IGC_VLANPQF, vlanpqf);
2963
2964        netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
2965                   prio, queue);
2966        return 0;
2967}
2968
2969/**
2970 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
2971 * @adapter: Pointer to adapter where the filter should be deleted from
2972 * @prio: VLAN priority value
2973 */
2974static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
2975{
2976        struct igc_hw *hw = &adapter->hw;
2977        u32 vlanpqf;
2978
2979        vlanpqf = rd32(IGC_VLANPQF);
2980
2981        vlanpqf &= ~IGC_VLANPQF_VALID(prio);
2982        vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
2983
2984        wr32(IGC_VLANPQF, vlanpqf);
2985
2986        netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
2987                   prio);
2988}
2989
2990static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2991{
2992        struct igc_hw *hw = &adapter->hw;
2993        int i;
2994
2995        for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2996                u32 etqf = rd32(IGC_ETQF(i));
2997
2998                if (!(etqf & IGC_ETQF_FILTER_ENABLE))
2999                        return i;
3000        }
3001
3002        return -1;
3003}
3004
3005/**
3006 * igc_add_etype_filter() - Add ethertype filter
3007 * @adapter: Pointer to adapter where the filter should be added
3008 * @etype: Ethertype value
3009 * @queue: If non-negative, queue assignment feature is enabled and frames
3010 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
3011 *         assignment is disabled.
3012 *
3013 * Return: 0 in case of success, negative errno code otherwise.
3014 */
3015static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
3016                                int queue)
3017{
3018        struct igc_hw *hw = &adapter->hw;
3019        int index;
3020        u32 etqf;
3021
3022        index = igc_get_avail_etype_filter_slot(adapter);
3023        if (index < 0)
3024                return -ENOSPC;
3025
3026        etqf = rd32(IGC_ETQF(index));
3027
3028        etqf &= ~IGC_ETQF_ETYPE_MASK;
3029        etqf |= etype;
3030
3031        if (queue >= 0) {
3032                etqf &= ~IGC_ETQF_QUEUE_MASK;
3033                etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
3034                etqf |= IGC_ETQF_QUEUE_ENABLE;
3035        }
3036
3037        etqf |= IGC_ETQF_FILTER_ENABLE;
3038
3039        wr32(IGC_ETQF(index), etqf);
3040
3041        netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
3042                   etype, queue);
3043        return 0;
3044}
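
/* Worked example (symbolic, based only on the logic above; 0x88F7 is just
 * an illustrative ethertype): steering etype 0x88F7 to queue 1 programs
 *
 *      ETQF(index) = 0x88F7 | (1 << IGC_ETQF_QUEUE_SHIFT) |
 *                    IGC_ETQF_QUEUE_ENABLE | IGC_ETQF_FILTER_ENABLE
 *
 * while a negative queue sets only the etype bits plus
 * IGC_ETQF_FILTER_ENABLE, i.e. the filter matches without steering.
 */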
3045
3046static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
3047{
3048        struct igc_hw *hw = &adapter->hw;
3049        int i;
3050
3051        for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3052                u32 etqf = rd32(IGC_ETQF(i));
3053
3054                if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
3055                        return i;
3056        }
3057
3058        return -1;
3059}
3060
3061/**
3062 * igc_del_etype_filter() - Delete ethertype filter
3063 * @adapter: Pointer to adapter where the filter should be deleted from
3064 * @etype: Ethertype value
3065 */
3066static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
3067{
3068        struct igc_hw *hw = &adapter->hw;
3069        int index;
3070
3071        index = igc_find_etype_filter(adapter, etype);
3072        if (index < 0)
3073                return;
3074
3075        wr32(IGC_ETQF(index), 0);
3076
3077        netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
3078                   etype);
3079}
3080
3081static int igc_enable_nfc_rule(struct igc_adapter *adapter,
3082                               const struct igc_nfc_rule *rule)
3083{
3084        int err;
3085
3086        if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3087                err = igc_add_etype_filter(adapter, rule->filter.etype,
3088                                           rule->action);
3089                if (err)
3090                        return err;
3091        }
3092
3093        if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3094                err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3095                                         rule->filter.src_addr, rule->action);
3096                if (err)
3097                        return err;
3098        }
3099
3100        if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3101                err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3102                                         rule->filter.dst_addr, rule->action);
3103                if (err)
3104                        return err;
3105        }
3106
3107        if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3108                int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3109                           VLAN_PRIO_SHIFT;
3110
3111                err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
3112                if (err)
3113                        return err;
3114        }
3115
3116        return 0;
3117}
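
/* Illustrative sketch (not from the driver): a minimal rule that
 * igc_enable_nfc_rule() above would program as a single destination-MAC
 * filter steering to queue 1. Field names follow the usage above; 'mac'
 * is an assumed u8[ETH_ALEN] address.
 *
 *      struct igc_nfc_rule *rule = kzalloc(sizeof(*rule), GFP_KERNEL);
 *
 *      if (!rule)
 *              return -ENOMEM;
 *      rule->filter.match_flags = IGC_FILTER_FLAG_DST_MAC_ADDR;
 *      ether_addr_copy(rule->filter.dst_addr, mac);
 *      rule->action = 1;
 *      rule->location = 0;
 */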
3118
3119static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3120                                 const struct igc_nfc_rule *rule)
3121{
3122        if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3123                igc_del_etype_filter(adapter, rule->filter.etype);
3124
3125        if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3126                int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3127                           VLAN_PRIO_SHIFT;
3128
3129                igc_del_vlan_prio_filter(adapter, prio);
3130        }
3131
3132        if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3133                igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3134                                   rule->filter.src_addr);
3135
3136        if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3137                igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3138                                   rule->filter.dst_addr);
3139}
3140
3141/**
3142 * igc_get_nfc_rule() - Get NFC rule
3143 * @adapter: Pointer to adapter
3144 * @location: Rule location
3145 *
3146 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3147 *
3148 * Return: Pointer to NFC rule at @location. If not found, NULL.
3149 */
3150struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3151                                      u32 location)
3152{
3153        struct igc_nfc_rule *rule;
3154
3155        list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
3156                if (rule->location == location)
3157                        return rule;
3158                if (rule->location > location)
3159                        break;
3160        }
3161
3162        return NULL;
3163}
3164
3165/**
3166 * igc_del_nfc_rule() - Delete NFC rule
3167 * @adapter: Pointer to adapter
3168 * @rule: Pointer to rule to be deleted
3169 *
3170 * Disable NFC rule in hardware and delete it from adapter.
3171 *
3172 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3173 */
3174void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3175{
3176        igc_disable_nfc_rule(adapter, rule);
3177
3178        list_del(&rule->list);
3179        adapter->nfc_rule_count--;
3180
3181        kfree(rule);
3182}
3183
3184static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3185{
3186        struct igc_nfc_rule *rule, *tmp;
3187
3188        mutex_lock(&adapter->nfc_rule_lock);
3189
3190        list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3191                igc_del_nfc_rule(adapter, rule);
3192
3193        mutex_unlock(&adapter->nfc_rule_lock);
3194}
3195
3196/**
3197 * igc_add_nfc_rule() - Add NFC rule
3198 * @adapter: Pointer to adapter
3199 * @rule: Pointer to rule to be added
3200 *
3201 * Enable NFC rule in hardware and add it to adapter.
3202 *
3203 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3204 *
3205 * Return: 0 on success, negative errno on failure.
3206 */
3207int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3208{
3209        struct igc_nfc_rule *pred, *cur;
3210        int err;
3211
3212        err = igc_enable_nfc_rule(adapter, rule);
3213        if (err)
3214                return err;
3215
3216        pred = NULL;
3217        list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
3218                if (cur->location >= rule->location)
3219                        break;
3220                pred = cur;
3221        }
3222
3223        list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
3224        adapter->nfc_rule_count++;
3225        return 0;
3226}
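
/* Hedged usage sketch: per the kernel-doc above, callers serialize rule
 * changes themselves, e.g.
 *
 *      mutex_lock(&adapter->nfc_rule_lock);
 *      err = igc_add_nfc_rule(adapter, rule);
 *      mutex_unlock(&adapter->nfc_rule_lock);
 *
 * The 'pred' scan keeps nfc_rule_list sorted by location, which is what
 * allows igc_get_nfc_rule() to stop early once rule->location passes the
 * requested location.
 */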
3227
3228static void igc_restore_nfc_rules(struct igc_adapter *adapter)
3229{
3230        struct igc_nfc_rule *rule;
3231
3232        mutex_lock(&adapter->nfc_rule_lock);
3233
3234        list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
3235                igc_enable_nfc_rule(adapter, rule);
3236
3237        mutex_unlock(&adapter->nfc_rule_lock);
3238}
3239
3240static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
3241{
3242        struct igc_adapter *adapter = netdev_priv(netdev);
3243
3244        return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
3245}
3246
3247static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
3248{
3249        struct igc_adapter *adapter = netdev_priv(netdev);
3250
3251        igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
3252        return 0;
3253}
3254
3255/**
3256 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3257 * @netdev: network interface device structure
3258 *
3259 * The set_rx_mode entry point is called whenever the unicast or multicast
3260 * address lists or the network interface flags are updated.  This routine is
3261 * responsible for configuring the hardware for proper unicast, multicast,
3262 * promiscuous mode, and all-multi behavior.
3263 */
3264static void igc_set_rx_mode(struct net_device *netdev)
3265{
3266        struct igc_adapter *adapter = netdev_priv(netdev);
3267        struct igc_hw *hw = &adapter->hw;
3268        u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
3269        int count;
3270
3271        /* Check for Promiscuous and All Multicast modes */
3272        if (netdev->flags & IFF_PROMISC) {
3273                rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
3274        } else {
3275                if (netdev->flags & IFF_ALLMULTI) {
3276                        rctl |= IGC_RCTL_MPE;
3277                } else {
3278                        /* Write addresses to the MTA; if the attempt fails,
3279                         * turn on multicast promiscuous mode so that we can
3280                         * at least receive multicast traffic
3281                         */
3282                        count = igc_write_mc_addr_list(netdev);
3283                        if (count < 0)
3284                                rctl |= IGC_RCTL_MPE;
3285                }
3286        }
3287
3288        /* Write addresses to available RAR registers; if there is not
3289         * sufficient space to store all the addresses, enable
3290         * unicast promiscuous mode
3291         */
3292        if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3293                rctl |= IGC_RCTL_UPE;
3294
3295        /* update state of unicast and multicast */
3296        rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3297        wr32(IGC_RCTL, rctl);
3298
3299#if (PAGE_SIZE < 8192)
3300        if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
3301                rlpml = IGC_MAX_FRAME_BUILD_SKB;
3302#endif
3303        wr32(IGC_RLPML, rlpml);
3304}
3305
3306/**
3307 * igc_configure - configure the hardware for RX and TX
3308 * @adapter: private board structure
3309 */
3310static void igc_configure(struct igc_adapter *adapter)
3311{
3312        struct net_device *netdev = adapter->netdev;
3313        int i = 0;
3314
3315        igc_get_hw_control(adapter);
3316        igc_set_rx_mode(netdev);
3317
3318        igc_restore_vlan(adapter);
3319
3320        igc_setup_tctl(adapter);
3321        igc_setup_mrqc(adapter);
3322        igc_setup_rctl(adapter);
3323
3324        igc_set_default_mac_filter(adapter);
3325        igc_restore_nfc_rules(adapter);
3326
3327        igc_configure_tx(adapter);
3328        igc_configure_rx(adapter);
3329
3330        igc_rx_fifo_flush_base(&adapter->hw);
3331
3332        /* call igc_desc_unused, which always leaves
3333         * at least 1 descriptor unused to make sure
3334         * next_to_use != next_to_clean
3335         */
3336        for (i = 0; i < adapter->num_rx_queues; i++) {
3337                struct igc_ring *ring = adapter->rx_ring[i];
3338
3339                if (ring->xsk_pool)
3340                        igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
3341                else
3342                        igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
3343        }
3344}
3345
3346/**
3347 * igc_write_ivar - configure ivar for given MSI-X vector
3348 * @hw: pointer to the HW structure
3349 * @msix_vector: vector number we are allocating to a given ring
3350 * @index: row index of IVAR register to write within IVAR table
3351 * @offset: column offset within IVAR, should be a multiple of 8
3352 *
3353 * The IVAR table consists of 2 columns,
3354 * each containing a cause allocation for an Rx and Tx ring, and a
3355 * variable number of rows depending on the number of queues supported.
3356 */
3357static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3358                           int index, int offset)
3359{
3360        u32 ivar = array_rd32(IGC_IVAR0, index);
3361
3362        /* clear any bits that are currently set */
3363        ivar &= ~((u32)0xFF << offset);
3364
3365        /* write vector and valid bit */
3366        ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
3367
3368        array_wr32(IGC_IVAR0, index, ivar);
3369}
3370
3371static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3372{
3373        struct igc_adapter *adapter = q_vector->adapter;
3374        struct igc_hw *hw = &adapter->hw;
3375        int rx_queue = IGC_N0_QUEUE;
3376        int tx_queue = IGC_N0_QUEUE;
3377
3378        if (q_vector->rx.ring)
3379                rx_queue = q_vector->rx.ring->reg_idx;
3380        if (q_vector->tx.ring)
3381                tx_queue = q_vector->tx.ring->reg_idx;
3382
3383        switch (hw->mac.type) {
3384        case igc_i225:
3385                if (rx_queue > IGC_N0_QUEUE)
3386                        igc_write_ivar(hw, msix_vector,
3387                                       rx_queue >> 1,
3388                                       (rx_queue & 0x1) << 4);
3389                if (tx_queue > IGC_N0_QUEUE)
3390                        igc_write_ivar(hw, msix_vector,
3391                                       tx_queue >> 1,
3392                                       ((tx_queue & 0x1) << 4) + 8);
3393                q_vector->eims_value = BIT(msix_vector);
3394                break;
3395        default:
3396                WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
3397                break;
3398        }
3399
3400        /* add q_vector eims value to global eims_enable_mask */
3401        adapter->eims_enable_mask |= q_vector->eims_value;
3402
3403        /* configure q_vector to set itr on first interrupt */
3404        q_vector->set_itr = 1;
3405}
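
/* Worked example of the mapping above (queue numbers illustrative): two
 * queues share each IVAR row, so Rx queue 5 is programmed into row
 * 5 >> 1 = 2 at column offset (5 & 1) << 4 = 16, and Tx queue 5 into the
 * same row at offset 16 + 8 = 24. Each 8-bit field holds the MSI-X
 * vector number ORed with IGC_IVAR_VALID.
 */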
3406
3407/**
3408 * igc_configure_msix - Configure MSI-X hardware
3409 * @adapter: Pointer to adapter structure
3410 *
3411 * igc_configure_msix sets up the hardware to properly
3412 * generate MSI-X interrupts.
3413 */
3414static void igc_configure_msix(struct igc_adapter *adapter)
3415{
3416        struct igc_hw *hw = &adapter->hw;
3417        int i, vector = 0;
3418        u32 tmp;
3419
3420        adapter->eims_enable_mask = 0;
3421
3422        /* set vector for other causes, i.e. link changes */
3423        switch (hw->mac.type) {
3424        case igc_i225:
3425                /* Turn on MSI-X capability first, or our settings
3426                 * won't stick.  And it will take days to debug.
3427                 */
3428                wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
3429                     IGC_GPIE_PBA | IGC_GPIE_EIAME |
3430                     IGC_GPIE_NSICR);
3431
3432                /* enable msix_other interrupt */
3433                adapter->eims_other = BIT(vector);
3434                tmp = (vector++ | IGC_IVAR_VALID) << 8;
3435
3436                wr32(IGC_IVAR_MISC, tmp);
3437                break;
3438        default:
3439                /* do nothing, since nothing else supports MSI-X */
3440                break;
3441        } /* switch (hw->mac.type) */
3442
3443        adapter->eims_enable_mask |= adapter->eims_other;
3444
3445        for (i = 0; i < adapter->num_q_vectors; i++)
3446                igc_assign_vector(adapter->q_vector[i], vector++);
3447
3448        wrfl();
3449}
3450
3451/**
3452 * igc_irq_enable - Enable default interrupt generation settings
3453 * @adapter: board private structure
3454 */
3455static void igc_irq_enable(struct igc_adapter *adapter)
3456{
3457        struct igc_hw *hw = &adapter->hw;
3458
3459        if (adapter->msix_entries) {
3460                u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
3461                u32 regval = rd32(IGC_EIAC);
3462
3463                wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
3464                regval = rd32(IGC_EIAM);
3465                wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
3466                wr32(IGC_EIMS, adapter->eims_enable_mask);
3467                wr32(IGC_IMS, ims);
3468        } else {
3469                wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3470                wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3471        }
3472}
3473
3474/**
3475 * igc_irq_disable - Mask off interrupt generation on the NIC
3476 * @adapter: board private structure
3477 */
3478static void igc_irq_disable(struct igc_adapter *adapter)
3479{
3480        struct igc_hw *hw = &adapter->hw;
3481
3482        if (adapter->msix_entries) {
3483                u32 regval = rd32(IGC_EIAM);
3484
3485                wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
3486                wr32(IGC_EIMC, adapter->eims_enable_mask);
3487                regval = rd32(IGC_EIAC);
3488                wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
3489        }
3490
3491        wr32(IGC_IAM, 0);
3492        wr32(IGC_IMC, ~0);
3493        wrfl();
3494
3495        if (adapter->msix_entries) {
3496                int vector = 0, i;
3497
3498                synchronize_irq(adapter->msix_entries[vector++].vector);
3499
3500                for (i = 0; i < adapter->num_q_vectors; i++)
3501                        synchronize_irq(adapter->msix_entries[vector++].vector);
3502        } else {
3503                synchronize_irq(adapter->pdev->irq);
3504        }
3505}
3506
3507void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
3508                              const u32 max_rss_queues)
3509{
3510        /* Determine if we need to pair queues: if rss_queues > half of
3511         * max_rss_queues, pair the queues in order to conserve interrupts
3512         * due to limited supply.
3513         */
3514        if (adapter->rss_queues > (max_rss_queues / 2))
3515                adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3516        else
3517                adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
3518}
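
/* Example of the pairing rule above: with max_rss_queues = 4, three or
 * four RSS queues (rss_queues > 2) make each vector serve one Rx/Tx
 * pair, while one or two RSS queues keep separate Tx and Rx vectors.
 */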
3519
3520unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
3521{
3522        return IGC_MAX_RX_QUEUES;
3523}
3524
3525static void igc_init_queue_configuration(struct igc_adapter *adapter)
3526{
3527        u32 max_rss_queues;
3528
3529        max_rss_queues = igc_get_max_rss_queues(adapter);
3530        adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3531
3532        igc_set_flag_queue_pairs(adapter, max_rss_queues);
3533}
3534
3535/**
3536 * igc_reset_q_vector - Reset config for interrupt vector
3537 * @adapter: board private structure to initialize
3538 * @v_idx: Index of vector to be reset
3539 *
3540 * If NAPI is enabled it will delete any references to the
3541 * NAPI struct. This is preparation for igc_free_q_vector.
3542 */
3543static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
3544{
3545        struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
3546
3547        /* if we're coming from igc_set_interrupt_capability, the vectors are
3548         * not yet allocated
3549         */
3550        if (!q_vector)
3551                return;
3552
3553        if (q_vector->tx.ring)
3554                adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
3555
3556        if (q_vector->rx.ring)
3557                adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
3558
3559        netif_napi_del(&q_vector->napi);
3560}
3561
3562/**
3563 * igc_free_q_vector - Free memory allocated for specific interrupt vector
3564 * @adapter: board private structure to initialize
3565 * @v_idx: Index of vector to be freed
3566 *
3567 * This function frees the memory allocated to the q_vector.
3568 */
3569static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
3570{
3571        struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
3572
3573        adapter->q_vector[v_idx] = NULL;
3574
3575        /* igc_get_stats64() might access the rings on this vector,
3576         * we must wait a grace period before freeing it.
3577         */
3578        if (q_vector)
3579                kfree_rcu(q_vector, rcu);
3580}
3581
3582/**
3583 * igc_free_q_vectors - Free memory allocated for interrupt vectors
3584 * @adapter: board private structure to initialize
3585 *
3586 * This function frees the memory allocated to the q_vectors.  In addition if
3587 * NAPI is enabled it will delete any references to the NAPI struct prior
3588 * to freeing the q_vector.
3589 */
3590static void igc_free_q_vectors(struct igc_adapter *adapter)
3591{
3592        int v_idx = adapter->num_q_vectors;
3593
3594        adapter->num_tx_queues = 0;
3595        adapter->num_rx_queues = 0;
3596        adapter->num_q_vectors = 0;
3597
3598        while (v_idx--) {
3599                igc_reset_q_vector(adapter, v_idx);
3600                igc_free_q_vector(adapter, v_idx);
3601        }
3602}
3603
3604/**
3605 * igc_update_itr - update the dynamic ITR value based on statistics
3606 * @q_vector: pointer to q_vector
3607 * @ring_container: ring info to update the itr for
3608 *
3609 * Stores a new ITR value based on packet and byte
3610 * counts during the last interrupt.  The advantage of per-interrupt
3611 * computation is faster updates and a more accurate ITR for the current
3612 * traffic pattern.  Constants in this function were computed
3613 * based on theoretical maximum wire speed, and thresholds were set based
3614 * on testing data, aiming to minimize response time
3615 * while increasing bulk throughput.
3616 * NOTE: These calculations are only valid when operating in a single-
3617 * queue environment.
3618 */
3619static void igc_update_itr(struct igc_q_vector *q_vector,
3620                           struct igc_ring_container *ring_container)
3621{
3622        unsigned int packets = ring_container->total_packets;
3623        unsigned int bytes = ring_container->total_bytes;
3624        u8 itrval = ring_container->itr;
3625
3626        /* no packets, exit with status unchanged */
3627        if (packets == 0)
3628                return;
3629
3630        switch (itrval) {
3631        case lowest_latency:
3632                /* handle TSO and jumbo frames */
3633                if (bytes / packets > 8000)
3634                        itrval = bulk_latency;
3635                else if ((packets < 5) && (bytes > 512))
3636                        itrval = low_latency;
3637                break;
3638        case low_latency:  /* 50 usec aka 20000 ints/s */
3639                if (bytes > 10000) {
3640                        /* this if handles the TSO accounting */
3641                        if (bytes / packets > 8000)
3642                                itrval = bulk_latency;
3643                        else if ((packets < 10) || ((bytes / packets) > 1200))
3644                                itrval = bulk_latency;
3645                        else if ((packets > 35))
3646                                itrval = lowest_latency;
3647                } else if (bytes / packets > 2000) {
3648                        itrval = bulk_latency;
3649                } else if (packets <= 2 && bytes < 512) {
3650                        itrval = lowest_latency;
3651                }
3652                break;
3653        case bulk_latency: /* 250 usec aka 4000 ints/s */
3654                if (bytes > 25000) {
3655                        if (packets > 35)
3656                                itrval = low_latency;
3657                } else if (bytes < 1500) {
3658                        itrval = low_latency;
3659                }
3660                break;
3661        }
3662
3663        /* clear work counters since we have the values we need */
3664        ring_container->total_bytes = 0;
3665        ring_container->total_packets = 0;
3666
3667        /* write updated itr to ring container */
3668        ring_container->itr = itrval;
3669}
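
/* Worked example of the classification above (numbers illustrative):
 * from low_latency, an interrupt that saw 40 packets / 60000 bytes has
 * bytes > 10000 and bytes/packets = 1500 > 1200, so it is demoted to
 * bulk_latency; 40 packets / 24000 bytes (600 bytes/packet, more than
 * 35 packets) would be promoted to lowest_latency instead.
 */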
3670
3671static void igc_set_itr(struct igc_q_vector *q_vector)
3672{
3673        struct igc_adapter *adapter = q_vector->adapter;
3674        u32 new_itr = q_vector->itr_val;
3675        u8 current_itr = 0;
3676
3677        /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3678        switch (adapter->link_speed) {
3679        case SPEED_10:
3680        case SPEED_100:
3681                current_itr = 0;
3682                new_itr = IGC_4K_ITR;
3683                goto set_itr_now;
3684        default:
3685                break;
3686        }
3687
3688        igc_update_itr(q_vector, &q_vector->tx);
3689        igc_update_itr(q_vector, &q_vector->rx);
3690
3691        current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
3692
3693        /* conservative mode (itr 3) eliminates the lowest_latency setting */
3694        if (current_itr == lowest_latency &&
3695            ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3696            (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3697                current_itr = low_latency;
3698
3699        switch (current_itr) {
3700        /* counts and packets in update_itr are dependent on these numbers */
3701        case lowest_latency:
3702                new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
3703                break;
3704        case low_latency:
3705                new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
3706                break;
3707        case bulk_latency:
3708                new_itr = IGC_4K_ITR;  /* 4,000 ints/sec */
3709                break;
3710        default:
3711                break;
3712        }
3713
3714set_itr_now:
3715        if (new_itr != q_vector->itr_val) {
3716                /* this attempts to bias the interrupt rate towards Bulk
3717                 * by adding intermediate steps when interrupt rate is
3718                 * increasing
3719                 */
3720                new_itr = new_itr > q_vector->itr_val ?
3721                          max((new_itr * q_vector->itr_val) /
3722                          (new_itr + (q_vector->itr_val >> 2)),
3723                          new_itr) : new_itr;
3724                /* Don't write the value here; it resets the adapter's
3725                 * internal timer, and causes us to delay far longer than
3726                 * we should between interrupts.  Instead, we write the ITR
3727                 * value at the beginning of the next interrupt so the timing
3728                 * ends up being correct.
3729                 */
3730                q_vector->itr_val = new_itr;
3731                q_vector->set_itr = 1;
3732        }
3733}
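
/* Arithmetic sketch of the damping step above, assuming the usual igc.h
 * values IGC_20K_ITR = 196 and IGC_4K_ITR = 980: stepping from 196 to
 * 980 computes (980 * 196) / (980 + 49) = 186, and max(186, 980)
 * resolves to 980, so the candidate ITR is applied unchanged; the max()
 * merely stops the scaled intermediate from undershooting it.
 */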
3734
3735static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
3736{
3737        int v_idx = adapter->num_q_vectors;
3738
3739        if (adapter->msix_entries) {
3740                pci_disable_msix(adapter->pdev);
3741                kfree(adapter->msix_entries);
3742                adapter->msix_entries = NULL;
3743        } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
3744                pci_disable_msi(adapter->pdev);
3745        }
3746
3747        while (v_idx--)
3748                igc_reset_q_vector(adapter, v_idx);
3749}
3750
3751/**
3752 * igc_set_interrupt_capability - set MSI or MSI-X if supported
3753 * @adapter: Pointer to adapter structure
3754 * @msix: boolean value for MSI-X capability
3755 *
3756 * Attempt to configure interrupts using the best available
3757 * capabilities of the hardware and kernel.
3758 */
3759static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3760                                         bool msix)
3761{
3762        int numvecs, i;
3763        int err;
3764
3765        if (!msix)
3766                goto msi_only;
3767        adapter->flags |= IGC_FLAG_HAS_MSIX;
3768
3769        /* Number of supported queues. */
3770        adapter->num_rx_queues = adapter->rss_queues;
3771
3772        adapter->num_tx_queues = adapter->rss_queues;
3773
3774        /* start with one vector for every Rx queue */
3775        numvecs = adapter->num_rx_queues;
3776
3777        /* if Tx handler is separate add 1 for every Tx queue */
3778        if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3779                numvecs += adapter->num_tx_queues;
3780
3781        /* store the number of vectors reserved for queues */
3782        adapter->num_q_vectors = numvecs;
3783
3784        /* add 1 vector for link status interrupts */
3785        numvecs++;
3786
3787        adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3788                                        GFP_KERNEL);
3789
3790        if (!adapter->msix_entries)
3791                return;
3792
3793        /* populate entry values */
3794        for (i = 0; i < numvecs; i++)
3795                adapter->msix_entries[i].entry = i;
3796
3797        err = pci_enable_msix_range(adapter->pdev,
3798                                    adapter->msix_entries,
3799                                    numvecs,
3800                                    numvecs);
3801        if (err > 0)
3802                return;
3803
3804        kfree(adapter->msix_entries);
3805        adapter->msix_entries = NULL;
3806
3807        igc_reset_interrupt_capability(adapter);
3808
3809msi_only:
3810        adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3811
3812        adapter->rss_queues = 1;
3813        adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3814        adapter->num_rx_queues = 1;
3815        adapter->num_tx_queues = 1;
3816        adapter->num_q_vectors = 1;
3817        if (!pci_enable_msi(adapter->pdev))
3818                adapter->flags |= IGC_FLAG_HAS_MSI;
3819}
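
/* Vector accounting example for the function above: with rss_queues = 4
 * and IGC_FLAG_QUEUE_PAIRS set, numvecs = 4 queue vectors + 1 link
 * vector = 5 MSI-X entries; unpaired, every Tx queue gets its own
 * handler and the request grows to 4 + 4 + 1 = 9.
 */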
3820
3821/**
3822 * igc_update_ring_itr - update the dynamic ITR value based on packet size
3823 * @q_vector: pointer to q_vector
3824 *
3825 * Stores a new ITR value based strictly on packet size.  This
3826 * algorithm is less sophisticated than that used in igc_update_itr,
3827 * due to the difficulty of synchronizing statistics across multiple
3828 * receive rings.  The divisors and thresholds used by this function
3829 * were determined based on theoretical maximum wire speed and testing
3830 * data, in order to minimize response time while increasing bulk
3831 * throughput.
3832 * NOTE: This function is called only when operating in a multiqueue
3833 * receive environment.
3834 */
3835static void igc_update_ring_itr(struct igc_q_vector *q_vector)
3836{
3837        struct igc_adapter *adapter = q_vector->adapter;
3838        int new_val = q_vector->itr_val;
3839        int avg_wire_size = 0;
3840        unsigned int packets;
3841
3842        /* For non-gigabit speeds, just fix the interrupt rate at 4000
3843         * ints/sec - ITR timer value of 120 ticks.
3844         */
3845        switch (adapter->link_speed) {
3846        case SPEED_10:
3847        case SPEED_100:
3848                new_val = IGC_4K_ITR;
3849                goto set_itr_val;
3850        default:
3851                break;
3852        }
3853
3854        packets = q_vector->rx.total_packets;
3855        if (packets)
3856                avg_wire_size = q_vector->rx.total_bytes / packets;
3857
3858        packets = q_vector->tx.total_packets;
3859        if (packets)
3860                avg_wire_size = max_t(u32, avg_wire_size,
3861                                      q_vector->tx.total_bytes / packets);
3862
3863        /* if avg_wire_size isn't set, no work was done */
3864        if (!avg_wire_size)
3865                goto clear_counts;
3866
3867        /* Add 24 bytes to size to account for CRC, preamble, and gap */
3868        avg_wire_size += 24;
3869
3870        /* Don't starve jumbo frames */
3871        avg_wire_size = min(avg_wire_size, 3000);
3872
3873        /* Give a little boost to mid-size frames */
3874        if (avg_wire_size > 300 && avg_wire_size < 1200)
3875                new_val = avg_wire_size / 3;
3876        else
3877                new_val = avg_wire_size / 2;
3878
3879        /* conservative mode (itr 3) eliminates the lowest_latency setting */
3880        if (new_val < IGC_20K_ITR &&
3881            ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3882            (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3883                new_val = IGC_20K_ITR;
3884
3885set_itr_val:
3886        if (new_val != q_vector->itr_val) {
3887                q_vector->itr_val = new_val;
3888                q_vector->set_itr = 1;
3889        }
3890clear_counts:
3891        q_vector->rx.total_bytes = 0;
3892        q_vector->rx.total_packets = 0;
3893        q_vector->tx.total_bytes = 0;
3894        q_vector->tx.total_packets = 0;
3895}
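
/* Worked example for the sizing above (numbers illustrative): 100 Rx
 * packets totalling 150000 bytes give avg_wire_size = 1500 + 24 = 1524,
 * which is outside the 300..1200 mid-size boost window, so
 * new_val = 1524 / 2 = 762, above the IGC_20K_ITR floor that
 * conservative mode would impose and therefore used as-is.
 */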
3896
3897static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
3898{
3899        struct igc_adapter *adapter = q_vector->adapter;
3900        struct igc_hw *hw = &adapter->hw;
3901
3902        if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
3903            (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
3904                if (adapter->num_q_vectors == 1)
3905                        igc_set_itr(q_vector);
3906                else
3907                        igc_update_ring_itr(q_vector);
3908        }
3909
3910        if (!test_bit(__IGC_DOWN, &adapter->state)) {
3911                if (adapter->msix_entries)
3912                        wr32(IGC_EIMS, q_vector->eims_value);
3913                else
3914                        igc_irq_enable(adapter);
3915        }
3916}
3917
3918static void igc_add_ring(struct igc_ring *ring,
3919                         struct igc_ring_container *head)
3920{
3921        head->ring = ring;
3922        head->count++;
3923}
3924
3925/**
3926 * igc_cache_ring_register - Descriptor ring to register mapping
3927 * @adapter: board private structure to initialize
3928 *
3929 * Once we know the feature-set enabled for the device, we'll cache
3930 * the register offset the descriptor ring is assigned to.
3931 */
3932static void igc_cache_ring_register(struct igc_adapter *adapter)
3933{
3934        int i = 0, j = 0;
3935
3936        switch (adapter->hw.mac.type) {
3937        case igc_i225:
3938        default:
3939                for (; i < adapter->num_rx_queues; i++)
3940                        adapter->rx_ring[i]->reg_idx = i;
3941                for (; j < adapter->num_tx_queues; j++)
3942                        adapter->tx_ring[j]->reg_idx = j;
3943                break;
3944        }
3945}
3946
3947/**
3948 * igc_poll - NAPI Rx polling callback
3949 * @napi: napi polling structure
3950 * @budget: count of how many packets we should handle
3951 */
3952static int igc_poll(struct napi_struct *napi, int budget)
3953{
3954        struct igc_q_vector *q_vector = container_of(napi,
3955                                                     struct igc_q_vector,
3956                                                     napi);
3957        struct igc_ring *rx_ring = q_vector->rx.ring;
3958        bool clean_complete = true;
3959        int work_done = 0;
3960
3961        if (q_vector->tx.ring)
3962                clean_complete = igc_clean_tx_irq(q_vector, budget);
3963
3964        if (rx_ring) {
3965                int cleaned = rx_ring->xsk_pool ?
3966                              igc_clean_rx_irq_zc(q_vector, budget) :
3967                              igc_clean_rx_irq(q_vector, budget);
3968
3969                work_done += cleaned;
3970                if (cleaned >= budget)
3971                        clean_complete = false;
3972        }
3973
3974        /* If all work is not completed, return budget and keep polling */
3975        if (!clean_complete)
3976                return budget;
3977
3978        /* Exit the polling mode, but don't re-enable interrupts if stack might
3979         * poll us due to busy-polling
3980         */
3981        if (likely(napi_complete_done(napi, work_done)))
3982                igc_ring_irq_enable(q_vector);
3983
3984        return min(work_done, budget - 1);
3985}
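
/* Note on the return value above: NAPI treats a return equal to budget
 * as "keep polling", so a completed poll must report strictly less than
 * budget; clamping to budget - 1 preserves that contract even if
 * work_done were ever to reach budget on the completion path.
 */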
3986
3987/**
3988 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
3989 * @adapter: board private structure to initialize
3990 * @v_count: q_vectors allocated on adapter, used for ring interleaving
3991 * @v_idx: index of vector in adapter struct
3992 * @txr_count: total number of Tx rings to allocate
3993 * @txr_idx: index of first Tx ring to allocate
3994 * @rxr_count: total number of Rx rings to allocate
3995 * @rxr_idx: index of first Rx ring to allocate
3996 *
3997 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
3998 */
3999static int igc_alloc_q_vector(struct igc_adapter *adapter,
4000                              unsigned int v_count, unsigned int v_idx,
4001                              unsigned int txr_count, unsigned int txr_idx,
4002                              unsigned int rxr_count, unsigned int rxr_idx)
4003{
4004        struct igc_q_vector *q_vector;
4005        struct igc_ring *ring;
4006        int ring_count;
4007
4008        /* igc only supports 1 Tx and/or 1 Rx queue per vector */
4009        if (txr_count > 1 || rxr_count > 1)
4010                return -ENOMEM;
4011
4012        ring_count = txr_count + rxr_count;
4013
4014        /* allocate q_vector and rings */
4015        q_vector = adapter->q_vector[v_idx];
4016        if (!q_vector)
4017                q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
4018                                   GFP_KERNEL);
4019        else
4020                memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
4021        if (!q_vector)
4022                return -ENOMEM;
4023
4024        /* initialize NAPI */
4025        netif_napi_add(adapter->netdev, &q_vector->napi,
4026                       igc_poll, 64);
4027
4028        /* tie q_vector and adapter together */
4029        adapter->q_vector[v_idx] = q_vector;
4030        q_vector->adapter = adapter;
4031
4032        /* initialize work limits */
4033        q_vector->tx.work_limit = adapter->tx_work_limit;
4034
4035        /* initialize ITR configuration */
4036        q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
4037        q_vector->itr_val = IGC_START_ITR;
4038
4039        /* initialize pointer to rings */
4040        ring = q_vector->ring;
4041
4042        /* initialize ITR */
4043        if (rxr_count) {
4044                /* rx or rx/tx vector */
4045                if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
4046                        q_vector->itr_val = adapter->rx_itr_setting;
4047        } else {
4048                /* tx only vector */
4049                if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
4050                        q_vector->itr_val = adapter->tx_itr_setting;
4051        }
4052
4053        if (txr_count) {
4054                /* assign generic ring traits */
4055                ring->dev = &adapter->pdev->dev;
4056                ring->netdev = adapter->netdev;
4057
4058                /* configure backlink on ring */
4059                ring->q_vector = q_vector;
4060
4061                /* update q_vector Tx values */
4062                igc_add_ring(ring, &q_vector->tx);
4063
4064                /* apply Tx specific ring traits */
4065                ring->count = adapter->tx_ring_count;
4066                ring->queue_index = txr_idx;
4067
4068                /* assign ring to adapter */
4069                adapter->tx_ring[txr_idx] = ring;
4070
4071                /* push pointer to next ring */
4072                ring++;
4073        }
4074
4075        if (rxr_count) {
4076                /* assign generic ring traits */
4077                ring->dev = &adapter->pdev->dev;
4078                ring->netdev = adapter->netdev;
4079
4080                /* configure backlink on ring */
4081                ring->q_vector = q_vector;
4082
4083                /* update q_vector Rx values */
4084                igc_add_ring(ring, &q_vector->rx);
4085
4086                /* apply Rx specific ring traits */
4087                ring->count = adapter->rx_ring_count;
4088                ring->queue_index = rxr_idx;
4089
4090                /* assign ring to adapter */
4091                adapter->rx_ring[rxr_idx] = ring;
4092        }
4093
4094        return 0;
4095}
4096
4097/**
4098 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4099 * @adapter: board private structure to initialize
4100 *
4101 * We allocate one q_vector per queue interrupt.  If allocation fails we
4102 * return -ENOMEM.
4103 */
4104static int igc_alloc_q_vectors(struct igc_adapter *adapter)
4105{
4106        int rxr_remaining = adapter->num_rx_queues;
4107        int txr_remaining = adapter->num_tx_queues;
4108        int rxr_idx = 0, txr_idx = 0, v_idx = 0;
4109        int q_vectors = adapter->num_q_vectors;
4110        int err;
4111
4112        if (q_vectors >= (rxr_remaining + txr_remaining)) {
4113                for (; rxr_remaining; v_idx++) {
4114                        err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4115                                                 0, 0, 1, rxr_idx);
4116
4117                        if (err)
4118                                goto err_out;
4119
4120                        /* update counts and index */
4121                        rxr_remaining--;
4122                        rxr_idx++;
4123                }
4124        }
4125
4126        for (; v_idx < q_vectors; v_idx++) {
4127                int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
4128                int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
4129
4130                err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4131                                         tqpv, txr_idx, rqpv, rxr_idx);
4132
4133                if (err)
4134                        goto err_out;
4135
4136                /* update counts and index */
4137                rxr_remaining -= rqpv;
4138                txr_remaining -= tqpv;
4139                rxr_idx++;
4140                txr_idx++;
4141        }
4142
4143        return 0;
4144
4145err_out:
4146        adapter->num_tx_queues = 0;
4147        adapter->num_rx_queues = 0;
4148        adapter->num_q_vectors = 0;
4149
4150        while (v_idx--)
4151                igc_free_q_vector(adapter, v_idx);
4152
4153        return -ENOMEM;
4154}
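
/* Distribution example for the loops above (illustrative counts): with
 * 4 Rx + 4 Tx queues on 4 paired vectors, q_vectors (4) is less than
 * rxr_remaining + txr_remaining (8), so the Rx-only pass is skipped and
 * each iteration yields rqpv = tqpv = 1 (DIV_ROUND_UP(4, 4), then
 * DIV_ROUND_UP(3, 3), ...), giving every vector one Rx and one Tx ring.
 */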
4155
4156/**
4157 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4158 * @adapter: Pointer to adapter structure
4159 * @msix: boolean for MSI-X capability
4160 *
4161 * This function initializes the interrupts and allocates all of the queues.
4162 */
4163static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
4164{
4165        struct net_device *dev = adapter->netdev;
4166        int err = 0;
4167
4168        igc_set_interrupt_capability(adapter, msix);
4169
4170        err = igc_alloc_q_vectors(adapter);
4171        if (err) {
4172                netdev_err(dev, "Unable to allocate memory for vectors\n");
4173                goto err_alloc_q_vectors;
4174        }
4175
4176        igc_cache_ring_register(adapter);
4177
4178        return 0;
4179
4180err_alloc_q_vectors:
4181        igc_reset_interrupt_capability(adapter);
4182        return err;
4183}
4184
4185/**
4186 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4187 * @adapter: board private structure to initialize
4188 *
4189 * igc_sw_init initializes the Adapter private data structure.
4190 * Fields are initialized based on PCI device information and
4191 * OS network device settings (MTU size).
4192 */
4193static int igc_sw_init(struct igc_adapter *adapter)
4194{
4195        struct net_device *netdev = adapter->netdev;
4196        struct pci_dev *pdev = adapter->pdev;
4197        struct igc_hw *hw = &adapter->hw;
4198
4199        pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4200
4201        /* set default ring sizes */
4202        adapter->tx_ring_count = IGC_DEFAULT_TXD;
4203        adapter->rx_ring_count = IGC_DEFAULT_RXD;
4204
4205        /* set default ITR values */
4206        adapter->rx_itr_setting = IGC_DEFAULT_ITR;
4207        adapter->tx_itr_setting = IGC_DEFAULT_ITR;
4208
4209        /* set default work limits */
4210        adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
4211
4212        /* derive max frame size from MTU, Ethernet header, FCS and VLAN */
4213        adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
4214                                VLAN_HLEN;
4215        adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4216
4217        mutex_init(&adapter->nfc_rule_lock);
4218        INIT_LIST_HEAD(&adapter->nfc_rule_list);
4219        adapter->nfc_rule_count = 0;
4220
4221        spin_lock_init(&adapter->stats64_lock);
4222        /* Assume MSI-X interrupts, will be checked during IRQ allocation */
4223        adapter->flags |= IGC_FLAG_HAS_MSIX;
4224
4225        igc_init_queue_configuration(adapter);
4226
4227        /* This call may decrease the number of queues */
4228        if (igc_init_interrupt_scheme(adapter, true)) {
4229                netdev_err(netdev, "Unable to allocate memory for queues\n");
4230                return -ENOMEM;
4231        }
4232
4233        /* Explicitly disable IRQ since the NIC can be in any state. */
4234        igc_irq_disable(adapter);
4235
4236        set_bit(__IGC_DOWN, &adapter->state);
4237
4238        return 0;
4239}
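
/* Frame-size arithmetic used above, for a standard 1500-byte MTU:
 * max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * VLAN_HLEN (4) = 1522 bytes, and min_frame_size = ETH_ZLEN (60) +
 * ETH_FCS_LEN (4) = 64 bytes, the classic Ethernet minimum frame.
 */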
4240
4241/**
4242 * igc_up - Open the interface and prepare it to handle traffic
4243 * @adapter: board private structure
4244 */
4245void igc_up(struct igc_adapter *adapter)
4246{
4247        struct igc_hw *hw = &adapter->hw;
4248        int i = 0;
4249
4250        /* hardware has been reset, we need to reload some things */
4251        igc_configure(adapter);
4252
4253        clear_bit(__IGC_DOWN, &adapter->state);
4254
4255        for (i = 0; i < adapter->num_q_vectors; i++)
4256                napi_enable(&adapter->q_vector[i]->napi);
4257
4258        if (adapter->msix_entries)
4259                igc_configure_msix(adapter);
4260        else
4261                igc_assign_vector(adapter->q_vector[0], 0);
4262
4263        /* Clear any pending interrupts. */
4264        rd32(IGC_ICR);
4265        igc_irq_enable(adapter);
4266
4267        netif_tx_start_all_queues(adapter->netdev);
4268
4269        /* start the watchdog. */
4270        hw->mac.get_link_status = true;
4271        schedule_work(&adapter->watchdog_task);
4272}
4273
4274/**
4275 * igc_update_stats - Update the board statistics counters
4276 * @adapter: board private structure
4277 */
4278void igc_update_stats(struct igc_adapter *adapter)
4279{
4280        struct rtnl_link_stats64 *net_stats = &adapter->stats64;
4281        struct pci_dev *pdev = adapter->pdev;
4282        struct igc_hw *hw = &adapter->hw;
4283        u64 _bytes, _packets;
4284        u64 bytes, packets;
4285        unsigned int start;
4286        u32 mpc;
4287        int i;
4288
4289        /* Prevent stats update while adapter is being reset, or if the pci
4290         * connection is down.
4291         */
4292        if (adapter->link_speed == 0)
4293                return;
4294        if (pci_channel_offline(pdev))
4295                return;
4296
4297        packets = 0;
4298        bytes = 0;
4299
4300        rcu_read_lock();
4301        for (i = 0; i < adapter->num_rx_queues; i++) {
4302                struct igc_ring *ring = adapter->rx_ring[i];
4303                u32 rqdpc = rd32(IGC_RQDPC(i));
4304
4305                if (hw->mac.type >= igc_i225)
4306                        wr32(IGC_RQDPC(i), 0);
4307
4308                if (rqdpc) {
4309                        ring->rx_stats.drops += rqdpc;
4310                        net_stats->rx_fifo_errors += rqdpc;
4311                }
4312
4313                do {
4314                        start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
4315                        _bytes = ring->rx_stats.bytes;
4316                        _packets = ring->rx_stats.packets;
4317                } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
4318                bytes += _bytes;
4319                packets += _packets;
4320        }
4321
4322        net_stats->rx_bytes = bytes;
4323        net_stats->rx_packets = packets;
4324
4325        packets = 0;
4326        bytes = 0;
4327        for (i = 0; i < adapter->num_tx_queues; i++) {
4328                struct igc_ring *ring = adapter->tx_ring[i];
4329
4330                do {
4331                        start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
4332                        _bytes = ring->tx_stats.bytes;
4333                        _packets = ring->tx_stats.packets;
4334                } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
4335                bytes += _bytes;
4336                packets += _packets;
4337        }
4338        net_stats->tx_bytes = bytes;
4339        net_stats->tx_packets = packets;
4340        rcu_read_unlock();
4341
4342        /* read stats registers */
4343        adapter->stats.crcerrs += rd32(IGC_CRCERRS);
4344        adapter->stats.gprc += rd32(IGC_GPRC);
4345        adapter->stats.gorc += rd32(IGC_GORCL);
4346        rd32(IGC_GORCH); /* clear GORCL */
4347        adapter->stats.bprc += rd32(IGC_BPRC);
4348        adapter->stats.mprc += rd32(IGC_MPRC);
4349        adapter->stats.roc += rd32(IGC_ROC);
4350
4351        adapter->stats.prc64 += rd32(IGC_PRC64);
4352        adapter->stats.prc127 += rd32(IGC_PRC127);
4353        adapter->stats.prc255 += rd32(IGC_PRC255);
4354        adapter->stats.prc511 += rd32(IGC_PRC511);
4355        adapter->stats.prc1023 += rd32(IGC_PRC1023);
4356        adapter->stats.prc1522 += rd32(IGC_PRC1522);
4357        adapter->stats.tlpic += rd32(IGC_TLPIC);
4358        adapter->stats.rlpic += rd32(IGC_RLPIC);
4359        adapter->stats.hgptc += rd32(IGC_HGPTC);
4360
4361        mpc = rd32(IGC_MPC);
4362        adapter->stats.mpc += mpc;
4363        net_stats->rx_fifo_errors += mpc;
4364        adapter->stats.scc += rd32(IGC_SCC);
4365        adapter->stats.ecol += rd32(IGC_ECOL);
4366        adapter->stats.mcc += rd32(IGC_MCC);
4367        adapter->stats.latecol += rd32(IGC_LATECOL);
4368        adapter->stats.dc += rd32(IGC_DC);
4369        adapter->stats.rlec += rd32(IGC_RLEC);
4370        adapter->stats.xonrxc += rd32(IGC_XONRXC);
4371        adapter->stats.xontxc += rd32(IGC_XONTXC);
4372        adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
4373        adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
4374        adapter->stats.fcruc += rd32(IGC_FCRUC);
4375        adapter->stats.gptc += rd32(IGC_GPTC);
4376        adapter->stats.gotc += rd32(IGC_GOTCL);
4377        rd32(IGC_GOTCH); /* clear GOTCL */
4378        adapter->stats.rnbc += rd32(IGC_RNBC);
4379        adapter->stats.ruc += rd32(IGC_RUC);
4380        adapter->stats.rfc += rd32(IGC_RFC);
4381        adapter->stats.rjc += rd32(IGC_RJC);
4382        adapter->stats.tor += rd32(IGC_TORH);
4383        adapter->stats.tot += rd32(IGC_TOTH);
4384        adapter->stats.tpr += rd32(IGC_TPR);
4385
4386        adapter->stats.ptc64 += rd32(IGC_PTC64);
4387        adapter->stats.ptc127 += rd32(IGC_PTC127);
4388        adapter->stats.ptc255 += rd32(IGC_PTC255);
4389        adapter->stats.ptc511 += rd32(IGC_PTC511);
4390        adapter->stats.ptc1023 += rd32(IGC_PTC1023);
4391        adapter->stats.ptc1522 += rd32(IGC_PTC1522);
4392
4393        adapter->stats.mptc += rd32(IGC_MPTC);
4394        adapter->stats.bptc += rd32(IGC_BPTC);
4395
4396        adapter->stats.tpt += rd32(IGC_TPT);
4397        adapter->stats.colc += rd32(IGC_COLC);
4398        adapter->stats.colc += rd32(IGC_RERC);
4399
4400        adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
4401
4402        adapter->stats.tsctc += rd32(IGC_TSCTC);
4403
4404        adapter->stats.iac += rd32(IGC_IAC);
4405
4406        /* Fill out the OS statistics structure */
4407        net_stats->multicast = adapter->stats.mprc;
4408        net_stats->collisions = adapter->stats.colc;
4409
4410        /* Rx Errors */
4411
4412        /* RLEC on some newer hardware can be incorrect, so build
4413         * our own version based on RUC and ROC
4414         */
4415        net_stats->rx_errors = adapter->stats.rxerrc +
4416                adapter->stats.crcerrs + adapter->stats.algnerrc +
4417                adapter->stats.ruc + adapter->stats.roc +
4418                adapter->stats.cexterr;
4419        net_stats->rx_length_errors = adapter->stats.ruc +
4420                                      adapter->stats.roc;
4421        net_stats->rx_crc_errors = adapter->stats.crcerrs;
4422        net_stats->rx_frame_errors = adapter->stats.algnerrc;
4423        net_stats->rx_missed_errors = adapter->stats.mpc;
4424
4425        /* Tx Errors */
4426        net_stats->tx_errors = adapter->stats.ecol +
4427                               adapter->stats.latecol;
4428        net_stats->tx_aborted_errors = adapter->stats.ecol;
4429        net_stats->tx_window_errors = adapter->stats.latecol;
4430        net_stats->tx_carrier_errors = adapter->stats.tncrs;
4431
4432        /* Tx Dropped needs to be maintained elsewhere */
4433
4434        /* Management Stats */
4435        adapter->stats.mgptc += rd32(IGC_MGTPTC);
4436        adapter->stats.mgprc += rd32(IGC_MGTPRC);
4437        adapter->stats.mgpdc += rd32(IGC_MGTPDC);
4438}
4439
4440/**
4441 * igc_down - Close the interface
4442 * @adapter: board private structure
4443 */
4444void igc_down(struct igc_adapter *adapter)
4445{
4446        struct net_device *netdev = adapter->netdev;
4447        struct igc_hw *hw = &adapter->hw;
4448        u32 tctl, rctl;
4449        int i = 0;
4450
4451        set_bit(__IGC_DOWN, &adapter->state);
4452
4453        igc_ptp_suspend(adapter);
4454
4455        if (pci_device_is_present(adapter->pdev)) {
4456                /* disable receives in the hardware */
4457                rctl = rd32(IGC_RCTL);
4458                wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
4459                /* flush and sleep below */
4460        }
4461        /* set trans_start so we don't get spurious watchdogs during reset */
4462        netif_trans_update(netdev);
4463
4464        netif_carrier_off(netdev);
4465        netif_tx_stop_all_queues(netdev);
4466
4467        if (pci_device_is_present(adapter->pdev)) {
4468                /* disable transmits in the hardware */
4469                tctl = rd32(IGC_TCTL);
4470                tctl &= ~IGC_TCTL_EN;
4471                wr32(IGC_TCTL, tctl);
4472                /* flush both disables and wait for them to finish */
4473                wrfl();
4474                usleep_range(10000, 20000);
4475
4476                igc_irq_disable(adapter);
4477        }
4478
4479        adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4480
4481        for (i = 0; i < adapter->num_q_vectors; i++) {
4482                if (adapter->q_vector[i]) {
4483                        napi_synchronize(&adapter->q_vector[i]->napi);
4484                        napi_disable(&adapter->q_vector[i]->napi);
4485                }
4486        }
4487
4488        del_timer_sync(&adapter->watchdog_timer);
4489        del_timer_sync(&adapter->phy_info_timer);
4490
4491        /* record the stats before reset */
4492        spin_lock(&adapter->stats64_lock);
4493        igc_update_stats(adapter);
4494        spin_unlock(&adapter->stats64_lock);
4495
4496        adapter->link_speed = 0;
4497        adapter->link_duplex = 0;
4498
4499        if (!pci_channel_offline(adapter->pdev))
4500                igc_reset(adapter);
4501
4502        /* clear VLAN promisc flag so VFTA will be updated if necessary */
4503        adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
4504
4505        igc_clean_all_tx_rings(adapter);
4506        igc_clean_all_rx_rings(adapter);
4507}
4508
4509void igc_reinit_locked(struct igc_adapter *adapter)
4510{
4511        while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
4512                usleep_range(1000, 2000);
4513        igc_down(adapter);
4514        igc_up(adapter);
4515        clear_bit(__IGC_RESETTING, &adapter->state);
4516}
4517
4518static void igc_reset_task(struct work_struct *work)
4519{
4520        struct igc_adapter *adapter;
4521
4522        adapter = container_of(work, struct igc_adapter, reset_task);
4523
4524        rtnl_lock();
4525        /* If we're already down or resetting, just bail */
4526        if (test_bit(__IGC_DOWN, &adapter->state) ||
4527            test_bit(__IGC_RESETTING, &adapter->state)) {
4528                rtnl_unlock();
4529                return;
4530        }
4531
4532        igc_rings_dump(adapter);
4533        igc_regs_dump(adapter);
4534        netdev_err(adapter->netdev, "Reset adapter\n");
4535        igc_reinit_locked(adapter);
4536        rtnl_unlock();
4537}
4538
4539/**
4540 * igc_change_mtu - Change the Maximum Transfer Unit
4541 * @netdev: network interface device structure
4542 * @new_mtu: new value for maximum frame size
4543 *
4544 * Returns 0 on success, negative on failure
4545 */
4546static int igc_change_mtu(struct net_device *netdev, int new_mtu)
4547{
4548        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4549        struct igc_adapter *adapter = netdev_priv(netdev);
4550
4551        if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
4552                netdev_dbg(netdev, "Jumbo frames not supported with XDP\n");
4553                return -EINVAL;
4554        }
4555
4556        /* adjust max frame to be at least the size of a standard frame */
4557        if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4558                max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
4559
4560        while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
4561                usleep_range(1000, 2000);
4562
4563        /* igc_down has a dependency on max_frame_size */
4564        adapter->max_frame_size = max_frame;
4565
4566        if (netif_running(netdev))
4567                igc_down(adapter);
4568
4569        netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4570        netdev->mtu = new_mtu;
4571
4572        if (netif_running(netdev))
4573                igc_up(adapter);
4574        else
4575                igc_reset(adapter);
4576
4577        clear_bit(__IGC_RESETTING, &adapter->state);
4578
4579        return 0;
4580}
4581
4582/**
4583 * igc_get_stats64 - Get System Network Statistics
4584 * @netdev: network interface device structure
4585 * @stats: rtnl_link_stats64 pointer
4586 *
4587 * Fills in @stats with the adapter's statistics. The statistics are
4588 * updated here and also from the watchdog task.
4589 */
4590static void igc_get_stats64(struct net_device *netdev,
4591                            struct rtnl_link_stats64 *stats)
4592{
4593        struct igc_adapter *adapter = netdev_priv(netdev);
4594
4595        spin_lock(&adapter->stats64_lock);
4596        if (!test_bit(__IGC_RESETTING, &adapter->state))
4597                igc_update_stats(adapter);
4598        memcpy(stats, &adapter->stats64, sizeof(*stats));
4599        spin_unlock(&adapter->stats64_lock);
4600}
4601
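/**
 * igc_fix_features - fix up a requested netdev feature set
 * @netdev: network interface device structure
 * @features: the feature set requested by user space
 *
 * Returns the feature set adjusted to what the hardware can support.
 */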
4602static netdev_features_t igc_fix_features(struct net_device *netdev,
4603                                          netdev_features_t features)
4604{
4605        /* Since there is no support for separate Rx/Tx vlan accel
4606         * enable/disable make sure Tx flag is always in same state as Rx.
4607         */
4608        if (features & NETIF_F_HW_VLAN_CTAG_RX)
4609                features |= NETIF_F_HW_VLAN_CTAG_TX;
4610        else
4611                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4612
4613        return features;
4614}
4615
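/**
 * igc_set_features - apply a changed netdev feature set
 * @netdev: network interface device structure
 * @features: the feature set to apply
 *
 * Returns 1 when the driver has updated netdev->features itself, 0 when
 * no further action was needed and the core should commit the features.
 */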
4616static int igc_set_features(struct net_device *netdev,
4617                            netdev_features_t features)
4618{
4619        netdev_features_t changed = netdev->features ^ features;
4620        struct igc_adapter *adapter = netdev_priv(netdev);
4621
4622        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
4623                igc_vlan_mode(netdev, features);
4624
4625        /* nothing further to do unless RXALL or NTUPLE changed */
4626        if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
4627                return 0;
4628
4629        if (!(features & NETIF_F_NTUPLE))
4630                igc_flush_nfc_rules(adapter);
4631
4632        netdev->features = features;
4633
4634        if (netif_running(netdev))
4635                igc_reinit_locked(adapter);
4636        else
4637                igc_reset(adapter);
4638
4639        return 1;
4640}
4641
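/**
 * igc_features_check - drop offload bits unusable for a given skb
 * @skb: buffer to be transmitted
 * @dev: network interface device structure
 * @features: the features currently enabled on the netdev
 *
 * Strips offloads whose header layout cannot be described by a context
 * descriptor on this hardware.
 */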
4642static netdev_features_t
4643igc_features_check(struct sk_buff *skb, struct net_device *dev,
4644                   netdev_features_t features)
4645{
4646        unsigned int network_hdr_len, mac_hdr_len;
4647
4648        /* Make certain the headers can be described by a context descriptor */
4649        mac_hdr_len = skb_network_header(skb) - skb->data;
4650        if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
4651                return features & ~(NETIF_F_HW_CSUM |
4652                                    NETIF_F_SCTP_CRC |
4653                                    NETIF_F_HW_VLAN_CTAG_TX |
4654                                    NETIF_F_TSO |
4655                                    NETIF_F_TSO6);
4656
4657        network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4658        if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
4659                return features & ~(NETIF_F_HW_CSUM |
4660                                    NETIF_F_SCTP_CRC |
4661                                    NETIF_F_TSO |
4662                                    NETIF_F_TSO6);
4663
4664        /* We can only support IPv4 TSO in tunnels if we can mangle the
4665         * inner IP ID field, so strip TSO if MANGLEID is not supported.
4666         */
4667        if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4668                features &= ~NETIF_F_TSO;
4669
4670        return features;
4671}
4672
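/**
 * igc_tsync_interrupt - handle time sync (TSICR) interrupt causes
 * @adapter: Pointer to adapter structure
 *
 * Services PPS pulse, Tx timestamp, periodic output target time and
 * auxiliary (external) timestamp events, then acknowledges the handled
 * causes by writing them back to TSICR.
 */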
4673static void igc_tsync_interrupt(struct igc_adapter *adapter)
4674{
4675        u32 ack, tsauxc, sec, nsec, tsicr;
4676        struct igc_hw *hw = &adapter->hw;
4677        struct ptp_clock_event event;
4678        struct timespec64 ts;
4679
4680        tsicr = rd32(IGC_TSICR);
4681        ack = 0;
4682
4683        if (tsicr & IGC_TSICR_SYS_WRAP) {
4684                event.type = PTP_CLOCK_PPS;
4685                if (adapter->ptp_caps.pps)
4686                        ptp_clock_event(adapter->ptp_clock, &event);
4687                ack |= IGC_TSICR_SYS_WRAP;
4688        }
4689
4690        if (tsicr & IGC_TSICR_TXTS) {
4691                /* retrieve hardware timestamp */
4692                schedule_work(&adapter->ptp_tx_work);
4693                ack |= IGC_TSICR_TXTS;
4694        }
4695
4696        if (tsicr & IGC_TSICR_TT0) {
4697                spin_lock(&adapter->tmreg_lock);
4698                ts = timespec64_add(adapter->perout[0].start,
4699                                    adapter->perout[0].period);
4700                wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
4701                wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
4702                tsauxc = rd32(IGC_TSAUXC);
4703                tsauxc |= IGC_TSAUXC_EN_TT0;
4704                wr32(IGC_TSAUXC, tsauxc);
4705                adapter->perout[0].start = ts;
4706                spin_unlock(&adapter->tmreg_lock);
4707                ack |= IGC_TSICR_TT0;
4708        }
4709
4710        if (tsicr & IGC_TSICR_TT1) {
4711                spin_lock(&adapter->tmreg_lock);
4712                ts = timespec64_add(adapter->perout[1].start,
4713                                    adapter->perout[1].period);
4714                wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
4715                wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
4716                tsauxc = rd32(IGC_TSAUXC);
4717                tsauxc |= IGC_TSAUXC_EN_TT1;
4718                wr32(IGC_TSAUXC, tsauxc);
4719                adapter->perout[1].start = ts;
4720                spin_unlock(&adapter->tmreg_lock);
4721                ack |= IGC_TSICR_TT1;
4722        }
4723
4724        if (tsicr & IGC_TSICR_AUTT0) {
4725                nsec = rd32(IGC_AUXSTMPL0);
4726                sec  = rd32(IGC_AUXSTMPH0);
4727                event.type = PTP_CLOCK_EXTTS;
4728                event.index = 0;
4729                event.timestamp = sec * NSEC_PER_SEC + nsec;
4730                ptp_clock_event(adapter->ptp_clock, &event);
4731                ack |= IGC_TSICR_AUTT0;
4732        }
4733
4734        if (tsicr & IGC_TSICR_AUTT1) {
4735                nsec = rd32(IGC_AUXSTMPL1);
4736                sec  = rd32(IGC_AUXSTMPH1);
4737                event.type = PTP_CLOCK_EXTTS;
4738                event.index = 1;
4739                event.timestamp = sec * NSEC_PER_SEC + nsec;
4740                ptp_clock_event(adapter->ptp_clock, &event);
4741                ack |= IGC_TSICR_AUTT1;
4742        }
4743
4744        /* acknowledge the interrupts */
4745        wr32(IGC_TSICR, ack);
4746}
4747
4748/**
4749 * igc_msix_other - msix other interrupt handler
4750 * @irq: interrupt number
4751 * @data: pointer to the adapter structure
4752 */
4753static irqreturn_t igc_msix_other(int irq, void *data)
4754{
4755        struct igc_adapter *adapter = data;
4756        struct igc_hw *hw = &adapter->hw;
4757        u32 icr = rd32(IGC_ICR);
4758
4759        /* reading ICR causes bit 31 of EICR to be cleared */
4760        if (icr & IGC_ICR_DRSTA)
4761                schedule_work(&adapter->reset_task);
4762
4763        if (icr & IGC_ICR_DOUTSYNC) {
4764                /* HW is reporting DMA is out of sync */
4765                adapter->stats.doosync++;
4766        }
4767
4768        if (icr & IGC_ICR_LSC) {
4769                hw->mac.get_link_status = true;
4770                /* guard against interrupt when we're going down */
4771                if (!test_bit(__IGC_DOWN, &adapter->state))
4772                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
4773        }
4774
4775        if (icr & IGC_ICR_TS)
4776                igc_tsync_interrupt(adapter);
4777
4778        wr32(IGC_EIMS, adapter->eims_other);
4779
4780        return IRQ_HANDLED;
4781}
4782
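/**
 * igc_write_itr - write a pending ITR value to the vector's EITR register
 * @q_vector: Pointer to q_vector structure
 */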
4783static void igc_write_itr(struct igc_q_vector *q_vector)
4784{
4785        u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
4786
4787        if (!q_vector->set_itr)
4788                return;
4789
4790        if (!itr_val)
4791                itr_val = IGC_ITR_VAL_MASK;
4792
4793        itr_val |= IGC_EITR_CNT_IGNR;
4794
4795        writel(itr_val, q_vector->itr_register);
4796        q_vector->set_itr = 0;
4797}
4798
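/**
 * igc_msix_ring - MSI-X queue vector interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */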
4799static irqreturn_t igc_msix_ring(int irq, void *data)
4800{
4801        struct igc_q_vector *q_vector = data;
4802
4803        /* Write the ITR value calculated from the previous interrupt. */
4804        igc_write_itr(q_vector);
4805
4806        napi_schedule(&q_vector->napi);
4807
4808        return IRQ_HANDLED;
4809}
4810
4811/**
4812 * igc_request_msix - Initialize MSI-X interrupts
4813 * @adapter: Pointer to adapter structure
4814 *
4815 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
4816 * kernel.
4817 */
4818static int igc_request_msix(struct igc_adapter *adapter)
4819{
4820        int i = 0, err = 0, vector = 0, free_vector = 0;
4821        struct net_device *netdev = adapter->netdev;
4822
4823        err = request_irq(adapter->msix_entries[vector].vector,
4824                          &igc_msix_other, 0, netdev->name, adapter);
4825        if (err)
4826                goto err_out;
4827
4828        for (i = 0; i < adapter->num_q_vectors; i++) {
4829                struct igc_q_vector *q_vector = adapter->q_vector[i];
4830
4831                vector++;
4832
4833                q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
4834
4835                if (q_vector->rx.ring && q_vector->tx.ring)
4836                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
4837                                q_vector->rx.ring->queue_index);
4838                else if (q_vector->tx.ring)
4839                        sprintf(q_vector->name, "%s-tx-%u", netdev->name,
4840                                q_vector->tx.ring->queue_index);
4841                else if (q_vector->rx.ring)
4842                        sprintf(q_vector->name, "%s-rx-%u", netdev->name,
4843                                q_vector->rx.ring->queue_index);
4844                else
4845                        sprintf(q_vector->name, "%s-unused", netdev->name);
4846
4847                err = request_irq(adapter->msix_entries[vector].vector,
4848                                  igc_msix_ring, 0, q_vector->name,
4849                                  q_vector);
4850                if (err)
4851                        goto err_free;
4852        }
4853
4854        igc_configure_msix(adapter);
4855        return 0;
4856
4857err_free:
4858        /* free already assigned IRQs */
4859        free_irq(adapter->msix_entries[free_vector++].vector, adapter);
4860
4861        vector--;
4862        for (i = 0; i < vector; i++) {
4863                free_irq(adapter->msix_entries[free_vector++].vector,
4864                         adapter->q_vector[i]);
4865        }
4866err_out:
4867        return err;
4868}
4869
4870/**
4871 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
4872 * @adapter: Pointer to adapter structure
4873 *
4874 * This function resets the device so that it has 0 rx queues, tx queues, and
4875 * MSI-X interrupts allocated.
4876 */
4877static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
4878{
4879        igc_free_q_vectors(adapter);
4880        igc_reset_interrupt_capability(adapter);
4881}
4882
4883/* Need to wait a few seconds after link up to get diagnostic information from
4884 * the phy
4885 */
4886static void igc_update_phy_info(struct timer_list *t)
4887{
4888        struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4889
4890        igc_get_phy_info(&adapter->hw);
4891}
4892
4893/**
4894 * igc_has_link - check shared code for link and determine up/down
4895 * @adapter: pointer to driver private info
4896 */
4897bool igc_has_link(struct igc_adapter *adapter)
4898{
4899        struct igc_hw *hw = &adapter->hw;
4900        bool link_active = false;
4901
4902        /* get_link_status is set on LSC (link status) interrupt or
4903         * rx sequence error interrupt.  It stays set until
4904         * igc_check_for_link establishes link, for copper
4905         * adapters ONLY
4906         */
4907        switch (hw->phy.media_type) {
4908        case igc_media_type_copper:
4909                if (!hw->mac.get_link_status)
4910                        return true;
4911                hw->mac.ops.check_for_link(hw);
4912                link_active = !hw->mac.get_link_status;
4913                break;
4914        default:
4915        case igc_media_type_unknown:
4916                break;
4917        }
4918
4919        if (hw->mac.type == igc_i225 &&
4920            hw->phy.id == I225_I_PHY_ID) {
4921                if (!netif_carrier_ok(adapter->netdev)) {
4922                        adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4923                } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
4924                        adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
4925                        adapter->link_check_timeout = jiffies;
4926                }
4927        }
4928
4929        return link_active;
4930}
4931
4932/**
4933 * igc_watchdog - Timer Call-back
4934 * @t: timer for the watchdog
4935 */
4936static void igc_watchdog(struct timer_list *t)
4937{
4938        struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
4939        /* Do the rest outside of interrupt context */
4940        schedule_work(&adapter->watchdog_task);
4941}
4942
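/**
 * igc_watchdog_task - periodically check link state and Tx queue health
 * @work: work_struct embedded in the adapter structure
 *
 * Updates link state and statistics, detects hung Tx queues, and
 * reschedules the watchdog timer.
 */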
4943static void igc_watchdog_task(struct work_struct *work)
4944{
4945        struct igc_adapter *adapter = container_of(work,
4946                                                   struct igc_adapter,
4947                                                   watchdog_task);
4948        struct net_device *netdev = adapter->netdev;
4949        struct igc_hw *hw = &adapter->hw;
4950        struct igc_phy_info *phy = &hw->phy;
4951        u16 phy_data, retry_count = 20;
4952        u32 link;
4953        int i;
4954
4955        link = igc_has_link(adapter);
4956
4957        if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
4958                if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4959                        adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4960                else
4961                        link = false;
4962        }
4963
4964        if (link) {
4965                /* Cancel scheduled suspend requests. */
4966                pm_runtime_resume(netdev->dev.parent);
4967
4968                if (!netif_carrier_ok(netdev)) {
4969                        u32 ctrl;
4970
4971                        hw->mac.ops.get_speed_and_duplex(hw,
4972                                                         &adapter->link_speed,
4973                                                         &adapter->link_duplex);
4974
4975                        ctrl = rd32(IGC_CTRL);
4976                        /* Link status message must follow this format */
4977                        netdev_info(netdev,
4978                                    "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4979                                    adapter->link_speed,
4980                                    adapter->link_duplex == FULL_DUPLEX ?
4981                                    "Full" : "Half",
4982                                    (ctrl & IGC_CTRL_TFCE) &&
4983                                    (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
4984                                    (ctrl & IGC_CTRL_RFCE) ?  "RX" :
4985                                    (ctrl & IGC_CTRL_TFCE) ?  "TX" : "None");
4986
4987                        /* disable EEE if enabled */
4988                        if ((adapter->flags & IGC_FLAG_EEE) &&
4989                            adapter->link_duplex == HALF_DUPLEX) {
4990                                netdev_info(netdev,
4991                                            "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
4992                                adapter->hw.dev_spec._base.eee_enable = false;
4993                                adapter->flags &= ~IGC_FLAG_EEE;
4994                        }
4995
4996                        /* check if SmartSpeed worked */
4997                        igc_check_downshift(hw);
4998                        if (phy->speed_downgraded)
4999                                netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5000
5001                        /* adjust timeout factor according to speed/duplex */
5002                        adapter->tx_timeout_factor = 1;
5003                        switch (adapter->link_speed) {
5004                        case SPEED_10:
5005                                adapter->tx_timeout_factor = 14;
5006                                break;
5007                        case SPEED_100:
5008                                /* maybe add some timeout factor ? */
5009                                break;
5010                        }
5011
5012                        if (adapter->link_speed != SPEED_1000)
5013                                goto no_wait;
5014
5015                        /* wait for Remote receiver status OK */
5016retry_read_status:
5017                        if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5018                                              &phy_data)) {
5019                                if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5020                                    retry_count) {
5021                                        msleep(100);
5022                                        retry_count--;
5023                                        goto retry_read_status;
5024                                } else if (!retry_count) {
5025                                        netdev_err(netdev, "exceeded max 2 second wait for Remote receiver status\n");
5026                                }
5027                        } else {
5028                                netdev_err(netdev, "failed to read 1000Base-T Status Reg\n");
5029                        }
5030no_wait:
5031                        netif_carrier_on(netdev);
5032
5033                        /* link state has changed, schedule phy info update */
5034                        if (!test_bit(__IGC_DOWN, &adapter->state))
5035                                mod_timer(&adapter->phy_info_timer,
5036                                          round_jiffies(jiffies + 2 * HZ));
5037                }
5038        } else {
5039                if (netif_carrier_ok(netdev)) {
5040                        adapter->link_speed = 0;
5041                        adapter->link_duplex = 0;
5042
5043                        /* Link status message must follow this format */
5044                        netdev_info(netdev, "NIC Link is Down\n");
5045                        netif_carrier_off(netdev);
5046
5047                        /* link state has changed, schedule phy info update */
5048                        if (!test_bit(__IGC_DOWN, &adapter->state))
5049                                mod_timer(&adapter->phy_info_timer,
5050                                          round_jiffies(jiffies + 2 * HZ));
5051
5052                        /* link is down, time to check for alternate media */
5053                        if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
5054                                if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
5055                                        schedule_work(&adapter->reset_task);
5056                                        /* return immediately */
5057                                        return;
5058                                }
5059                        }
5060                        pm_schedule_suspend(netdev->dev.parent,
5061                                            MSEC_PER_SEC * 5);
5062
5063                /* also check for alternate media here */
5064                } else if (!netif_carrier_ok(netdev) &&
5065                           (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
5066                        if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
5067                                schedule_work(&adapter->reset_task);
5068                                /* return immediately */
5069                                return;
5070                        }
5071                }
5072        }
5073
5074        spin_lock(&adapter->stats64_lock);
5075        igc_update_stats(adapter);
5076        spin_unlock(&adapter->stats64_lock);
5077
5078        for (i = 0; i < adapter->num_tx_queues; i++) {
5079                struct igc_ring *tx_ring = adapter->tx_ring[i];
5080
5081                if (!netif_carrier_ok(netdev)) {
5082                        /* We've lost link, so the controller stops DMA,
5083                         * but we've got queued Tx work that's never going
5084                         * to get done, so reset controller to flush Tx.
5085                         * (Do the reset outside of interrupt context).
5086                         */
5087                        if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5088                                adapter->tx_timeout_count++;
5089                                schedule_work(&adapter->reset_task);
5090                                /* return immediately since reset is imminent */
5091                                return;
5092                        }
5093                }
5094
5095                /* Force detection of hung controller every watchdog period */
5096                set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5097        }
5098
5099        /* Cause software interrupt to ensure Rx ring is cleaned */
5100        if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5101                u32 eics = 0;
5102
5103                for (i = 0; i < adapter->num_q_vectors; i++)
5104                        eics |= adapter->q_vector[i]->eims_value;
5105                wr32(IGC_EICS, eics);
5106        } else {
5107                wr32(IGC_ICS, IGC_ICS_RXDMT0);
5108        }
5109
5110        igc_ptp_tx_hang(adapter);
5111
5112        /* Reset the timer */
5113        if (!test_bit(__IGC_DOWN, &adapter->state)) {
5114                if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
5115                        mod_timer(&adapter->watchdog_timer,
5116                                  round_jiffies(jiffies + HZ));
5117                else
5118                        mod_timer(&adapter->watchdog_timer,
5119                                  round_jiffies(jiffies + 2 * HZ));
5120        }
5121}
5122
5123/**
5124 * igc_intr_msi - Interrupt Handler
5125 * @irq: interrupt number
5126 * @data: pointer to the adapter structure
5127 */
5128static irqreturn_t igc_intr_msi(int irq, void *data)
5129{
5130        struct igc_adapter *adapter = data;
5131        struct igc_q_vector *q_vector = adapter->q_vector[0];
5132        struct igc_hw *hw = &adapter->hw;
5133        /* read ICR disables interrupts using IAM */
5134        u32 icr = rd32(IGC_ICR);
5135
5136        igc_write_itr(q_vector);
5137
5138        if (icr & IGC_ICR_DRSTA)
5139                schedule_work(&adapter->reset_task);
5140
5141        if (icr & IGC_ICR_DOUTSYNC) {
5142                /* HW is reporting DMA is out of sync */
5143                adapter->stats.doosync++;
5144        }
5145
5146        if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5147                hw->mac.get_link_status = true;
5148                if (!test_bit(__IGC_DOWN, &adapter->state))
5149                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
5150        }
5151
5152        napi_schedule(&q_vector->napi);
5153
5154        return IRQ_HANDLED;
5155}
5156
5157/**
5158 * igc_intr - Legacy Interrupt Handler
5159 * @irq: interrupt number
5160 * @data: pointer to the adapter structure
5161 */
5162static irqreturn_t igc_intr(int irq, void *data)
5163{
5164        struct igc_adapter *adapter = data;
5165        struct igc_q_vector *q_vector = adapter->q_vector[0];
5166        struct igc_hw *hw = &adapter->hw;
5167        /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
5168         * need for the IMC write
5169         */
5170        u32 icr = rd32(IGC_ICR);
5171
5172        /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5173         * not set, then the adapter didn't send an interrupt
5174         */
5175        if (!(icr & IGC_ICR_INT_ASSERTED))
5176                return IRQ_NONE;
5177
5178        igc_write_itr(q_vector);
5179
5180        if (icr & IGC_ICR_DRSTA)
5181                schedule_work(&adapter->reset_task);
5182
5183        if (icr & IGC_ICR_DOUTSYNC) {
5184                /* HW is reporting DMA is out of sync */
5185                adapter->stats.doosync++;
5186        }
5187
5188        if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5189                hw->mac.get_link_status = true;
5190                /* guard against interrupt when we're going down */
5191                if (!test_bit(__IGC_DOWN, &adapter->state))
5192                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
5193        }
5194
5195        napi_schedule(&q_vector->napi);
5196
5197        return IRQ_HANDLED;
5198}
5199
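/**
 * igc_free_irq - free interrupts previously requested by igc_request_irq
 * @adapter: Pointer to adapter structure
 */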
5200static void igc_free_irq(struct igc_adapter *adapter)
5201{
5202        if (adapter->msix_entries) {
5203                int vector = 0, i;
5204
5205                free_irq(adapter->msix_entries[vector++].vector, adapter);
5206
5207                for (i = 0; i < adapter->num_q_vectors; i++)
5208                        free_irq(adapter->msix_entries[vector++].vector,
5209                                 adapter->q_vector[i]);
5210        } else {
5211                free_irq(adapter->pdev->irq, adapter);
5212        }
5213}
5214
5215/**
5216 * igc_request_irq - initialize interrupts
5217 * @adapter: Pointer to adapter structure
5218 *
5219 * Attempts to configure interrupts using the best available
5220 * capabilities of the hardware and kernel.
5221 */
5222static int igc_request_irq(struct igc_adapter *adapter)
5223{
5224        struct net_device *netdev = adapter->netdev;
5225        struct pci_dev *pdev = adapter->pdev;
5226        int err = 0;
5227
5228        if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5229                err = igc_request_msix(adapter);
5230                if (!err)
5231                        goto request_done;
5232                /* fall back to MSI */
5233                igc_free_all_tx_resources(adapter);
5234                igc_free_all_rx_resources(adapter);
5235
5236                igc_clear_interrupt_scheme(adapter);
5237                err = igc_init_interrupt_scheme(adapter, false);
5238                if (err)
5239                        goto request_done;
5240                igc_setup_all_tx_resources(adapter);
5241                igc_setup_all_rx_resources(adapter);
5242                igc_configure(adapter);
5243        }
5244
5245        igc_assign_vector(adapter->q_vector[0], 0);
5246
5247        if (adapter->flags & IGC_FLAG_HAS_MSI) {
5248                err = request_irq(pdev->irq, &igc_intr_msi, 0,
5249                                  netdev->name, adapter);
5250                if (!err)
5251                        goto request_done;
5252
5253                /* fall back to legacy interrupts */
5254                igc_reset_interrupt_capability(adapter);
5255                adapter->flags &= ~IGC_FLAG_HAS_MSI;
5256        }
5257
5258        err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
5259                          netdev->name, adapter);
5260
5261        if (err)
5262                netdev_err(netdev, "Error %d getting interrupt\n", err);
5263
5264request_done:
5265        return err;
5266}
5267
5268/**
5269 * __igc_open - Called when a network interface is made active
5270 * @netdev: network interface device structure
5271 * @resuming: boolean indicating if the device is resuming
5272 *
5273 * Returns 0 on success, negative value on failure
5274 *
5275 * The open entry point is called when a network interface is made
5276 * active by the system (IFF_UP).  At this point all resources needed
5277 * for transmit and receive operations are allocated, the interrupt
5278 * handler is registered with the OS, the watchdog timer is started,
5279 * and the stack is notified that the interface is ready.
5280 */
5281static int __igc_open(struct net_device *netdev, bool resuming)
5282{
5283        struct igc_adapter *adapter = netdev_priv(netdev);
5284        struct pci_dev *pdev = adapter->pdev;
5285        struct igc_hw *hw = &adapter->hw;
5286        int err = 0;
5287        int i = 0;
5288
5289        /* disallow open during test */
5290
5291        if (test_bit(__IGC_TESTING, &adapter->state)) {
5292                WARN_ON(resuming);
5293                return -EBUSY;
5294        }
5295
5296        if (!resuming)
5297                pm_runtime_get_sync(&pdev->dev);
5298
5299        netif_carrier_off(netdev);
5300
5301        /* allocate transmit descriptors */
5302        err = igc_setup_all_tx_resources(adapter);
5303        if (err)
5304                goto err_setup_tx;
5305
5306        /* allocate receive descriptors */
5307        err = igc_setup_all_rx_resources(adapter);
5308        if (err)
5309                goto err_setup_rx;
5310
5311        igc_power_up_link(adapter);
5312
5313        igc_configure(adapter);
5314
5315        err = igc_request_irq(adapter);
5316        if (err)
5317                goto err_req_irq;
5318
5319        /* Notify the stack of the actual queue counts. */
5320        err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
5321        if (err)
5322                goto err_set_queues;
5323
5324        err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
5325        if (err)
5326                goto err_set_queues;
5327
5328        clear_bit(__IGC_DOWN, &adapter->state);
5329
5330        for (i = 0; i < adapter->num_q_vectors; i++)
5331                napi_enable(&adapter->q_vector[i]->napi);
5332
5333        /* Clear any pending interrupts. */
5334        rd32(IGC_ICR);
5335        igc_irq_enable(adapter);
5336
5337        if (!resuming)
5338                pm_runtime_put(&pdev->dev);
5339
5340        netif_tx_start_all_queues(netdev);
5341
5342        /* start the watchdog. */
5343        hw->mac.get_link_status = true;
5344        schedule_work(&adapter->watchdog_task);
5345
5346        return IGC_SUCCESS;
5347
5348err_set_queues:
5349        igc_free_irq(adapter);
5350err_req_irq:
5351        igc_release_hw_control(adapter);
5352        igc_power_down_phy_copper_base(&adapter->hw);
5353        igc_free_all_rx_resources(adapter);
5354err_setup_rx:
5355        igc_free_all_tx_resources(adapter);
5356err_setup_tx:
5357        igc_reset(adapter);
5358        if (!resuming)
5359                pm_runtime_put(&pdev->dev);
5360
5361        return err;
5362}
5363
5364int igc_open(struct net_device *netdev)
5365{
5366        return __igc_open(netdev, false);
5367}
5368
5369/**
5370 * __igc_close - Disables a network interface
5371 * @netdev: network interface device structure
5372 * @suspending: boolean indicating the device is suspending
5373 *
5374 * Returns 0, this is not allowed to fail
5375 *
5376 * The close entry point is called when an interface is de-activated
5377 * by the OS.  The hardware is still under the driver's control, but
5378 * needs to be disabled.  A global MAC reset is issued to stop the
5379 * hardware, and all transmit and receive resources are freed.
5380 */
5381static int __igc_close(struct net_device *netdev, bool suspending)
5382{
5383        struct igc_adapter *adapter = netdev_priv(netdev);
5384        struct pci_dev *pdev = adapter->pdev;
5385
5386        WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
5387
5388        if (!suspending)
5389                pm_runtime_get_sync(&pdev->dev);
5390
5391        igc_down(adapter);
5392
5393        igc_release_hw_control(adapter);
5394
5395        igc_free_irq(adapter);
5396
5397        igc_free_all_tx_resources(adapter);
5398        igc_free_all_rx_resources(adapter);
5399
5400        if (!suspending)
5401                pm_runtime_put_sync(&pdev->dev);
5402
5403        return 0;
5404}
5405
5406int igc_close(struct net_device *netdev)
5407{
5408        if (netif_device_present(netdev) || netdev->dismantle)
5409                return __igc_close(netdev, false);
5410        return 0;
5411}
5412
5413/**
5414 * igc_ioctl - Access the hwtstamp interface
5415 * @netdev: network interface device structure
5416 * @ifr: interface request data
5417 * @cmd: ioctl command
5418 **/
5419static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5420{
5421        switch (cmd) {
5422        case SIOCGHWTSTAMP:
5423                return igc_ptp_get_ts_config(netdev, ifr);
5424        case SIOCSHWTSTAMP:
5425                return igc_ptp_set_ts_config(netdev, ifr);
5426        default:
5427                return -EOPNOTSUPP;
5428        }
5429}
5430
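/**
 * igc_save_launchtime_params - save per-queue launchtime configuration
 * @adapter: Pointer to adapter structure
 * @queue: index of the Tx queue being configured
 * @enable: whether launchtime (ETF offload) is enabled on the queue
 *
 * If no Qbv schedule is active, installs a default one second cycle with
 * all gates open so launchtime can operate on its own.
 */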
5431static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
5432                                      bool enable)
5433{
5434        struct igc_ring *ring;
5435        int i;
5436
5437        if (queue < 0 || queue >= adapter->num_tx_queues)
5438                return -EINVAL;
5439
5440        ring = adapter->tx_ring[queue];
5441        ring->launchtime_enable = enable;
5442
5443        if (adapter->base_time)
5444                return 0;
5445
5446        adapter->cycle_time = NSEC_PER_SEC;
5447
5448        for (i = 0; i < adapter->num_tx_queues; i++) {
5449                ring = adapter->tx_ring[i];
5450                ring->start_time = 0;
5451                ring->end_time = NSEC_PER_SEC;
5452        }
5453
5454        return 0;
5455}
5456
5457static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
5458{
5459        struct timespec64 b;
5460
5461        b = ktime_to_timespec64(base_time);
5462
5463        return timespec64_compare(now, &b) > 0;
5464}
5465
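/* Returns false for taprio schedules the i225 cannot offload: schedules
 * using cycle time extension, base times in the future, entry commands
 * other than "set gates", or queues whose gate is opened by more than one
 * entry per cycle.
 */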
5466static bool validate_schedule(struct igc_adapter *adapter,
5467                              const struct tc_taprio_qopt_offload *qopt)
5468{
5469        int queue_uses[IGC_MAX_TX_QUEUES] = { };
5470        struct timespec64 now;
5471        size_t n;
5472
5473        if (qopt->cycle_time_extension)
5474                return false;
5475
5476        igc_ptp_read(adapter, &now);
5477
5478        /* If we program the controller's BASET registers with a time
5479         * in the future, it will hold all the packets until that
5480         * time, causing a lot of TX Hangs, so to avoid that, we
5481         * reject schedules that would start in the future.
5482         */
5483        if (!is_base_time_past(qopt->base_time, &now))
5484                return false;
5485
5486        for (n = 0; n < qopt->num_entries; n++) {
5487                const struct tc_taprio_sched_entry *e;
5488                int i;
5489
5490                e = &qopt->entries[n];
5491
5492                /* i225 only supports "global" frame preemption
5493                 * settings.
5494                 */
5495                if (e->command != TC_TAPRIO_CMD_SET_GATES)
5496                        return false;
5497
5498                for (i = 0; i < adapter->num_tx_queues; i++) {
5499                        if (e->gate_mask & BIT(i))
5500                                queue_uses[i]++;
5501
5502                        if (queue_uses[i] > 1)
5503                                return false;
5504                }
5505        }
5506
5507        return true;
5508}
5509
5510static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
5511                                     struct tc_etf_qopt_offload *qopt)
5512{
5513        struct igc_hw *hw = &adapter->hw;
5514        int err;
5515
5516        if (hw->mac.type != igc_i225)
5517                return -EOPNOTSUPP;
5518
5519        err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
5520        if (err)
5521                return err;
5522
5523        return igc_tsn_offload_apply(adapter);
5524}
5525
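/**
 * igc_save_qbv_schedule - translate a taprio schedule into per-ring windows
 * @adapter: Pointer to adapter structure
 * @qopt: taprio offload data from the stack
 *
 * Converts the gate list into a [start_time, end_time) transmission window
 * within the cycle for each Tx ring.
 */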
5526static int igc_save_qbv_schedule(struct igc_adapter *adapter,
5527                                 struct tc_taprio_qopt_offload *qopt)
5528{
5529        u32 start_time = 0, end_time = 0;
5530        size_t n;
5531
5532        if (!qopt->enable) {
5533                adapter->base_time = 0;
5534                return 0;
5535        }
5536
5537        if (adapter->base_time)
5538                return -EALREADY;
5539
5540        if (!validate_schedule(adapter, qopt))
5541                return -EINVAL;
5542
5543        adapter->cycle_time = qopt->cycle_time;
5544        adapter->base_time = qopt->base_time;
5545
5546        /* FIXME: be a little smarter about cases when the gate for a
5547         * queue stays open for more than one entry.
5548         */
5549        for (n = 0; n < qopt->num_entries; n++) {
5550                struct tc_taprio_sched_entry *e = &qopt->entries[n];
5551                int i;
5552
5553                end_time += e->interval;
5554
5555                for (i = 0; i < adapter->num_tx_queues; i++) {
5556                        struct igc_ring *ring = adapter->tx_ring[i];
5557
5558                        if (!(e->gate_mask & BIT(i)))
5559                                continue;
5560
5561                        ring->start_time = start_time;
5562                        ring->end_time = end_time;
5563                }
5564
5565                start_time += e->interval;
5566        }
5567
5568        return 0;
5569}
5570
5571static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
5572                                         struct tc_taprio_qopt_offload *qopt)
5573{
5574        struct igc_hw *hw = &adapter->hw;
5575        int err;
5576
5577        if (hw->mac.type != igc_i225)
5578                return -EOPNOTSUPP;
5579
5580        err = igc_save_qbv_schedule(adapter, qopt);
5581        if (err)
5582                return err;
5583
5584        return igc_tsn_offload_apply(adapter);
5585}
5586
5587static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
5588                        void *type_data)
5589{
5590        struct igc_adapter *adapter = netdev_priv(dev);
5591
5592        switch (type) {
5593        case TC_SETUP_QDISC_TAPRIO:
5594                return igc_tsn_enable_qbv_scheduling(adapter, type_data);
5595
5596        case TC_SETUP_QDISC_ETF:
5597                return igc_tsn_enable_launchtime(adapter, type_data);
5598
5599        default:
5600                return -EOPNOTSUPP;
5601        }
5602}
5603
5604static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
5605{
5606        struct igc_adapter *adapter = netdev_priv(dev);
5607
5608        switch (bpf->command) {
5609        case XDP_SETUP_PROG:
5610                return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
5611        case XDP_SETUP_XSK_POOL:
5612                return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
5613                                          bpf->xsk.queue_id);
5614        default:
5615                return -EOPNOTSUPP;
5616        }
5617}
5618
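/**
 * igc_xdp_xmit - transmit XDP frames, the .ndo_xdp_xmit entry point
 * @dev: network interface device structure
 * @num_frames: number of frames in @frames
 * @frames: XDP frames to transmit
 * @flags: XDP_XMIT_FLUSH kicks the ring tail after queueing
 *
 * Returns the number of frames sent; frames that could not be queued are
 * freed back to the XDP layer and counted as drops.
 */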
5619static int igc_xdp_xmit(struct net_device *dev, int num_frames,
5620                        struct xdp_frame **frames, u32 flags)
5621{
5622        struct igc_adapter *adapter = netdev_priv(dev);
5623        int cpu = smp_processor_id();
5624        struct netdev_queue *nq;
5625        struct igc_ring *ring;
5626        int i, drops;
5627
5628        if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
5629                return -ENETDOWN;
5630
5631        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
5632                return -EINVAL;
5633
5634        ring = igc_xdp_get_tx_ring(adapter, cpu);
5635        nq = txring_txq(ring);
5636
5637        __netif_tx_lock(nq, cpu);
5638
5639        drops = 0;
5640        for (i = 0; i < num_frames; i++) {
5641                int err;
5642                struct xdp_frame *xdpf = frames[i];
5643
5644                err = igc_xdp_init_tx_descriptor(ring, xdpf);
5645                if (err) {
5646                        xdp_return_frame_rx_napi(xdpf);
5647                        drops++;
5648                }
5649        }
5650
5651        if (flags & XDP_XMIT_FLUSH)
5652                igc_flush_tx_descriptors(ring);
5653
5654        __netif_tx_unlock(nq);
5655
5656        return num_frames - drops;
5657}
5658
5659static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
5660                                        struct igc_q_vector *q_vector)
5661{
5662        struct igc_hw *hw = &adapter->hw;
5663        u32 eics = 0;
5664
5665        eics |= q_vector->eims_value;
5666        wr32(IGC_EICS, eics);
5667}
5668
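/**
 * igc_xsk_wakeup - kick NAPI processing for an AF_XDP zero-copy queue
 * @dev: network interface device structure
 * @queue_id: Rx queue bound to the XSK buffer pool
 * @flags: wakeup flags from the XSK framework
 */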
5669int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
5670{
5671        struct igc_adapter *adapter = netdev_priv(dev);
5672        struct igc_q_vector *q_vector;
5673        struct igc_ring *ring;
5674
5675        if (test_bit(__IGC_DOWN, &adapter->state))
5676                return -ENETDOWN;
5677
5678        if (!igc_xdp_is_enabled(adapter))
5679                return -ENXIO;
5680
5681        if (queue_id >= adapter->num_rx_queues)
5682                return -EINVAL;
5683
5684        ring = adapter->rx_ring[queue_id];
5685
5686        if (!ring->xsk_pool)
5687                return -ENXIO;
5688
5689        q_vector = adapter->q_vector[queue_id];
5690        if (!napi_if_scheduled_mark_missed(&q_vector->napi))
5691                igc_trigger_rxtxq_interrupt(adapter, q_vector);
5692
5693        return 0;
5694}
5695
5696static const struct net_device_ops igc_netdev_ops = {
5697        .ndo_open               = igc_open,
5698        .ndo_stop               = igc_close,
5699        .ndo_start_xmit         = igc_xmit_frame,
5700        .ndo_set_rx_mode        = igc_set_rx_mode,
5701        .ndo_set_mac_address    = igc_set_mac,
5702        .ndo_change_mtu         = igc_change_mtu,
5703        .ndo_get_stats64        = igc_get_stats64,
5704        .ndo_fix_features       = igc_fix_features,
5705        .ndo_set_features       = igc_set_features,
5706        .ndo_features_check     = igc_features_check,
5707        .ndo_do_ioctl           = igc_ioctl,
5708        .ndo_setup_tc           = igc_setup_tc,
5709        .ndo_bpf                = igc_bpf,
5710        .ndo_xdp_xmit           = igc_xdp_xmit,
5711        .ndo_xsk_wakeup         = igc_xsk_wakeup,
5712};
5713
5714/* PCIe configuration access */
5715void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
5716{
5717        struct igc_adapter *adapter = hw->back;
5718
5719        pci_read_config_word(adapter->pdev, reg, value);
5720}
5721
5722void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
5723{
5724        struct igc_adapter *adapter = hw->back;
5725
5726        pci_write_config_word(adapter->pdev, reg, *value);
5727}
5728
5729s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
5730{
5731        struct igc_adapter *adapter = hw->back;
5732
5733        if (!pci_is_pcie(adapter->pdev))
5734                return -IGC_ERR_CONFIG;
5735
5736        pcie_capability_read_word(adapter->pdev, reg, value);
5737
5738        return IGC_SUCCESS;
5739}
5740
5741s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
5742{
5743        struct igc_adapter *adapter = hw->back;
5744
5745        if (!pci_is_pcie(adapter->pdev))
5746                return -IGC_ERR_CONFIG;
5747
5748        pcie_capability_write_word(adapter->pdev, reg, *value);
5749
5750        return IGC_SUCCESS;
5751}
5752
5753u32 igc_rd32(struct igc_hw *hw, u32 reg)
5754{
5755        struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
5756        u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
5757        u32 value = 0;
5758
5759        value = readl(&hw_addr[reg]);
5760
5761        /* reads should not return all F's */
5762        if (!(~value) && (!reg || !(~readl(hw_addr)))) {
5763                struct net_device *netdev = igc->netdev;
5764
5765                hw->hw_addr = NULL;
5766                netif_device_detach(netdev);
5767                netdev_err(netdev, "PCIe link lost, device now detached\n");
5768                WARN(pci_device_is_present(igc->pdev),
5769                     "igc: Failed to read reg 0x%x!\n", reg);
5770        }
5771
5772        return value;
5773}
5774
5775int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
5776{
5777        struct igc_mac_info *mac = &adapter->hw.mac;
5778
5779        mac->autoneg = false;
5780
5781        /* Make sure dplx is at most 1 bit and lsb of speed is not set
5782         * for the switch() below to work
5783         */
5784        if ((spd & 1) || (dplx & ~1))
5785                goto err_inval;
5786
5787        switch (spd + dplx) {
5788        case SPEED_10 + DUPLEX_HALF:
5789                mac->forced_speed_duplex = ADVERTISE_10_HALF;
5790                break;
5791        case SPEED_10 + DUPLEX_FULL:
5792                mac->forced_speed_duplex = ADVERTISE_10_FULL;
5793                break;
5794        case SPEED_100 + DUPLEX_HALF:
5795                mac->forced_speed_duplex = ADVERTISE_100_HALF;
5796                break;
5797        case SPEED_100 + DUPLEX_FULL:
5798                mac->forced_speed_duplex = ADVERTISE_100_FULL;
5799                break;
5800        case SPEED_1000 + DUPLEX_FULL:
5801                mac->autoneg = true;
5802                adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5803                break;
5804        case SPEED_1000 + DUPLEX_HALF: /* not supported */
5805                goto err_inval;
5806        case SPEED_2500 + DUPLEX_FULL:
5807                mac->autoneg = true;
5808                adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
5809                break;
5810        case SPEED_2500 + DUPLEX_HALF: /* not supported */
5811        default:
5812                goto err_inval;
5813        }
5814
5815        /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5816        adapter->hw.phy.mdix = AUTO_ALL_MODES;
5817
5818        return 0;
5819
5820err_inval:
5821        netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
5822        return -EINVAL;
5823}
5824
5825/**
5826 * igc_probe - Device Initialization Routine
5827 * @pdev: PCI device information struct
5828 * @ent: entry in igc_pci_tbl
5829 *
5830 * Returns 0 on success, negative on failure
5831 *
5832 * igc_probe initializes an adapter identified by a pci_dev structure.
5833 * The OS initialization, configuring the adapter private structure,
5834 * and a hardware reset occur.
5835 */
5836static int igc_probe(struct pci_dev *pdev,
5837                     const struct pci_device_id *ent)
5838{
5839        struct igc_adapter *adapter;
5840        struct net_device *netdev;
5841        struct igc_hw *hw;
5842        const struct igc_info *ei = igc_info_tbl[ent->driver_data];
5843        int err, pci_using_dac;
5844
5845        err = pci_enable_device_mem(pdev);
5846        if (err)
5847                return err;
5848
5849        pci_using_dac = 0;
5850        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5851        if (!err) {
5852                pci_using_dac = 1;
5853        } else {
5854                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5855                if (err) {
5856                        dev_err(&pdev->dev,
5857                                "No usable DMA configuration, aborting\n");
5858                        goto err_dma;
5859                }
5860        }
5861
5862        err = pci_request_mem_regions(pdev, igc_driver_name);
5863        if (err)
5864                goto err_pci_reg;
5865
5866        pci_enable_pcie_error_reporting(pdev);
5867
5868        pci_set_master(pdev);
5869
5870        err = -ENOMEM;
5871        netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
5872                                   IGC_MAX_TX_QUEUES);
5873
5874        if (!netdev)
5875                goto err_alloc_etherdev;
5876
5877        SET_NETDEV_DEV(netdev, &pdev->dev);
5878
5879        pci_set_drvdata(pdev, netdev);
5880        adapter = netdev_priv(netdev);
5881        adapter->netdev = netdev;
5882        adapter->pdev = pdev;
5883        hw = &adapter->hw;
5884        hw->back = adapter;
5885        adapter->port_num = hw->bus.func;
5886        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
5887
5888        err = pci_save_state(pdev);
5889        if (err)
5890                goto err_ioremap;
5891
5892        err = -EIO;
5893        adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
5894                                   pci_resource_len(pdev, 0));
5895        if (!adapter->io_addr)
5896                goto err_ioremap;
5897
5898        /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
5899        hw->hw_addr = adapter->io_addr;
5900
5901        netdev->netdev_ops = &igc_netdev_ops;
5902        igc_ethtool_set_ops(netdev);
5903        netdev->watchdog_timeo = 5 * HZ;
5904
5905        netdev->mem_start = pci_resource_start(pdev, 0);
5906        netdev->mem_end = pci_resource_end(pdev, 0);
5907
5908        /* PCI config space info */
5909        hw->vendor_id = pdev->vendor;
5910        hw->device_id = pdev->device;
5911        hw->revision_id = pdev->revision;
5912        hw->subsystem_vendor_id = pdev->subsystem_vendor;
5913        hw->subsystem_device_id = pdev->subsystem_device;
5914
5915        /* Copy the default MAC and PHY function pointers */
5916        memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
5917        memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
5918
5919        /* Initialize SKU-specific constants */
5920        err = ei->get_invariants(hw);
5921        if (err)
5922                goto err_sw_init;
5923
5924        /* Add supported features to the features list */
5925        netdev->features |= NETIF_F_SG;
5926        netdev->features |= NETIF_F_TSO;
5927        netdev->features |= NETIF_F_TSO6;
5928        netdev->features |= NETIF_F_TSO_ECN;
5929        netdev->features |= NETIF_F_RXCSUM;
5930        netdev->features |= NETIF_F_HW_CSUM;
5931        netdev->features |= NETIF_F_SCTP_CRC;
5932        netdev->features |= NETIF_F_HW_TC;
5933
5934#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
5935                                  NETIF_F_GSO_GRE_CSUM | \
5936                                  NETIF_F_GSO_IPXIP4 | \
5937                                  NETIF_F_GSO_IPXIP6 | \
5938                                  NETIF_F_GSO_UDP_TUNNEL | \
5939                                  NETIF_F_GSO_UDP_TUNNEL_CSUM)
5940
5941        netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
5942        netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
5943
5944        /* setup the private structure */
5945        err = igc_sw_init(adapter);
5946        if (err)
5947                goto err_sw_init;
5948
5949        /* copy netdev features into list of user selectable features */
5950        netdev->hw_features |= NETIF_F_NTUPLE;
5951        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
5952        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
5953        netdev->hw_features |= netdev->features;
5954
5955        if (pci_using_dac)
5956                netdev->features |= NETIF_F_HIGHDMA;
5957
5958        netdev->vlan_features |= netdev->features;
5959
5960        /* MTU range: 68 - 9216 */
5961        netdev->min_mtu = ETH_MIN_MTU;
5962        netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
5963
5964        /* before reading the NVM, reset the controller to put the device in a
5965         * known good starting state
5966         */
5967        hw->mac.ops.reset_hw(hw);
5968
5969        if (igc_get_flash_presence_i225(hw)) {
5970                if (hw->nvm.ops.validate(hw) < 0) {
5971                        dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
5972                        err = -EIO;
5973                        goto err_eeprom;
5974                }
5975        }
5976
5977        if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
5978                /* copy the MAC address out of the NVM */
5979                if (hw->mac.ops.read_mac_addr(hw))
5980                        dev_err(&pdev->dev, "NVM Read Error\n");
5981        }
5982
5983        memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
5984
5985        if (!is_valid_ether_addr(netdev->dev_addr)) {
5986                dev_err(&pdev->dev, "Invalid MAC Address\n");
5987                err = -EIO;
5988                goto err_eeprom;
5989        }
5990
5991        /* configure RXPBSIZE and TXPBSIZE */
5992        wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
5993        wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
5994
5995        timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
5996        timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
5997
5998        INIT_WORK(&adapter->reset_task, igc_reset_task);
5999        INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
6000
6001        /* Initialize link properties that are user-changeable */
6002        adapter->fc_autoneg = true;
6003        hw->mac.autoneg = true;
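        /* 0xaf advertises 10/100 half+full, 1000 full and 2500 full */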
6004        hw->phy.autoneg_advertised = 0xaf;
6005
6006        hw->fc.requested_mode = igc_fc_default;
6007        hw->fc.current_mode = igc_fc_default;
6008
6009        /* By default, support wake on port A */
6010        adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
6011
6012        /* initialize the wol settings based on the eeprom settings */
6013        if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
6014                adapter->wol |= IGC_WUFC_MAG;
6015
6016        device_set_wakeup_enable(&adapter->pdev->dev,
6017                                 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
6018
6019        igc_ptp_init(adapter);
6020
6021        /* reset the hardware with the new settings */
6022        igc_reset(adapter);
6023
6024        /* let the f/w know that the h/w is now under the control of the
6025         * driver.
6026         */
6027        igc_get_hw_control(adapter);
6028
6029        strncpy(netdev->name, "eth%d", IFNAMSIZ);
6030        err = register_netdev(netdev);
6031        if (err)
6032                goto err_register;
6033
6034        /* carrier off reporting is important to ethtool even BEFORE open */
6035        netif_carrier_off(netdev);
6036
6037        /* keep a copy of the board specific info */
6038        adapter->ei = *ei;
6039
6040        /* print pcie link status and MAC address */
6041        pcie_print_link_status(pdev);
6042        netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
6043
6044        dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
6045        /* Disable EEE for internal PHY devices */
6046        hw->dev_spec._base.eee_enable = false;
6047        adapter->flags &= ~IGC_FLAG_EEE;
6048        igc_set_eee_i225(hw, false, false, false);
6049
6050        pm_runtime_put_noidle(&pdev->dev);
6051
6052        return 0;
6053
6054err_register:
6055        igc_release_hw_control(adapter);
6056err_eeprom:
6057        if (!igc_check_reset_block(hw))
6058                igc_reset_phy(hw);
6059err_sw_init:
6060        igc_clear_interrupt_scheme(adapter);
6061        iounmap(adapter->io_addr);
6062err_ioremap:
6063        free_netdev(netdev);
6064err_alloc_etherdev:
6065        pci_disable_pcie_error_reporting(pdev);
6066        pci_release_mem_regions(pdev);
6067err_pci_reg:
6068err_dma:
6069        pci_disable_device(pdev);
6070        return err;
6071}
6072
6073/**
6074 * igc_remove - Device Removal Routine
6075 * @pdev: PCI device information struct
6076 *
6077 * igc_remove is called by the PCI subsystem to alert the driver
6078 * that it should release a PCI device.  This could be caused by a
6079 * Hot-Plug event, or because the driver is going to be removed from
6080 * memory.
6081 */
6082static void igc_remove(struct pci_dev *pdev)
6083{
6084        struct net_device *netdev = pci_get_drvdata(pdev);
6085        struct igc_adapter *adapter = netdev_priv(netdev);
6086
6087        pm_runtime_get_noresume(&pdev->dev);
6088
6089        igc_flush_nfc_rules(adapter);
6090
6091        igc_ptp_stop(adapter);
6092
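            /* Mark the adapter as down first so the timers and work items
             * cancelled below cannot re-arm themselves.
             */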
6093        set_bit(__IGC_DOWN, &adapter->state);
6094
6095        del_timer_sync(&adapter->watchdog_timer);
6096        del_timer_sync(&adapter->phy_info_timer);
6097
6098        cancel_work_sync(&adapter->reset_task);
6099        cancel_work_sync(&adapter->watchdog_task);
6100
6101        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
6102         * would have already happened in close and is redundant.
6103         */
6104        igc_release_hw_control(adapter);
6105        unregister_netdev(netdev);
6106
6107        igc_clear_interrupt_scheme(adapter);
6108        pci_iounmap(pdev, adapter->io_addr);
6109        pci_release_mem_regions(pdev);
6110
6111        free_netdev(netdev);
6112
6113        pci_disable_pcie_error_reporting(pdev);
6114
6115        pci_disable_device(pdev);
6116}
6117
6118static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
6119                          bool runtime)
6120{
6121        struct net_device *netdev = pci_get_drvdata(pdev);
6122        struct igc_adapter *adapter = netdev_priv(netdev);
6123        u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
6124        struct igc_hw *hw = &adapter->hw;
6125        u32 ctrl, rctl, status;
6126        bool wake;
6127
6128        rtnl_lock();
6129        netif_device_detach(netdev);
6130
6131        if (netif_running(netdev))
6132                __igc_close(netdev, true);
6133
6134        igc_ptp_suspend(adapter);
6135
6136        igc_clear_interrupt_scheme(adapter);
6137        rtnl_unlock();
6138
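            /* If the link is currently up, clear the link-change wake
             * filter; otherwise the link dropping during power-down would
             * immediately wake the system back up.
             */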
6139        status = rd32(IGC_STATUS);
6140        if (status & IGC_STATUS_LU)
6141                wufc &= ~IGC_WUFC_LNKC;
6142
6143        if (wufc) {
6144                igc_setup_rctl(adapter);
6145                igc_set_rx_mode(netdev);
6146
6147                /* turn on all-multi mode if wake on multicast is enabled */
6148                if (wufc & IGC_WUFC_MC) {
6149                        rctl = rd32(IGC_RCTL);
6150                        rctl |= IGC_RCTL_MPE;
6151                        wr32(IGC_RCTL, rctl);
6152                }
6153
6154                ctrl = rd32(IGC_CTRL);
6155                ctrl |= IGC_CTRL_ADVD3WUC;
6156                wr32(IGC_CTRL, ctrl);
6157
6158                /* Allow time for pending master requests to run */
6159                igc_disable_pcie_master(hw);
6160
6161                wr32(IGC_WUC, IGC_WUC_PME_EN);
6162                wr32(IGC_WUFC, wufc);
6163        } else {
6164                wr32(IGC_WUC, 0);
6165                wr32(IGC_WUFC, 0);
6166        }
6167
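            /* Keep the PHY powered if any wake filter is armed or the
             * manageability engine still needs the link; otherwise power
             * it down to save energy.
             */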
6168        wake = wufc || adapter->en_mng_pt;
6169        if (!wake)
6170                igc_power_down_phy_copper_base(&adapter->hw);
6171        else
6172                igc_power_up_link(adapter);
6173
6174        if (enable_wake)
6175                *enable_wake = wake;
6176
6177        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
6178         * would have already happened in close and is redundant.
6179         */
6180        igc_release_hw_control(adapter);
6181
6182        pci_disable_device(pdev);
6183
6184        return 0;
6185}
6186
6187#ifdef CONFIG_PM
6188static int __maybe_unused igc_runtime_suspend(struct device *dev)
6189{
6190        return __igc_shutdown(to_pci_dev(dev), NULL, 1);
6191}
6192
6193static void igc_deliver_wake_packet(struct net_device *netdev)
6194{
6195        struct igc_adapter *adapter = netdev_priv(netdev);
6196        struct igc_hw *hw = &adapter->hw;
6197        struct sk_buff *skb;
6198        u32 wupl;
6199
6200        wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
6201
6202        /* WUPM stores only the first 128 bytes of the wake packet.
6203         * Read the packet only if we have the whole thing.
6204         */
6205        if (wupl == 0 || wupl > IGC_WUPM_BYTES)
6206                return;
6207
6208        skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
6209        if (!skb)
6210                return;
6211
6212        skb_put(skb, wupl);
6213
6214        /* Ensure reads are 32-bit aligned */
6215        wupl = roundup(wupl, 4);
6216
6217        memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
6218
6219        skb->protocol = eth_type_trans(skb, netdev);
6220        netif_rx(skb);
6221}
6222
6223static int __maybe_unused igc_resume(struct device *dev)
6224{
6225        struct pci_dev *pdev = to_pci_dev(dev);
6226        struct net_device *netdev = pci_get_drvdata(pdev);
6227        struct igc_adapter *adapter = netdev_priv(netdev);
6228        struct igc_hw *hw = &adapter->hw;
6229        u32 err, val;
6230
6231        pci_set_power_state(pdev, PCI_D0);
6232        pci_restore_state(pdev);
6233        pci_save_state(pdev);
6234
6235        if (!pci_device_is_present(pdev))
6236                return -ENODEV;
6237        err = pci_enable_device_mem(pdev);
6238        if (err) {
6239                netdev_err(netdev, "Cannot enable PCI device from suspend\n");
6240                return err;
6241        }
6242        pci_set_master(pdev);
6243
6244        pci_enable_wake(pdev, PCI_D3hot, 0);
6245        pci_enable_wake(pdev, PCI_D3cold, 0);
6246
6247        if (igc_init_interrupt_scheme(adapter, true)) {
6248                netdev_err(netdev, "Unable to allocate memory for queues\n");
6249                return -ENOMEM;
6250        }
6251
6252        igc_reset(adapter);
6253
6254        /* let the f/w know that the h/w is now under the control of the
6255         * driver.
6256         */
6257        igc_get_hw_control(adapter);
6258
6259        val = rd32(IGC_WUS);
6260        if (val & WAKE_PKT_WUS)
6261                igc_deliver_wake_packet(netdev);
6262
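            /* WUS bits are write-1-to-clear; discard all recorded wake
             * events now that they have been handled.
             */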
6263        wr32(IGC_WUS, ~0);
6264
6265        rtnl_lock();
6266        if (!err && netif_running(netdev))
6267                err = __igc_open(netdev, true);
6268
6269        if (!err)
6270                netif_device_attach(netdev);
6271        rtnl_unlock();
6272
6273        return err;
6274}
6275
6276static int __maybe_unused igc_runtime_resume(struct device *dev)
6277{
6278        return igc_resume(dev);
6279}
6280
6281static int __maybe_unused igc_suspend(struct device *dev)
6282{
6283        return __igc_shutdown(to_pci_dev(dev), NULL, 0);
6284}
6285
6286static int __maybe_unused igc_runtime_idle(struct device *dev)
6287{
6288        struct net_device *netdev = dev_get_drvdata(dev);
6289        struct igc_adapter *adapter = netdev_priv(netdev);
6290
6291        if (!igc_has_link(adapter))
6292                pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6293
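            /* Always report busy: the actual suspend is entered via the
             * delayed pm_schedule_suspend() above, never directly from
             * this idle callback.
             */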
6294        return -EBUSY;
6295}
6296#endif /* CONFIG_PM */
6297
6298static void igc_shutdown(struct pci_dev *pdev)
6299{
6300        bool wake;
6301
6302        __igc_shutdown(pdev, &wake, 0);
6303
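            /* Arm D3hot and wake-up only for a real power-off; on reboot
             * the device is left in D0.
             */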
6304        if (system_state == SYSTEM_POWER_OFF) {
6305                pci_wake_from_d3(pdev, wake);
6306                pci_set_power_state(pdev, PCI_D3hot);
6307        }
6308}
6309
6310/**
6311 *  igc_io_error_detected - called when PCI error is detected
6312 *  @pdev: Pointer to PCI device
6313 *  @state: The current PCI connection state
6314 *
6315 *  This function is called after a PCI bus error affecting
6316 *  this device has been detected.
6317 **/
6318static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
6319                                              pci_channel_state_t state)
6320{
6321        struct net_device *netdev = pci_get_drvdata(pdev);
6322        struct igc_adapter *adapter = netdev_priv(netdev);
6323
6324        netif_device_detach(netdev);
6325
6326        if (state == pci_channel_io_perm_failure)
6327                return PCI_ERS_RESULT_DISCONNECT;
6328
6329        if (netif_running(netdev))
6330                igc_down(adapter);
6331        pci_disable_device(pdev);
6332
6333        /* Request a slot reset. */
6334        return PCI_ERS_RESULT_NEED_RESET;
6335}
6336
6337/**
6338 *  igc_io_slot_reset - called after the PCI bus has been reset.
6339 *  @pdev: Pointer to PCI device
6340 *
6341 *  Restart the card from scratch, as if from a cold boot. Implementation
6342 *  resembles the first half of the igc_resume routine.
6343 **/
6344static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
6345{
6346        struct net_device *netdev = pci_get_drvdata(pdev);
6347        struct igc_adapter *adapter = netdev_priv(netdev);
6348        struct igc_hw *hw = &adapter->hw;
6349        pci_ers_result_t result;
6350
6351        if (pci_enable_device_mem(pdev)) {
6352                netdev_err(netdev, "Could not re-enable PCI device after reset\n");
6353                result = PCI_ERS_RESULT_DISCONNECT;
6354        } else {
6355                pci_set_master(pdev);
6356                pci_restore_state(pdev);
6357                pci_save_state(pdev);
6358
6359                pci_enable_wake(pdev, PCI_D3hot, 0);
6360                pci_enable_wake(pdev, PCI_D3cold, 0);
6361
6362                /* In case of PCI error, adapter loses its HW address
6363                 * so we should re-assign it here.
6364                 */
6365                hw->hw_addr = adapter->io_addr;
6366
6367                igc_reset(adapter);
6368                wr32(IGC_WUS, ~0);
6369                result = PCI_ERS_RESULT_RECOVERED;
6370        }
6371
6372        return result;
6373}
6374
6375/**
6376 *  igc_io_resume - called when traffic can start to flow again.
6377 *  @pdev: Pointer to PCI device
6378 *
6379 *  This callback is called when the error recovery driver tells us that
6380 *  it's OK to resume normal operation. Implementation resembles the
6381 *  second half of the igc_resume routine.
6382 */
6383static void igc_io_resume(struct pci_dev *pdev)
6384{
6385        struct net_device *netdev = pci_get_drvdata(pdev);
6386        struct igc_adapter *adapter = netdev_priv(netdev);
6387
6388        rtnl_lock();
6389        if (netif_running(netdev)) {
6390                if (igc_open(netdev)) {
6391                        netdev_err(netdev, "igc_open failed after reset\n");
                            rtnl_unlock();
6392                        return;
6393                }
6394        }
6395
6396        netif_device_attach(netdev);
6397
6398        /* let the f/w know that the h/w is now under the control of the
6399         * driver.
6400         */
6401        igc_get_hw_control(adapter);
6402        rtnl_unlock();
6403}
6404
6405static const struct pci_error_handlers igc_err_handler = {
6406        .error_detected = igc_io_error_detected,
6407        .slot_reset = igc_io_slot_reset,
6408        .resume = igc_io_resume,
6409};
6410
6411#ifdef CONFIG_PM
6412static const struct dev_pm_ops igc_pm_ops = {
6413        SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
6414        SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
6415                           igc_runtime_idle)
6416};
6417#endif
6418
6419static struct pci_driver igc_driver = {
6420        .name     = igc_driver_name,
6421        .id_table = igc_pci_tbl,
6422        .probe    = igc_probe,
6423        .remove   = igc_remove,
6424#ifdef CONFIG_PM
6425        .driver.pm = &igc_pm_ops,
6426#endif
6427        .shutdown = igc_shutdown,
6428        .err_handler = &igc_err_handler,
6429};
6430
6431/**
6432 * igc_reinit_queues - reinitialize interrupt scheme and queues
6433 * @adapter: pointer to adapter structure
6434 */
6435int igc_reinit_queues(struct igc_adapter *adapter)
6436{
6437        struct net_device *netdev = adapter->netdev;
6438        int err = 0;
6439
6440        if (netif_running(netdev))
6441                igc_close(netdev);
6442
6443        igc_reset_interrupt_capability(adapter);
6444
6445        if (igc_init_interrupt_scheme(adapter, true)) {
6446                netdev_err(netdev, "Unable to allocate memory for queues\n");
6447                return -ENOMEM;
6448        }
6449
6450        if (netif_running(netdev))
6451                err = igc_open(netdev);
6452
6453        return err;
6454}
6455
6456/**
6457 * igc_get_hw_dev - return device
6458 * @hw: pointer to hardware structure
6459 *
6460 * Used by the hardware layer to print debugging information.
6461 */
6462struct net_device *igc_get_hw_dev(struct igc_hw *hw)
6463{
6464        struct igc_adapter *adapter = hw->back;
6465
6466        return adapter->netdev;
6467}
6468
6469static void igc_disable_rx_ring_hw(struct igc_ring *ring)
6470{
6471        struct igc_hw *hw = &ring->q_vector->adapter->hw;
6472        u8 idx = ring->reg_idx;
6473        u32 rxdctl;
6474
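            /* Clearing the enable bit stops descriptor fetches; SWFLUSH
             * makes the hardware flush descriptors already queued for
             * this ring.
             */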
6475        rxdctl = rd32(IGC_RXDCTL(idx));
6476        rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
6477        rxdctl |= IGC_RXDCTL_SWFLUSH;
6478        wr32(IGC_RXDCTL(idx), rxdctl);
6479}
6480
6481void igc_disable_rx_ring(struct igc_ring *ring)
6482{
6483        igc_disable_rx_ring_hw(ring);
6484        igc_clean_rx_ring(ring);
6485}
6486
6487void igc_enable_rx_ring(struct igc_ring *ring)
6488{
6489        struct igc_adapter *adapter = ring->q_vector->adapter;
6490
6491        igc_configure_rx_ring(adapter, ring);
6492
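            /* Rings bound to an AF_XDP pool are refilled with zero-copy
             * buffers from the pool; regular rings use kernel page
             * buffers.
             */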
6493        if (ring->xsk_pool)
6494                igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
6495        else
6496                igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
6497}
6498
6499static void igc_disable_tx_ring_hw(struct igc_ring *ring)
6500{
6501        struct igc_hw *hw = &ring->q_vector->adapter->hw;
6502        u8 idx = ring->reg_idx;
6503        u32 txdctl;
6504
6505        txdctl = rd32(IGC_TXDCTL(idx));
6506        txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
6507        txdctl |= IGC_TXDCTL_SWFLUSH;
6508        wr32(IGC_TXDCTL(idx), txdctl);
6509}
6510
6511void igc_disable_tx_ring(struct igc_ring *ring)
6512{
6513        igc_disable_tx_ring_hw(ring);
6514        igc_clean_tx_ring(ring);
6515}
6516
6517void igc_enable_tx_ring(struct igc_ring *ring)
6518{
6519        struct igc_adapter *adapter = ring->q_vector->adapter;
6520
6521        igc_configure_tx_ring(adapter, ring);
6522}
6523
6524/**
6525 * igc_init_module - Driver Registration Routine
6526 *
6527 * igc_init_module is the first routine called when the driver is
6528 * loaded. All it does is register with the PCI subsystem.
6529 */
6530static int __init igc_init_module(void)
6531{
6532        int ret;
6533
6534        pr_info("%s\n", igc_driver_string);
6535        pr_info("%s\n", igc_copyright);
6536
6537        ret = pci_register_driver(&igc_driver);
6538        return ret;
6539}
6540
6541module_init(igc_init_module);
6542
6543/**
6544 * igc_exit_module - Driver Exit Cleanup Routine
6545 *
6546 * igc_exit_module is called just before the driver is removed
6547 * from memory.
6548 */
6549static void __exit igc_exit_module(void)
6550{
6551        pci_unregister_driver(&igc_driver);
6552}
6553
6554module_exit(igc_exit_module);
6555/* igc_main.c */
6556