/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#ifdef CONFIG_I40E_VXLAN
#include <net/vxlan.h>
#endif

const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
                        "Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
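/* with the values above, DRV_VERSION expands to the string "1.2.2-k" */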
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";

/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);

/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);

#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
                            u64 size, u32 alignment)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
                                      &mem->pa, GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;

        return 0;
}
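
/* Minimal usage sketch for the pair of DMA hooks (hypothetical caller,
 * not code from this file; the shared code reaches these through its
 * osdep wrappers):
 *
 *	struct i40e_dma_mem mem;
 *
 *	if (i40e_allocate_dma_mem_d(hw, &mem, 4096, 4096))
 *		return -ENOMEM;
 *	// hand mem.pa to the hardware, use mem.va from the CPU
 *	i40e_free_dma_mem_d(hw, &mem);
 */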

/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
        struct i40e_pf *pf = (struct i40e_pf *)hw->back;

        dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
        mem->va = NULL;
        mem->pa = 0;
        mem->size = 0;

        return 0;
}

/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
                             u32 size)
{
        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (!mem->va)
                return -ENOMEM;

        return 0;
}

/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);
        mem->va = NULL;
        mem->size = 0;

        return 0;
}

/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
                         u16 needed, u16 id)
{
        int ret = -ENOMEM;
        int i, j;

        if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
                dev_info(&pf->pdev->dev,
                         "param err: pile=%p needed=%d id=0x%04x\n",
                         pile, needed, id);
                return -EINVAL;
        }

        /* start the linear search with an imperfect hint */
        i = pile->search_hint;
        while (i < pile->num_entries) {
                /* skip already allocated entries */
                if (pile->list[i] & I40E_PILE_VALID_BIT) {
                        i++;
                        continue;
                }

                /* do we have enough in this lump? */
                for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
                        if (pile->list[i+j] & I40E_PILE_VALID_BIT)
                                break;
                }

                if (j == needed) {
                        /* there was enough, so assign it to the requestor */
                        for (j = 0; j < needed; j++)
                                pile->list[i+j] = id | I40E_PILE_VALID_BIT;
                        ret = i;
                        pile->search_hint = i + j;
                        break;
                } else {
                        /* not enough, so skip over it and continue looking */
                        i += j;
                }
        }

        return ret;
}

/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump, or -EINVAL on a bad parameter
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
        int valid_id = (id | I40E_PILE_VALID_BIT);
        int count = 0;
        int i;

        if (!pile || index >= pile->num_entries)
                return -EINVAL;

        for (i = index;
             i < pile->num_entries && pile->list[i] == valid_id;
             i++) {
                pile->list[i] = 0;
                count++;
        }

        if (count && index < pile->search_hint)
                pile->search_hint = index;

        return count;
}
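
/* Worked example of the lump allocator above (illustrative values, not
 * from real hardware): with V = I40E_PILE_VALID_BIT and owner ids A/B,
 * suppose pile->list = { A|V, A|V, 0, 0, 0, B|V, ... } and search_hint
 * is 0.  Then i40e_get_lump(pf, pile, 3, C) skips the two entries owned
 * by A, finds three free slots starting at index 2, stamps them C|V,
 * sets search_hint = 5 and returns 2.  A later i40e_put_lump(pile, 2, C)
 * clears exactly those three entries (they all compare equal to C|V,
 * and the scan stops at B|V) and returns 3.
 */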

/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
static void i40e_service_event_schedule(struct i40e_pf *pf)
{
        if (!test_bit(__I40E_DOWN, &pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
            !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
                schedule_work(&pf->service_task);
}

/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;

        pf->tx_timeout_count++;

        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
                pf->tx_timeout_recovery_level = 1;
        pf->tx_timeout_last_recovery = jiffies;
        netdev_info(netdev, "tx_timeout recovery level %d\n",
                    pf->tx_timeout_recovery_level);

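        /* Escalating recovery: level 0 re-inits only this VSI's queues;
         * levels 1-3 request progressively wider resets (PF, CORE, then
         * GLOBAL); anything beyond that gives up and takes the device
         * down.
         */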
        switch (pf->tx_timeout_recovery_level) {
        case 0:
                /* disable and re-enable queues for the VSI */
                if (in_interrupt()) {
                        set_bit(__I40E_REINIT_REQUESTED, &pf->state);
                        set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
                } else {
                        i40e_vsi_reinit_locked(vsi);
                }
                break;
        case 1:
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                break;
        case 2:
                set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
                break;
        case 3:
                set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
                set_bit(__I40E_DOWN_REQUESTED, &pf->state);
                set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                break;
        }
        i40e_service_event_schedule(pf);
        pf->tx_timeout_recovery_level++;
}

/**
 * i40e_release_rx_desc - Store the new tail value
 * @rx_ring: ring to bump
 * @val: new tail index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
        rx_ring->next_to_use = val;

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        writel(val, rx_ring->tail);
}

/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
        return &vsi->net_stats;
}

/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                             struct net_device *netdev,
                                             struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                             struct net_device *netdev,
                                             struct rtnl_link_stats64 *stats)
#endif
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_ring *tx_ring, *rx_ring;
        struct i40e_vsi *vsi = np->vsi;
        struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
        int i;

        if (test_bit(__I40E_DOWN, &vsi->state))
                return stats;

        if (!vsi->tx_rings)
                return stats;

        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                u64 bytes, packets;
                unsigned int start;

                tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
                if (!tx_ring)
                        continue;

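                /* the 64-bit counters are read under the per-ring
                 * u64_stats seqcount; retry if a writer updated the
                 * stats mid-read (only ever spins on 32-bit SMP)
                 */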
                do {
                        start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
                        packets = tx_ring->stats.packets;
                        bytes   = tx_ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

                stats->tx_packets += packets;
                stats->tx_bytes   += bytes;
                rx_ring = &tx_ring[1];

                do {
                        start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
                        packets = rx_ring->stats.packets;
                        bytes   = rx_ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

                stats->rx_packets += packets;
                stats->rx_bytes   += bytes;
        }
        rcu_read_unlock();

        /* following stats updated by i40e_watchdog_subtask() */
        stats->multicast        = vsi_stats->multicast;
        stats->tx_errors        = vsi_stats->tx_errors;
        stats->tx_dropped       = vsi_stats->tx_dropped;
        stats->rx_errors        = vsi_stats->rx_errors;
        stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;

        return stats;
}

/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
        struct rtnl_link_stats64 *ns;
        int i;

        if (!vsi)
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        memset(ns, 0, sizeof(*ns));
        memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
        memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings && vsi->rx_rings[0]) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        memset(&vsi->rx_rings[i]->stats, 0,
                               sizeof(vsi->rx_rings[i]->stats));
                        memset(&vsi->rx_rings[i]->rx_stats, 0,
                               sizeof(vsi->rx_rings[i]->rx_stats));
                        memset(&vsi->tx_rings[i]->stats, 0,
                               sizeof(vsi->tx_rings[i]->stats));
                        memset(&vsi->tx_rings[i]->tx_stats, 0,
                               sizeof(vsi->tx_rings[i]->tx_stats));
                }
        }
        vsi->stat_offsets_loaded = false;
}

/**
 * i40e_pf_reset_stats - Reset all of the stats for the given pf
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
        int i;

        memset(&pf->stats, 0, sizeof(pf->stats));
        memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
        pf->stat_offsets_loaded = false;

        for (i = 0; i < I40E_MAX_VEB; i++) {
                if (pf->veb[i]) {
                        memset(&pf->veb[i]->stats, 0,
                               sizeof(pf->veb[i]->stats));
                        memset(&pf->veb[i]->stats_offsets, 0,
                               sizeof(pf->veb[i]->stats_offsets));
                        pf->veb[i]->stat_offsets_loaded = false;
                }
        }
}

/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u64 new_data;

        if (hw->device_id == I40E_DEV_ID_QEMU) {
                new_data = rd32(hw, loreg);
                new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
        } else {
                new_data = rd64(hw, loreg);
        }
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = new_data - *offset;
        else
                *stat = (new_data + ((u64)1 << 48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;
}
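
/* Worked example of the 48-bit roll-over handling above: if the first
 * read saved *offset = 0xFFFFFFFFFFF0 and the counter has since wrapped
 * so that new_data = 0x10, then new_data < *offset and the stat becomes
 * (0x10 + (1ULL << 48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. 32 events since
 * the offset was taken.
 */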

/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
                               bool offset_loaded, u64 *offset, u64 *stat)
{
        u32 new_data;

        new_data = rd32(hw, reg);
        if (!offset_loaded)
                *offset = new_data;
        if (likely(new_data >= *offset))
                *stat = (u32)(new_data - *offset);
        else
                *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}

/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
        int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */

        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_errors, &es->tx_errors);
        i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_discards, &es->rx_discards);
        i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

        i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
                           I40E_GLV_GORCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
                           I40E_GLV_UPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
                           I40E_GLV_MPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
                           I40E_GLV_BPRCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
                           I40E_GLV_GOTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
                           I40E_GLV_UPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
                           I40E_GLV_MPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
                           I40E_GLV_BPTCL(stat_idx),
                           vsi->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        vsi->stat_offsets_loaded = true;
}

/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
        struct i40e_pf *pf = veb->pf;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        int idx = veb->stats_idx;

        es = &veb->stats;
        oes = &veb->stats_offsets;

        /* Gather up the stats that the hw collects */
        i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_discards, &es->tx_discards);
        if (hw->revision_id > 0)
                i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
                                   veb->stat_offsets_loaded,
                                   &oes->rx_unknown_protocol,
                                   &es->rx_unknown_protocol);
        i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_bytes, &es->rx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_unicast, &es->rx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_multicast, &es->rx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->rx_broadcast, &es->rx_broadcast);

        i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_bytes, &es->tx_bytes);
        i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_unicast, &es->tx_unicast);
        i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_multicast, &es->tx_multicast);
        i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
                           veb->stat_offsets_loaded,
                           &oes->tx_broadcast, &es->tx_broadcast);
        veb->stat_offsets_loaded = true;
}

#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_fcoe_stats *ofs;
        struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
        int idx;

        if (vsi->type != I40E_VSI_FCOE)
                return;

        idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
        fs = &vsi->fcoe_stats;
        ofs = &vsi->fcoe_stats_offsets;

        i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
                           vsi->fcoe_stat_offsets_loaded,
                           &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
        i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
                           vsi->fcoe_stat_offsets_loaded,
                           &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
        i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
                           vsi->fcoe_stat_offsets_loaded,
                           &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
        i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
                           vsi->fcoe_stat_offsets_loaded,
                           &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
        i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
                           vsi->fcoe_stat_offsets_loaded,
                           &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
        i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
                           vsi->fcoe_stat_offsets_loaded,
                           &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
        i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
                           vsi->fcoe_stat_offsets_loaded,
                           &ofs->fcoe_last_error, &fs->fcoe_last_error);
        i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
                           vsi->fcoe_stat_offsets_loaded,
                           &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

        vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
 **/
static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
{
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;
        struct i40e_hw_port_stats *nsd = &pf->stats;
        struct i40e_hw *hw = &pf->hw;
        u64 xoff = 0;
        u16 i, v;

        if ((hw->fc.current_mode != I40E_FC_FULL) &&
            (hw->fc.current_mode != I40E_FC_RX_PAUSE))
                return;

        xoff = nsd->link_xoff_rx;
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_rx, &nsd->link_xoff_rx);

        /* No new LFC xoff rx */
        if (!(nsd->link_xoff_rx - xoff))
                return;

        /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                struct i40e_vsi *vsi = pf->vsi[v];

                if (!vsi || !vsi->tx_rings[0])
                        continue;

                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        struct i40e_ring *ring = vsi->tx_rings[i];

                        clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
                }
        }
}

/**
 * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
 * @pf: the corresponding PF
 *
 * Update the Rx XOFF counter (PAUSE frames) in PFC mode
 **/
static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
{
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;
        struct i40e_hw_port_stats *nsd = &pf->stats;
        bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
        struct i40e_dcbx_config *dcb_cfg;
        struct i40e_hw *hw = &pf->hw;
        u16 i, v;
        u8 tc;

        dcb_cfg = &hw->local_dcbx_config;

        /* See if DCB enabled with PFC TC */
        if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
            !(dcb_cfg->pfc.pfcenable)) {
                i40e_update_link_xoff_rx(pf);
                return;
        }

        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
                u64 prio_xoff = nsd->priority_xoff_rx[i];

                i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_rx[i],
                                   &nsd->priority_xoff_rx[i]);

                /* No new PFC xoff rx */
                if (!(nsd->priority_xoff_rx[i] - prio_xoff))
                        continue;
                /* Get the TC for given priority */
                tc = dcb_cfg->etscfg.prioritytable[i];
                xoff[tc] = true;
        }

        /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                struct i40e_vsi *vsi = pf->vsi[v];

                if (!vsi || !vsi->tx_rings[0])
                        continue;

                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        struct i40e_ring *ring = vsi->tx_rings[i];

                        tc = ring->dcb_tc;
                        if (xoff[tc])
                                clear_bit(__I40E_HANG_CHECK_ARMED,
                                          &ring->state);
                }
        }
}
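
/* Example of the priority-to-TC fan-out above (illustrative numbers):
 * if dcb_cfg->etscfg.prioritytable maps priorities 0-3 to TC0 and 4-7
 * to TC1, and new XOFF frames arrived only for priority 5, then only
 * xoff[1] is set and only Tx rings with ring->dcb_tc == 1 have their
 * hang-check disarmed.
 */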

/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        struct rtnl_link_stats64 *ons;
        struct rtnl_link_stats64 *ns;   /* netdev stats */
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        u32 tx_restart, tx_busy;
        struct i40e_ring *p;
        u32 rx_page, rx_buf;
        u64 bytes, packets;
        unsigned int start;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        u16 q;

        if (test_bit(__I40E_DOWN, &vsi->state) ||
            test_bit(__I40E_CONFIG_BUSY, &pf->state))
                return;

        ns = i40e_get_vsi_stats_struct(vsi);
        ons = &vsi->net_stats_offsets;
        es = &vsi->eth_stats;
        oes = &vsi->eth_stats_offsets;

        /* Gather up the netdev and vsi stats that the driver collects
         * on the fly during packet processing
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
        tx_restart = tx_busy = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
                p = ACCESS_ONCE(vsi->tx_rings[q]);

                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                tx_b += bytes;
                tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;

                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
                        bytes = p->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));
                rx_b += bytes;
                rx_p += packets;
                rx_buf += p->rx_stats.alloc_buff_failed;
                rx_page += p->rx_stats.alloc_page_failed;
        }
        rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;

        ns->rx_packets = rx_p;
        ns->rx_bytes = rx_b;
        ns->tx_packets = tx_p;
        ns->tx_bytes = tx_b;

        /* update netdev stats from eth stats */
        i40e_update_eth_stats(vsi);
        ons->tx_errors = oes->tx_errors;
        ns->tx_errors = es->tx_errors;
        ons->multicast = oes->rx_multicast;
        ns->multicast = es->rx_multicast;
        ons->rx_dropped = oes->rx_discards;
        ns->rx_dropped = es->rx_discards;
        ons->tx_dropped = oes->tx_discards;
        ns->tx_dropped = es->tx_discards;

        /* pull in a couple PF stats if this is the main vsi */
        if (vsi == pf->vsi[pf->lan_vsi]) {
                ns->rx_crc_errors = pf->stats.crc_errors;
                ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
                ns->rx_length_errors = pf->stats.rx_length_errors;
        }
}

/**
 * i40e_update_pf_stats - Update the pf statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
        struct i40e_hw_port_stats *osd = &pf->stats_offsets;
        struct i40e_hw_port_stats *nsd = &pf->stats;
        struct i40e_hw *hw = &pf->hw;
        u32 val;
        int i;

        i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
                           I40E_GLPRT_GORCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
        i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
                           I40E_GLPRT_GOTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
        i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_discards,
                           &nsd->eth.rx_discards);
        i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_discards,
                           &nsd->eth.tx_discards);

        i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
                           I40E_GLPRT_UPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_unicast,
                           &nsd->eth.rx_unicast);
        i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
                           I40E_GLPRT_MPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_multicast,
                           &nsd->eth.rx_multicast);
        i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
                           I40E_GLPRT_BPRCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.rx_broadcast,
                           &nsd->eth.rx_broadcast);
        i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
                           I40E_GLPRT_UPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_unicast,
                           &nsd->eth.tx_unicast);
        i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
                           I40E_GLPRT_MPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_multicast,
                           &nsd->eth.tx_multicast);
        i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
                           I40E_GLPRT_BPTCL(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->eth.tx_broadcast,
                           &nsd->eth.tx_broadcast);

        i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_dropped_link_down,
                           &nsd->tx_dropped_link_down);

        i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->crc_errors, &nsd->crc_errors);

        i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->illegal_bytes, &nsd->illegal_bytes);

        i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_local_faults,
                           &nsd->mac_local_faults);
        i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->mac_remote_faults,
                           &nsd->mac_remote_faults);

        i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_length_errors,
                           &nsd->rx_length_errors);

        i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_rx, &nsd->link_xon_rx);
        i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xon_tx, &nsd->link_xon_tx);
        i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
        i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->link_xoff_tx, &nsd->link_xoff_tx);

        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
                i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_rx[i],
                                   &nsd->priority_xon_rx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_tx[i],
                                   &nsd->priority_xon_tx[i]);
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_tx[i],
                                   &nsd->priority_xoff_tx[i]);
                i40e_stat_update32(hw,
                                   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xon_2_xoff[i],
                                   &nsd->priority_xon_2_xoff[i]);
        }

        i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
                           I40E_GLPRT_PRC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_64, &nsd->rx_size_64);
        i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
                           I40E_GLPRT_PRC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_127, &nsd->rx_size_127);
        i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
                           I40E_GLPRT_PRC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_255, &nsd->rx_size_255);
        i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
                           I40E_GLPRT_PRC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_511, &nsd->rx_size_511);
        i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
                           I40E_GLPRT_PRC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1023, &nsd->rx_size_1023);
        i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
                           I40E_GLPRT_PRC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_1522, &nsd->rx_size_1522);
        i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
                           I40E_GLPRT_PRC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_size_big, &nsd->rx_size_big);

        i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
                           I40E_GLPRT_PTC64L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_64, &nsd->tx_size_64);
        i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
                           I40E_GLPRT_PTC127L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_127, &nsd->tx_size_127);
        i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
                           I40E_GLPRT_PTC255L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_255, &nsd->tx_size_255);
        i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
                           I40E_GLPRT_PTC511L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_511, &nsd->tx_size_511);
        i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
                           I40E_GLPRT_PTC1023L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1023, &nsd->tx_size_1023);
        i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
                           I40E_GLPRT_PTC1522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_1522, &nsd->tx_size_1522);
        i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
                           I40E_GLPRT_PTC9522L(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->tx_size_big, &nsd->tx_size_big);

        i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_undersize, &nsd->rx_undersize);
        i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_fragments, &nsd->rx_fragments);
        i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_oversize, &nsd->rx_oversize);
        i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
                           pf->stat_offsets_loaded,
                           &osd->rx_jabber, &nsd->rx_jabber);

        /* FDIR stats */
        i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
                           pf->stat_offsets_loaded,
                           &osd->fd_atr_match, &nsd->fd_atr_match);
        i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
                           pf->stat_offsets_loaded,
                           &osd->fd_sb_match, &nsd->fd_sb_match);

        val = rd32(hw, I40E_PRTPM_EEE_STAT);
        nsd->tx_lpi_status =
                       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
                        I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
        nsd->rx_lpi_status =
                       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
                        I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
        i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
                           pf->stat_offsets_loaded,
                           &osd->tx_lpi_count, &nsd->tx_lpi_count);
        i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
                           pf->stat_offsets_loaded,
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);

        pf->stat_offsets_loaded = true;
}

/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;

        if (vsi == pf->vsi[pf->lan_vsi])
                i40e_update_pf_stats(pf);

        i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
        i40e_update_fcoe_stats(vsi);
#endif
}

/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
                                                u8 *macaddr, s16 vlan,
                                                bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return NULL;

        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if ((ether_addr_equal(macaddr, f->macaddr)) &&
                    (vlan == f->vlan)    &&
                    (!is_vf || f->is_vf) &&
                    (!is_netdev || f->is_netdev))
                        return f;
        }
        return NULL;
}

/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
                                      bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return NULL;

        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if ((ether_addr_equal(macaddr, f->macaddr)) &&
                    (!is_vf || f->is_vf) &&
                    (!is_netdev || f->is_netdev))
                        return f;
        }
        return NULL;
}

/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
        struct i40e_mac_filter *f;

        /* A vlan of -1 on every filter denotes "not in vlan mode",
         * so we have to walk the whole list to be sure
         */
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if (f->vlan >= 0)
                        return true;
        }

        return false;
}

/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a vf
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
                                             bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                if (!i40e_find_filter(vsi, macaddr, f->vlan,
                                      is_vf, is_netdev)) {
                        if (!i40e_add_filter(vsi, macaddr, f->vlan,
                                             is_vf, is_netdev))
                                return NULL;
                }
        }

        return list_first_entry_or_null(&vsi->mac_filter_list,
                                        struct i40e_mac_filter, list);
}
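
/* Usage sketch (hypothetical caller, not code from this file): when a
 * new MAC address must be reachable on every vlan the VSI already
 * carries,
 *
 *	f = i40e_put_mac_in_vlan(vsi, mac, false, true);
 *
 * clones that MAC onto each vlan found in the filter list, so the
 * caller never has to enumerate the vlans itself.
 */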

/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Some older firmware configurations set up a default promiscuous VLAN
 * filter that needs to be removed.
 **/
static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
        struct i40e_aqc_remove_macvlan_element_data element;
        struct i40e_pf *pf = vsi->back;
        i40e_status aq_ret;

        /* Only appropriate for the PF main VSI */
        if (vsi->type != I40E_VSI_MAIN)
                return -EINVAL;

        memset(&element, 0, sizeof(element));
        ether_addr_copy(element.mac_addr, macaddr);
        element.vlan_tag = 0;
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
                        I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
        aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
        if (aq_ret)
                return -ENOENT;

        return 0;
}

/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        u8 *macaddr, s16 vlan,
                                        bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return NULL;

        f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        goto add_filter_out;

                ether_addr_copy(f->macaddr, macaddr);
                f->vlan = vlan;
                f->changed = true;

                INIT_LIST_HEAD(&f->list);
                list_add(&f->list, &vsi->mac_filter_list);
        }

        /* increment counter and add a new flag if needed */
        if (is_vf) {
                if (!f->is_vf) {
                        f->is_vf = true;
                        f->counter++;
                }
        } else if (is_netdev) {
                if (!f->is_netdev) {
                        f->is_netdev = true;
                        f->counter++;
                }
        } else {
                f->counter++;
        }

        /* changed tells sync_filters_subtask to
         * push the filter down to the firmware
         */
        if (f->changed) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }

add_filter_out:
        return f;
}
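
/* Filter lifetime sketch: f->counter is a reference count.  The vf and
 * netdev sources each contribute at most one reference (guarded by the
 * is_vf/is_netdev flags), while other callers may stack further
 * references.  i40e_del_filter() below drops the matching reference,
 * and only when the count reaches zero is the filter marked changed so
 * the sync subtask removes it from the firmware.
 */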

/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a vf filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
                     u8 *macaddr, s16 vlan,
                     bool is_vf, bool is_netdev)
{
        struct i40e_mac_filter *f;

        if (!vsi || !macaddr)
                return;

        f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
        if (!f || f->counter == 0)
                return;

        if (is_vf) {
                if (f->is_vf) {
                        f->is_vf = false;
                        f->counter--;
                }
        } else if (is_netdev) {
                if (f->is_netdev) {
                        f->is_netdev = false;
                        f->counter--;
                }
        } else {
                /* make sure we don't remove a filter in use by vf or netdev */
                int min_f = 0;

                min_f += (f->is_vf ? 1 : 0);
                min_f += (f->is_netdev ? 1 : 0);

                if (f->counter > min_f)
                        f->counter--;
        }

        /* counter == 0 tells sync_filters_subtask to
         * remove the filter from the firmware's list
         */
        if (f->counter == 0) {
                f->changed = true;
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
        }
}

/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        struct sockaddr *addr = p;
        struct i40e_mac_filter *f;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
                netdev_info(netdev, "already using mac address %pM\n",
                            addr->sa_data);
                return 0;
        }

        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(hw->mac.addr, addr->sa_data))
                netdev_info(netdev, "returning to hw mac address %pM\n",
                            hw->mac.addr);
        else
                netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;

                ret = i40e_aq_mac_address_write(&vsi->back->hw,
                                                I40E_AQC_WRITE_TYPE_LAA_WOL,
                                                addr->sa_data, NULL);
                if (ret) {
                        netdev_info(netdev,
                                    "Addr change for Main VSI failed: %d\n",
                                    ret);
                        return -EADDRNOTAVAIL;
                }
        }

        if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
                struct i40e_aqc_remove_macvlan_element_data element;

                memset(&element, 0, sizeof(element));
                ether_addr_copy(element.mac_addr, netdev->dev_addr);
                element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1437                i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1438        } else {
1439                i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1440                                false, false);
1441        }
1442
1443        if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1444                struct i40e_aqc_add_macvlan_element_data element;
1445
1446                memset(&element, 0, sizeof(element));
1447                ether_addr_copy(element.mac_addr, hw->mac.addr);
1448                element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1449                i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1450        } else {
1451                f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1452                                    false, false);
1453                if (f)
1454                        f->is_laa = true;
1455        }
1456
1457        i40e_sync_vsi_filters(vsi);
1458        ether_addr_copy(netdev->dev_addr, addr->sa_data);
1459
1460        return 0;
1461}
1462
1463/**
1464 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1465 * @vsi: the VSI being setup
1466 * @ctxt: VSI context structure
1467 * @enabled_tc: Enabled TCs bitmap
1468 * @is_add: True if called before Add VSI
1469 *
1470 * Setup VSI queue mapping for enabled traffic classes.
1471 **/
1472#ifdef I40E_FCOE
1473void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1474                              struct i40e_vsi_context *ctxt,
1475                              u8 enabled_tc,
1476                              bool is_add)
1477#else
1478static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1479                                     struct i40e_vsi_context *ctxt,
1480                                     u8 enabled_tc,
1481                                     bool is_add)
1482#endif
1483{
1484        struct i40e_pf *pf = vsi->back;
1485        u16 sections = 0;
1486        u8 netdev_tc = 0;
1487        u16 numtc = 0;
1488        u16 qcount;
1489        u8 offset;
1490        u16 qmap;
1491        int i;
1492        u16 num_tc_qps = 0;
1493
1494        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1495        offset = 0;
1496
1497        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1498                /* Find numtc from enabled TC bitmap */
1499                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1500                        if (enabled_tc & (1 << i)) /* TC is enabled */
1501                                numtc++;
1502                }
1503                if (!numtc) {
1504                        dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1505                        numtc = 1;
1506                }
1507        } else {
1508                /* At least TC0 is enabled in the non-DCB case */
1509                numtc = 1;
1510        }
1511
1512        vsi->tc_config.numtc = numtc;
1513        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1514        /* Number of queues per enabled TC */
1515        num_tc_qps = vsi->alloc_queue_pairs/numtc;
1516        num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1517
1518        /* Setup queue offset/count for all TCs for given VSI */
1519        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1520                /* See if the given TC is enabled for the given VSI */
1521                if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
1522                        int pow, num_qps;
1523
1524                        switch (vsi->type) {
1525                        case I40E_VSI_MAIN:
1526                                qcount = min_t(int, pf->rss_size, num_tc_qps);
1527                                break;
1528#ifdef I40E_FCOE
1529                        case I40E_VSI_FCOE:
1530                                qcount = num_tc_qps;
1531                                break;
1532#endif
1533                        case I40E_VSI_FDIR:
1534                        case I40E_VSI_SRIOV:
1535                        case I40E_VSI_VMDQ2:
1536                        default:
1537                                qcount = num_tc_qps;
1538                                WARN_ON(i != 0);
1539                                break;
1540                        }
1541                        vsi->tc_config.tc_info[i].qoffset = offset;
1542                        vsi->tc_config.tc_info[i].qcount = qcount;
1543
1544                        /* find the next power-of-2 at or above the queue pair count */
1545                        num_qps = qcount;
1546                        pow = 0;
1547                        while (num_qps && ((1 << pow) < qcount)) {
1548                                pow++;
1549                                num_qps >>= 1;
1550                        }
1551
1552                        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1553                        qmap =
1554                            (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1555                            (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1556
1557                        offset += qcount;
1558                } else {
1559                        /* TC is not enabled so set the offset to
1560                         * default queue and allocate one queue
1561                         * for the given TC.
1562                         */
1563                        vsi->tc_config.tc_info[i].qoffset = 0;
1564                        vsi->tc_config.tc_info[i].qcount = 1;
1565                        vsi->tc_config.tc_info[i].netdev_tc = 0;
1566
1567                        qmap = 0;
1568                }
1569                ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1570        }
1571
1572        /* Set actual Tx/Rx queue pairs */
1573        vsi->num_queue_pairs = offset;
1574
1575        /* Scheduler section valid can only be set for ADD VSI */
1576        if (is_add) {
1577                sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1578
1579                ctxt->info.up_enable_bits = enabled_tc;
1580        }
1581        if (vsi->type == I40E_VSI_SRIOV) {
1582                ctxt->info.mapping_flags |=
1583                                     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1584                for (i = 0; i < vsi->num_queue_pairs; i++)
1585                        ctxt->info.queue_mapping[i] =
1586                                               cpu_to_le16(vsi->base_queue + i);
1587        } else {
1588                ctxt->info.mapping_flags |=
1589                                        cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1590                ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1591        }
1592        ctxt->info.valid_sections |= cpu_to_le16(sections);
1593}
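
/* Worked example (illustrative): for a TC with offset = 8 and qcount = 4,
 * the loop above yields pow = 2 (the smallest pow with (1 << pow) >= 4),
 * so the mapping entry becomes
 *
 *	qmap = (8 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (2 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. the TC starts at queue 8 and spans 1 << 2 = 4 queue pairs (8..11).
 */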
1594
1595/**
1596 * i40e_set_rx_mode - NDO callback to set the netdev filters
1597 * @netdev: network interface device structure
1598 **/
1599#ifdef I40E_FCOE
1600void i40e_set_rx_mode(struct net_device *netdev)
1601#else
1602static void i40e_set_rx_mode(struct net_device *netdev)
1603#endif
1604{
1605        struct i40e_netdev_priv *np = netdev_priv(netdev);
1606        struct i40e_mac_filter *f, *ftmp;
1607        struct i40e_vsi *vsi = np->vsi;
1608        struct netdev_hw_addr *uca;
1609        struct netdev_hw_addr *mca;
1610        struct netdev_hw_addr *ha;
1611
1612        /* add addr if not already in the filter list */
1613        netdev_for_each_uc_addr(uca, netdev) {
1614                if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1615                        if (i40e_is_vsi_in_vlan(vsi))
1616                                i40e_put_mac_in_vlan(vsi, uca->addr,
1617                                                     false, true);
1618                        else
1619                                i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1620                                                false, true);
1621                }
1622        }
1623
1624        netdev_for_each_mc_addr(mca, netdev) {
1625                if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1626                        if (i40e_is_vsi_in_vlan(vsi))
1627                                i40e_put_mac_in_vlan(vsi, mca->addr,
1628                                                     false, true);
1629                        else
1630                                i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1631                                                false, true);
1632                }
1633        }
1634
1635        /* remove filter if not in netdev list */
1636        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1637                bool found = false;
1638
1639                if (!f->is_netdev)
1640                        continue;
1641
1642                if (is_multicast_ether_addr(f->macaddr)) {
1643                        netdev_for_each_mc_addr(mca, netdev) {
1644                                if (ether_addr_equal(mca->addr, f->macaddr)) {
1645                                        found = true;
1646                                        break;
1647                                }
1648                        }
1649                } else {
1650                        netdev_for_each_uc_addr(uca, netdev) {
1651                                if (ether_addr_equal(uca->addr, f->macaddr)) {
1652                                        found = true;
1653                                        break;
1654                                }
1655                        }
1656
1657                        for_each_dev_addr(netdev, ha) {
1658                                if (ether_addr_equal(ha->addr, f->macaddr)) {
1659                                        found = true;
1660                                        break;
1661                                }
1662                        }
1663                }
1664                if (!found)
1665                        i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
1666                                        false, true);
1667        }
1668
1669        /* check for other flag changes */
1670        if (vsi->current_netdev_flags != vsi->netdev->flags) {
1671                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1672                vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1673        }
1674}
1675
1676/**
1677 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1678 * @vsi: ptr to the VSI
1679 *
1680 * Push any outstanding VSI filter changes through the AdminQ.
1681 *
1682 * Returns 0 or error value
1683 **/
1684int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
1685{
1686        struct i40e_mac_filter *f, *ftmp;
1687        bool promisc_forced_on = false;
1688        bool add_happened = false;
1689        int filter_list_len = 0;
1690        u32 changed_flags = 0;
1691        i40e_status aq_ret = 0;
1692        struct i40e_pf *pf;
1693        int num_add = 0;
1694        int num_del = 0;
1695        u16 cmd_flags;
1696
1697        /* typed pointers for the add/delete arrays, kcalloc'd below */
1698        struct i40e_aqc_add_macvlan_element_data *add_list;
1699        struct i40e_aqc_remove_macvlan_element_data *del_list;
1700
1701        while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1702                usleep_range(1000, 2000);
1703        pf = vsi->back;
1704
1705        if (vsi->netdev) {
1706                changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1707                vsi->current_netdev_flags = vsi->netdev->flags;
1708        }
1709
1710        if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1711                vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1712
1713                filter_list_len = pf->hw.aq.asq_buf_size /
1714                            sizeof(struct i40e_aqc_remove_macvlan_element_data);
1715                del_list = kcalloc(filter_list_len,
1716                            sizeof(struct i40e_aqc_remove_macvlan_element_data),
1717                            GFP_KERNEL);
1718                if (!del_list) {
                            /* release the config-busy bit taken above */
                            clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
                            return -ENOMEM;
                    }
1720
1721                list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1722                        if (!f->changed)
1723                                continue;
1724
1725                        if (f->counter != 0)
1726                                continue;
1727                        f->changed = false;
1728                        cmd_flags = 0;
1729
1730                        /* add to delete list */
1731                        ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1732                        del_list[num_del].vlan_tag =
1733                                cpu_to_le16((u16)(f->vlan ==
1734                                            I40E_VLAN_ANY ? 0 : f->vlan));
1735
1736                        cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1737                        del_list[num_del].flags = cmd_flags;
1738                        num_del++;
1739
1740                        /* unlink from filter list */
1741                        list_del(&f->list);
1742                        kfree(f);
1743
1744                        /* flush a full buffer */
1745                        if (num_del == filter_list_len) {
1746                                aq_ret = i40e_aq_remove_macvlan(&pf->hw,
1747                                            vsi->seid, del_list, num_del,
1748                                            NULL);
1749                                num_del = 0;
1750                                memset(del_list, 0, filter_list_len *
                                           sizeof(*del_list));
1751
1752                                if (aq_ret &&
1753                                    pf->hw.aq.asq_last_status !=
1754                                                              I40E_AQ_RC_ENOENT)
1755                                        dev_info(&pf->pdev->dev,
1756                                                 "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
1757                                                 aq_ret,
1758                                                 pf->hw.aq.asq_last_status);
1759                        }
1760                }
1761                if (num_del) {
1762                        aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1763                                                     del_list, num_del, NULL);
1764                        num_del = 0;
1765
1766                        if (aq_ret &&
1767                            pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
1768                                dev_info(&pf->pdev->dev,
1769                                         "ignoring delete macvlan error, err %d, aq_err %d\n",
1770                                         aq_ret, pf->hw.aq.asq_last_status);
1771                }
1772
1773                kfree(del_list);
1774                del_list = NULL;
1775
1776                /* do all the adds now */
1777                filter_list_len = pf->hw.aq.asq_buf_size /
1778                               sizeof(struct i40e_aqc_add_macvlan_element_data);
1779                add_list = kcalloc(filter_list_len,
1780                               sizeof(struct i40e_aqc_add_macvlan_element_data),
1781                               GFP_KERNEL);
1782                if (!add_list) {
                            /* release the config-busy bit taken above */
                            clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
                            return -ENOMEM;
                    }
1784
1785                list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1786                        if (!f->changed)
1787                                continue;
1788
1789                        if (f->counter == 0)
1790                                continue;
1791                        f->changed = false;
1792                        add_happened = true;
1793                        cmd_flags = 0;
1794
1795                        /* add to add array */
1796                        ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1797                        add_list[num_add].vlan_tag =
1798                                cpu_to_le16(
1799                                 (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1800                        add_list[num_add].queue_number = 0;
1801
1802                        cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1803                        add_list[num_add].flags = cpu_to_le16(cmd_flags);
1804                        num_add++;
1805
1806                        /* flush a full buffer */
1807                        if (num_add == filter_list_len) {
1808                                aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1809                                                             add_list, num_add,
1810                                                             NULL);
1811                                num_add = 0;
1812
1813                                if (aq_ret)
1814                                        break;
1815                                memset(add_list, 0, filter_list_len *
                                           sizeof(*add_list));
1816                        }
1817                }
1818                if (num_add) {
1819                        aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1820                                                     add_list, num_add, NULL);
1821                        num_add = 0;
1822                }
1823                kfree(add_list);
1824                add_list = NULL;
1825
1826                if (add_happened && aq_ret &&
1827                    pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
1828                        dev_info(&pf->pdev->dev,
1829                                 "add filter failed, err %d, aq_err %d\n",
1830                                 aq_ret, pf->hw.aq.asq_last_status);
1831                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1832                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1833                                      &vsi->state)) {
1834                                promisc_forced_on = true;
1835                                set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1836                                        &vsi->state);
1837                                dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1838                        }
1839                }
1840        }
1841
1842        /* check for changes in promiscuous modes */
1843        if (changed_flags & IFF_ALLMULTI) {
1844                bool cur_multipromisc;
1845                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1846                aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1847                                                               vsi->seid,
1848                                                               cur_multipromisc,
1849                                                               NULL);
1850                if (aq_ret)
1851                        dev_info(&pf->pdev->dev,
1852                                 "set multi promisc failed, err %d, aq_err %d\n",
1853                                 aq_ret, pf->hw.aq.asq_last_status);
1854        }
1855        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1856                bool cur_promisc;
1857                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1858                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1859                                        &vsi->state));
1860                aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
1861                                                             vsi->seid,
1862                                                             cur_promisc, NULL);
1863                if (aq_ret)
1864                        dev_info(&pf->pdev->dev,
1865                                 "set uni promisc failed, err %d, aq_err %d\n",
1866                                 aq_ret, pf->hw.aq.asq_last_status);
1867                aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1868                                                   vsi->seid,
1869                                                   cur_promisc, NULL);
1870                if (aq_ret)
1871                        dev_info(&pf->pdev->dev,
1872                                 "set brdcast promisc failed, err %d, aq_err %d\n",
1873                                 aq_ret, pf->hw.aq.asq_last_status);
1874        }
1875
1876        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
1877        return 0;
1878}
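
/* Sizing note (illustrative figures, not guaranteed): the add and delete
 * batches above are bounded by the AdminQ buffer size; assuming a
 * 4096-byte ASQ buffer and 16-byte macvlan elements,
 *
 *	filter_list_len = 4096 / 16 = 256
 *
 * filters fit per AQ command, and longer filter lists are flushed across
 * several i40e_aq_add_macvlan()/i40e_aq_remove_macvlan() calls.
 */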
1879
1880/**
1881 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
1882 * @pf: board private structure
1883 **/
1884static void i40e_sync_filters_subtask(struct i40e_pf *pf)
1885{
1886        int v;
1887
1888        if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
1889                return;
1890        pf->flags &= ~I40E_FLAG_FILTER_SYNC;
1891
1892        for (v = 0; v < pf->num_alloc_vsi; v++) {
1893                if (pf->vsi[v] &&
1894                    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
1895                        i40e_sync_vsi_filters(pf->vsi[v]);
1896        }
1897}
1898
1899/**
1900 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
1901 * @netdev: network interface device structure
1902 * @new_mtu: new value for the maximum transmission unit
1903 *
1904 * Returns 0 on success, negative on failure
1905 **/
1906static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
1907{
1908        struct i40e_netdev_priv *np = netdev_priv(netdev);
1909        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1910        struct i40e_vsi *vsi = np->vsi;
1911
1912        /* MTU < 68 is an error and causes problems on some kernels */
1913        if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
1914                return -EINVAL;
1915
1916        netdev_info(netdev, "changing MTU from %d to %d\n",
1917                    netdev->mtu, new_mtu);
1918        netdev->mtu = new_mtu;
1919        if (netif_running(netdev))
1920                i40e_vsi_reinit_locked(vsi);
1921
1922        return 0;
1923}
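
/* Arithmetic check (illustrative): max_frame adds the L2 overhead to the
 * requested MTU, i.e. 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) =
 * 22 bytes, so e.g. a 9000-byte MTU is accepted only if 9022 bytes fit
 * within I40E_MAX_RXBUFFER.
 */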
1924
1925/**
1926 * i40e_ioctl - Access the hwtstamp interface
1927 * @netdev: network interface device structure
1928 * @ifr: interface request data
1929 * @cmd: ioctl command
1930 **/
1931int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1932{
1933        struct i40e_netdev_priv *np = netdev_priv(netdev);
1934        struct i40e_pf *pf = np->vsi->back;
1935
1936        switch (cmd) {
1937        case SIOCGHWTSTAMP:
1938                return i40e_ptp_get_ts_config(pf, ifr);
1939        case SIOCSHWTSTAMP:
1940                return i40e_ptp_set_ts_config(pf, ifr);
1941        default:
1942                return -EOPNOTSUPP;
1943        }
1944}
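
/* Userspace sketch (illustrative, not part of the driver): the hwtstamp
 * interface above is reached through an ioctl on the netdev, roughly:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */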
1945
1946/**
1947 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
1948 * @vsi: the vsi being adjusted
1949 **/
1950void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
1951{
1952        struct i40e_vsi_context ctxt;
1953        i40e_status ret;
1954
1955        if ((vsi->info.valid_sections &
1956             cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1957            ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
1958                return;  /* already enabled */
1959
1960        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1961        vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1962                                    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1963
1964        ctxt.seid = vsi->seid;
1965        memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1966        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1967        if (ret) {
1968                dev_info(&vsi->back->pdev->dev,
1969                         "%s: update vsi failed, aq_err=%d\n",
1970                         __func__, vsi->back->hw.aq.asq_last_status);
1971        }
1972}
1973
1974/**
1975 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
1976 * @vsi: the vsi being adjusted
1977 **/
1978void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
1979{
1980        struct i40e_vsi_context ctxt;
1981        i40e_status ret;
1982
1983        if ((vsi->info.valid_sections &
1984             cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
1985            ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
1986             I40E_AQ_VSI_PVLAN_EMOD_MASK))
1987                return;  /* already disabled */
1988
1989        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
1990        vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1991                                    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
1992
1993        ctxt.seid = vsi->seid;
1994        memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
1995        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
1996        if (ret) {
1997                dev_info(&vsi->back->pdev->dev,
1998                         "%s: update vsi failed, aq_err=%d\n",
1999                         __func__, vsi->back->hw.aq.asq_last_status);
2000        }
2001}
2002
2003/**
2004 * i40e_vlan_rx_register - Setup or shutdown vlan offload
2005 * @netdev: network interface to be adjusted
2006 * @features: netdev features to test if VLAN offload is enabled or not
2007 **/
2008static void i40e_vlan_rx_register(struct net_device *netdev,
                                      netdev_features_t features)
2009{
2010        struct i40e_netdev_priv *np = netdev_priv(netdev);
2011        struct i40e_vsi *vsi = np->vsi;
2012
2013        if (features & NETIF_F_HW_VLAN_CTAG_RX)
2014                i40e_vlan_stripping_enable(vsi);
2015        else
2016                i40e_vlan_stripping_disable(vsi);
2017}
2018
2019/**
2020 * i40e_vsi_add_vlan - Add vsi membership for given vlan
2021 * @vsi: the vsi being configured
2022 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2023 **/
2024int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
2025{
2026        struct i40e_mac_filter *f, *add_f;
2027        bool is_netdev, is_vf;
2028
2029        is_vf = (vsi->type == I40E_VSI_SRIOV);
2030        is_netdev = !!(vsi->netdev);
2031
2032        if (is_netdev) {
2033                add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
2034                                        is_vf, is_netdev);
2035                if (!add_f) {
2036                        dev_info(&vsi->back->pdev->dev,
2037                                 "Could not add vlan filter %d for %pM\n",
2038                                 vid, vsi->netdev->dev_addr);
2039                        return -ENOMEM;
2040                }
2041        }
2042
2043        list_for_each_entry(f, &vsi->mac_filter_list, list) {
2044                add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2045                if (!add_f) {
2046                        dev_info(&vsi->back->pdev->dev,
2047                                 "Could not add vlan filter %d for %pM\n",
2048                                 vid, f->macaddr);
2049                        return -ENOMEM;
2050                }
2051        }
2052
2053        /* Now when adding a vlan tag, check whether it is the first real
2054         * tag (i.e. a "tag" of -1 exists) and if so replace the -1 "tag"
2055         * with 0, so we now accept untagged plus the specified tagged
2056         * traffic (and not any tagged and untagged)
2057         */
2058        if (vid > 0) {
2059                if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
2060                                                  I40E_VLAN_ANY,
2061                                                  is_vf, is_netdev)) {
2062                        i40e_del_filter(vsi, vsi->netdev->dev_addr,
2063                                        I40E_VLAN_ANY, is_vf, is_netdev);
2064                        add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
2065                                                is_vf, is_netdev);
2066                        if (!add_f) {
2067                                dev_info(&vsi->back->pdev->dev,
2068                                         "Could not add filter 0 for %pM\n",
2069                                         vsi->netdev->dev_addr);
2070                                return -ENOMEM;
2071                        }
2072                }
2073        }
2074
2075        /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
2076        if (vid > 0 && !vsi->info.pvid) {
2077                list_for_each_entry(f, &vsi->mac_filter_list, list) {
2078                        if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2079                                             is_vf, is_netdev)) {
2080                                i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2081                                                is_vf, is_netdev);
2082                                add_f = i40e_add_filter(vsi, f->macaddr,
2083                                                        0, is_vf, is_netdev);
2084                                if (!add_f) {
2085                                        dev_info(&vsi->back->pdev->dev,
2086                                                 "Could not add filter 0 for %pM\n",
2087                                                 f->macaddr);
2088                                        return -ENOMEM;
2089                                }
2090                        }
2091                }
2092        }
2093
2094        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2095            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2096                return 0;
2097
2098        return i40e_sync_vsi_filters(vsi);
2099}
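
/* Worked example (illustrative): on a VSI whose only filter is
 * (mac, I40E_VLAN_ANY), a call such as
 *
 *	i40e_vsi_add_vlan(vsi, 100);
 *
 * first adds (mac, 100) and then replaces the -1 wildcard with (mac, 0),
 * so the VSI now accepts untagged traffic plus VLAN 100 instead of any tag.
 */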
2100
2101/**
2102 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2103 * @vsi: the vsi being configured
2104 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2105 *
2106 * Return: 0 on success or negative otherwise
2107 **/
2108int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
2109{
2110        struct net_device *netdev = vsi->netdev;
2111        struct i40e_mac_filter *f, *add_f;
2112        bool is_vf, is_netdev;
2113        int filter_count = 0;
2114
2115        is_vf = (vsi->type == I40E_VSI_SRIOV);
2116        is_netdev = !!(netdev);
2117
2118        if (is_netdev)
2119                i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
2120
2121        list_for_each_entry(f, &vsi->mac_filter_list, list)
2122                i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
2123
2124        /* go through all the filters for this VSI and if only vid == 0
2125         * filters remain, there are no other vlan filters, so vid 0 must
2126         * be replaced with -1, signifying that we should from now on
2127         * accept any traffic (with any tag present, or untagged)
2128         */
2129        list_for_each_entry(f, &vsi->mac_filter_list, list) {
2130                if (is_netdev) {
2131                        if (f->vlan &&
2132                            ether_addr_equal(netdev->dev_addr, f->macaddr))
2133                                filter_count++;
2134                }
2135
2136                if (f->vlan)
2137                        filter_count++;
2138        }
2139
2140        if (!filter_count && is_netdev) {
2141                i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
2142                f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
2143                                    is_vf, is_netdev);
2144                if (!f) {
2145                        dev_info(&vsi->back->pdev->dev,
2146                                 "Could not add filter %d for %pM\n",
2147                                 I40E_VLAN_ANY, netdev->dev_addr);
2148                        return -ENOMEM;
2149                }
2150        }
2151
2152        if (!filter_count) {
2153                list_for_each_entry(f, &vsi->mac_filter_list, list) {
2154                        i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
2155                        add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
2156                                            is_vf, is_netdev);
2157                        if (!add_f) {
2158                                dev_info(&vsi->back->pdev->dev,
2159                                         "Could not add filter %d for %pM\n",
2160                                         I40E_VLAN_ANY, f->macaddr);
2161                                return -ENOMEM;
2162                        }
2163                }
2164        }
2165
2166        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
2167            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
2168                return 0;
2169
2170        return i40e_sync_vsi_filters(vsi);
2171}
2172
2173/**
2174 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2175 * @netdev: network interface to be adjusted
2176 * @vid: vlan id to be added
2177 *
2178 * net_device_ops implementation for adding vlan ids
2179 **/
2180#ifdef I40E_FCOE
2181int i40e_vlan_rx_add_vid(struct net_device *netdev,
2182                         __always_unused __be16 proto, u16 vid)
2183#else
2184static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2185                                __always_unused __be16 proto, u16 vid)
2186#endif
2187{
2188        struct i40e_netdev_priv *np = netdev_priv(netdev);
2189        struct i40e_vsi *vsi = np->vsi;
2190        int ret = 0;
2191
2192        if (vid > 4095)
2193                return -EINVAL;
2194
2195        netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2196
2197        /* If the network stack called us with vid = 0 then
2198         * it is asking to receive priority tagged packets with
2199         * vlan id 0.  Our HW receives them by default when configured
2200         * to receive untagged packets so there is no need to add an
2201         * extra filter for vlan 0 tagged packets.
2202         */
2203        if (vid)
2204                ret = i40e_vsi_add_vlan(vsi, vid);
2205
2206        if (!ret && (vid < VLAN_N_VID))
2207                set_bit(vid, vsi->active_vlans);
2208
2209        return ret;
2210}
2211
2212/**
2213 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2214 * @netdev: network interface to be adjusted
2215 * @vid: vlan id to be removed
2216 *
2217 * net_device_ops implementation for removing vlan ids
2218 **/
2219#ifdef I40E_FCOE
2220int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2221                          __always_unused __be16 proto, u16 vid)
2222#else
2223static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2224                                 __always_unused __be16 proto, u16 vid)
2225#endif
2226{
2227        struct i40e_netdev_priv *np = netdev_priv(netdev);
2228        struct i40e_vsi *vsi = np->vsi;
2229
2230        netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2231
2232        /* the return code is ignored as there is nothing a user
2233         * can do about a failure to remove, and a log message is
2234         * already printed by i40e_vsi_kill_vlan() on failure
2235         */
2236        i40e_vsi_kill_vlan(vsi, vid);
2237
2238        clear_bit(vid, vsi->active_vlans);
2239
2240        return 0;
2241}
2242
2243/**
2244 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2245 * @vsi: the vsi being brought back up
2246 **/
2247static void i40e_restore_vlan(struct i40e_vsi *vsi)
2248{
2249        u16 vid;
2250
2251        if (!vsi->netdev)
2252                return;
2253
2254        i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2255
2256        for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2257                i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2258                                     vid);
2259}
2260
2261/**
2262 * i40e_vsi_add_pvid - Add pvid for the VSI
2263 * @vsi: the vsi being adjusted
2264 * @vid: the vlan id to set as a PVID
2265 **/
2266int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2267{
2268        struct i40e_vsi_context ctxt;
2269        i40e_status aq_ret;
2270
2271        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2272        vsi->info.pvid = cpu_to_le16(vid);
2273        vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2274                                    I40E_AQ_VSI_PVLAN_INSERT_PVID |
2275                                    I40E_AQ_VSI_PVLAN_EMOD_STR;
2276
2277        ctxt.seid = vsi->seid;
2278        memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
2279        aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2280        if (aq_ret) {
2281                dev_info(&vsi->back->pdev->dev,
2282                         "%s: update vsi failed, aq_err=%d\n",
2283                         __func__, vsi->back->hw.aq.asq_last_status);
2284                return -ENOENT;
2285        }
2286
2287        return 0;
2288}
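
/* Illustrative use (semantics partly assumed from the flag names):
 * i40e_vsi_add_pvid(vsi, 100) asks the hardware to insert VLAN tag 100 on
 * transmit (I40E_AQ_VSI_PVLAN_INSERT_PVID), accept only traffic matching
 * the port VLAN (I40E_AQ_VSI_PVLAN_MODE_TAGGED) and strip the tag on
 * receive (I40E_AQ_VSI_PVLAN_EMOD_STR), as used for VF port VLANs.
 */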
2289
2290/**
2291 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2292 * @vsi: the vsi being adjusted
2293 *
2294 * Just disable vlan stripping and clear the pvid to put things back to normal
2295 **/
2296void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2297{
2298        i40e_vlan_stripping_disable(vsi);
2299
2300        vsi->info.pvid = 0;
2301}
2302
2303/**
2304 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2305 * @vsi: ptr to the VSI
2306 *
2307 * If this function returns with an error, then it's possible one or
2308 * more of the rings is populated (while the rest are not).  It is the
2309 * caller's duty to clean those orphaned rings.
2310 *
2311 * Return 0 on success, negative on failure
2312 **/
2313static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2314{
2315        int i, err = 0;
2316
2317        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2318                err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2319
2320        return err;
2321}
2322
2323/**
2324 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2325 * @vsi: ptr to the VSI
2326 *
2327 * Free VSI's transmit software resources
2328 **/
2329static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2330{
2331        int i;
2332
2333        if (!vsi->tx_rings)
2334                return;
2335
2336        for (i = 0; i < vsi->num_queue_pairs; i++)
2337                if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2338                        i40e_free_tx_resources(vsi->tx_rings[i]);
2339}
2340
2341/**
2342 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
2343 * @vsi: ptr to the VSI
2344 *
2345 * If this function returns with an error, then it's possible one or
2346 * more of the rings is populated (while the rest are not).  It is the
2347 * caller's duty to clean those orphaned rings.
2348 *
2349 * Return 0 on success, negative on failure
2350 **/
2351static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2352{
2353        int i, err = 0;
2354
2355        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2356                err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2357#ifdef I40E_FCOE
2358        i40e_fcoe_setup_ddp_resources(vsi);
2359#endif
2360        return err;
2361}
2362
2363/**
2364 * i40e_vsi_free_rx_resources - Free Rx resources for VSI queues
2365 * @vsi: ptr to the VSI
2366 *
2367 * Free all receive software resources
2368 **/
2369static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2370{
2371        int i;
2372
2373        if (!vsi->rx_rings)
2374                return;
2375
2376        for (i = 0; i < vsi->num_queue_pairs; i++)
2377                if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2378                        i40e_free_rx_resources(vsi->rx_rings[i]);
2379#ifdef I40E_FCOE
2380        i40e_fcoe_free_ddp_resources(vsi);
2381#endif
2382}
2383
2384/**
2385 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2386 * @ring: The Tx ring to configure
2387 *
2388 * This enables/disables XPS for a given Tx descriptor ring
2389 * based on the TCs enabled for the VSI that ring belongs to.
2390 **/
2391static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2392{
2393        struct i40e_vsi *vsi = ring->vsi;
2394        cpumask_var_t mask;
2395
2396        if (ring->q_vector && ring->netdev) {
2397                /* In single TC mode, enable XPS */
2398                if (vsi->tc_config.numtc <= 1 &&
2399                    !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
2400                        netif_set_xps_queue(ring->netdev,
2401                                            &ring->q_vector->affinity_mask,
2402                                            ring->queue_index);
2403                } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2404                        /* Disable XPS to allow selection based on TC */
2405                        bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2406                        netif_set_xps_queue(ring->netdev, mask,
2407                                            ring->queue_index);
2408                        free_cpumask_var(mask);
2409                }
2410        }
2411}
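
/* Administrative note (illustrative): the single-TC XPS map programmed
 * above mirrors what could be set by hand through sysfs, e.g. steering
 * transmits on queue 0 to CPU 0 with
 *
 *	echo 1 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * In multi-TC mode the map is cleared instead, so queue selection follows
 * the TC mapping rather than the transmitting CPU.
 */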
2412
2413/**
2414 * i40e_configure_tx_ring - Configure a transmit ring context and related registers
2415 * @ring: The Tx ring to configure
2416 *
2417 * Configure the Tx descriptor ring in the HMC context.
2418 **/
2419static int i40e_configure_tx_ring(struct i40e_ring *ring)
2420{
2421        struct i40e_vsi *vsi = ring->vsi;
2422        u16 pf_q = vsi->base_queue + ring->queue_index;
2423        struct i40e_hw *hw = &vsi->back->hw;
2424        struct i40e_hmc_obj_txq tx_ctx;
2425        i40e_status err = 0;
2426        u32 qtx_ctl = 0;
2427
2428        /* some ATR related tx ring init */
2429        if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2430                ring->atr_sample_rate = vsi->back->atr_sample_rate;
2431                ring->atr_count = 0;
2432        } else {
2433                ring->atr_sample_rate = 0;
2434        }
2435
2436        /* configure XPS */
2437        i40e_config_xps_tx_ring(ring);
2438
2439        /* clear the context structure first */
2440        memset(&tx_ctx, 0, sizeof(tx_ctx));
2441
2442        tx_ctx.new_context = 1;
2443        tx_ctx.base = (ring->dma / 128);
2444        tx_ctx.qlen = ring->count;
2445        tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2446                                               I40E_FLAG_FD_ATR_ENABLED));
2447#ifdef I40E_FCOE
2448        tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2449#endif
2450        tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2451        /* FDIR VSI tx ring can still use RS bit and writebacks */
2452        if (vsi->type != I40E_VSI_FDIR)
2453                tx_ctx.head_wb_ena = 1;
2454        tx_ctx.head_wb_addr = ring->dma +
2455                              (ring->count * sizeof(struct i40e_tx_desc));
2456
2457        /* As part of VSI creation/update, FW allocates certain
2458         * Tx arbitration queue sets for each TC enabled for
2459         * the VSI. The FW returns the handles to these queue
2460         * sets as part of the response buffer to Add VSI,
2461         * Update VSI, etc. AQ commands. It is expected that
2462         * these queue set handles be associated with the Tx
2463         * queues by the driver as part of the TX queue context
2464         * initialization. This has to be done regardless of
2465         * DCB as by default everything is mapped to TC0.
2466         */
2467        tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2468        tx_ctx.rdylist_act = 0;
2469
2470        /* clear the context in the HMC */
2471        err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2472        if (err) {
2473                dev_info(&vsi->back->pdev->dev,
2474                         "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2475                         ring->queue_index, pf_q, err);
2476                return -ENOMEM;
2477        }
2478
2479        /* set the context in the HMC */
2480        err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2481        if (err) {
2482                dev_info(&vsi->back->pdev->dev,
2483                         "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2484                         ring->queue_index, pf_q, err);
2485                return -ENOMEM;
2486        }
2487
2488        /* Now associate this queue with this PCI function */
2489        if (vsi->type == I40E_VSI_VMDQ2) {
2490                qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2491                qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2492                           I40E_QTX_CTL_VFVM_INDX_MASK;
2493        } else {
2494                qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2495        }
2496
2497        qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2498                    I40E_QTX_CTL_PF_INDX_MASK);
2499        wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2500        i40e_flush(hw);
2501
2502        clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
2503
2504        /* cache tail register offset for easier writes later */
2505        ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2506
2507        return 0;
2508}
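
/* Layout note (illustrative sizes): the head writeback area is placed
 * immediately after the descriptor ring, e.g. for a 512-entry ring with
 * 16-byte descriptors (sizeof(struct i40e_tx_desc)):
 *
 *	head_wb_addr = ring->dma + 512 * 16 = ring->dma + 0x2000
 *
 * so the hardware reports its consumed-descriptor head just past the ring.
 */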
2509
2510/**
2511 * i40e_configure_rx_ring - Configure a receive ring context
2512 * @ring: The Rx ring to configure
2513 *
2514 * Configure the Rx descriptor ring in the HMC context.
2515 **/
2516static int i40e_configure_rx_ring(struct i40e_ring *ring)
2517{
2518        struct i40e_vsi *vsi = ring->vsi;
2519        u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
2520        u16 pf_q = vsi->base_queue + ring->queue_index;
2521        struct i40e_hw *hw = &vsi->back->hw;
2522        struct i40e_hmc_obj_rxq rx_ctx;
2523        i40e_status err = 0;
2524
2525        ring->state = 0;
2526
2527        /* clear the context structure first */
2528        memset(&rx_ctx, 0, sizeof(rx_ctx));
2529
2530        ring->rx_buf_len = vsi->rx_buf_len;
2531        ring->rx_hdr_len = vsi->rx_hdr_len;
2532
2533        rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
2534        rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
2535
2536        rx_ctx.base = (ring->dma / 128);
2537        rx_ctx.qlen = ring->count;
2538
2539        if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
2540                set_ring_16byte_desc_enabled(ring);
2541                rx_ctx.dsize = 0;
2542        } else {
2543                rx_ctx.dsize = 1;
2544        }
2545
2546        rx_ctx.dtype = vsi->dtype;
2547        if (vsi->dtype) {
2548                set_ring_ps_enabled(ring);
2549                rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
2550                                  I40E_RX_SPLIT_IP      |
2551                                  I40E_RX_SPLIT_TCP_UDP |
2552                                  I40E_RX_SPLIT_SCTP;
2553        } else {
2554                rx_ctx.hsplit_0 = 0;
2555        }
2556
2557        rx_ctx.rxmax = min_t(u16, vsi->max_frame,
2558                                  (chain_len * ring->rx_buf_len));
2559        if (hw->revision_id == 0)
2560                rx_ctx.lrxqthresh = 0;
2561        else
2562                rx_ctx.lrxqthresh = 2;
2563        rx_ctx.crcstrip = 1;
2564        rx_ctx.l2tsel = 1;
2565        rx_ctx.showiv = 1;
2566#ifdef I40E_FCOE
2567        rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2568#endif
2569        /* set the prefena field to 1 because the manual says to */
2570        rx_ctx.prefena = 1;
2571
2572        /* clear the context in the HMC */
2573        err = i40e_clear_lan_rx_queue_context(hw, pf_q);
2574        if (err) {
2575                dev_info(&vsi->back->pdev->dev,
2576                         "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2577                         ring->queue_index, pf_q, err);
2578                return -ENOMEM;
2579        }
2580
2581        /* set the context in the HMC */
2582        err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
2583        if (err) {
2584                dev_info(&vsi->back->pdev->dev,
2585                         "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
2586                         ring->queue_index, pf_q, err);
2587                return -ENOMEM;
2588        }
2589
2590        /* cache tail for quicker writes, and clear the reg before use */
2591        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
2592        writel(0, ring->tail);
2593
2594        i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
2595
2596        return 0;
2597}
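
/* Unit-conversion example (shift values assumed to be 7 and 6): the dbuff
 * and hbuff fields are expressed in 128-byte and 64-byte units, hence the
 * shifts above; a 2048-byte data buffer and a 256-byte header buffer
 * would program
 *
 *	rx_ctx.dbuff = 2048 >> 7 = 16;
 *	rx_ctx.hbuff = 256 >> 6 = 4;
 */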
2598
2599/**
2600 * i40e_vsi_configure_tx - Configure the VSI for Tx
2601 * @vsi: VSI structure describing this set of rings and resources
2602 *
2603 * Configure the Tx VSI for operation.
2604 **/
2605static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2606{
2607        int err = 0;
2608        u16 i;
2609
2610        for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2611                err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2612
2613        return err;
2614}
2615
2616/**
2617 * i40e_vsi_configure_rx - Configure the VSI for Rx
2618 * @vsi: the VSI being configured
2619 *
2620 * Configure the Rx VSI for operation.
2621 **/
2622static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
2623{
2624        int err = 0;
2625        u16 i;
2626
2627        if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
2628                vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
2629                               + ETH_FCS_LEN + VLAN_HLEN;
2630        else
2631                vsi->max_frame = I40E_RXBUFFER_2048;
2632
2633        /* figure out correct receive buffer length */
2634        switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
2635                                    I40E_FLAG_RX_PS_ENABLED)) {
2636        case I40E_FLAG_RX_1BUF_ENABLED:
2637                vsi->rx_hdr_len = 0;
2638                vsi->rx_buf_len = vsi->max_frame;
2639                vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2640                break;
2641        case I40E_FLAG_RX_PS_ENABLED:
2642                vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2643                vsi->rx_buf_len = I40E_RXBUFFER_2048;
2644                vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
2645                break;
2646        default:
2647                vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
2648                vsi->rx_buf_len = I40E_RXBUFFER_2048;
2649                vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
2650                break;
2651        }
2652
2653#ifdef I40E_FCOE
2654        /* setup rx buffer for FCoE */
2655        if ((vsi->type == I40E_VSI_FCOE) &&
2656            (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
2657                vsi->rx_hdr_len = 0;
2658                vsi->rx_buf_len = I40E_RXBUFFER_3072;
2659                vsi->max_frame = I40E_RXBUFFER_3072;
2660                vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
2661        }
2662
2663#endif /* I40E_FCOE */
2664        /* round up for the chip's needs */
2665        vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
2666                                (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
2667        vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
2668                                (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
2669
2670        /* set up individual rings */
2671        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2672                err = i40e_configure_rx_ring(vsi->rx_rings[i]);
2673
2674        return err;
2675}
2676
2677/**
2678 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2679 * @vsi: ptr to the VSI
2680 **/
2681static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2682{
2683        struct i40e_ring *tx_ring, *rx_ring;
2684        u16 qoffset, qcount;
2685        int i, n;
2686
2687        if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2688                return;
2689
2690        for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2691                if (!(vsi->tc_config.enabled_tc & (1 << n)))
2692                        continue;
2693
2694                qoffset = vsi->tc_config.tc_info[n].qoffset;
2695                qcount = vsi->tc_config.tc_info[n].qcount;
2696                for (i = qoffset; i < (qoffset + qcount); i++) {
2697                        rx_ring = vsi->rx_rings[i];
2698                        tx_ring = vsi->tx_rings[i];
2699                        rx_ring->dcb_tc = n;
2700                        tx_ring->dcb_tc = n;
2701                }
2702        }
2703}
2704
2705/**
2706 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2707 * @vsi: ptr to the VSI
2708 **/
2709static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2710{
2711        if (vsi->netdev)
2712                i40e_set_rx_mode(vsi->netdev);
2713}
2714
2715/**
2716 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2717 * @vsi: Pointer to the targeted VSI
2718 *
2719 * This function replays the hlist on the hw where all the SB Flow Director
2720 * filters were saved.
2721 **/
2722static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2723{
2724        struct i40e_fdir_filter *filter;
2725        struct i40e_pf *pf = vsi->back;
2726        struct hlist_node *node;
2727
2728        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2729                return;
2730
2731        hlist_for_each_entry_safe(filter, node,
2732                                  &pf->fdir_filter_list, fdir_node) {
2733                i40e_add_del_fdir(vsi, filter, true);
2734        }
2735}
2736
2737/**
2738 * i40e_vsi_configure - Set up the VSI for action
2739 * @vsi: the VSI being configured
2740 **/
2741static int i40e_vsi_configure(struct i40e_vsi *vsi)
2742{
2743        int err;
2744
2745        i40e_set_vsi_rx_mode(vsi);
2746        i40e_restore_vlan(vsi);
2747        i40e_vsi_config_dcb_rings(vsi);
2748        err = i40e_vsi_configure_tx(vsi);
2749        if (!err)
2750                err = i40e_vsi_configure_rx(vsi);
2751
2752        return err;
2753}
2754
2755/**
2756 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2757 * @vsi: the VSI being configured
2758 **/
2759static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
2760{
2761        struct i40e_pf *pf = vsi->back;
2762        struct i40e_q_vector *q_vector;
2763        struct i40e_hw *hw = &pf->hw;
2764        u16 vector;
2765        int i, q;
2766        u32 val;
2767        u32 qp;
2768
2769        /* The interrupt indexing is offset by 1 in the PFINT_ITRn
2770         * and PFINT_LNKLSTn registers, e.g.:
2771         *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
2772         */
2773        qp = vsi->base_queue;
2774        vector = vsi->base_vector;
2775        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
2776                q_vector = vsi->q_vectors[i];
2777                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2778                q_vector->rx.latency_range = I40E_LOW_LATENCY;
2779                wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
2780                     q_vector->rx.itr);
2781                q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2782                q_vector->tx.latency_range = I40E_LOW_LATENCY;
2783                wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
2784                     q_vector->tx.itr);
2785
2786                /* Linked list for the queuepairs assigned to this vector */
2787                wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
2788                for (q = 0; q < q_vector->num_ringpairs; q++) {
2789                        val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2790                              (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
2791                              (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2792                              (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
2793                              (I40E_QUEUE_TYPE_TX
2794                                      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2795
2796                        wr32(hw, I40E_QINT_RQCTL(qp), val);
2797
2798                        val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
2799                              (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
2800                              (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2801                              ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
2802                              (I40E_QUEUE_TYPE_RX
2803                                      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
2804
2805                        /* Terminate the linked list */
2806                        if (q == (q_vector->num_ringpairs - 1))
2807                                val |= (I40E_QUEUE_END_OF_LIST
2808                                           << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2809
2810                        wr32(hw, I40E_QINT_TQCTL(qp), val);
2811                        qp++;
2812                }
2813        }
2814
2815        i40e_flush(hw);
2816}
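
/* Illustrative chain (not driver code): a vector owning two queue pairs
 * starting at qp 4 is programmed as
 *   PFINT_LNKLSTN(vector - 1) -> RX q4 -> TX q4 -> RX q5 -> TX q5 -> EOL
 * since each RQCTL points at its TX twin, each TQCTL points at the next
 * RX queue, and the last TQCTL carries I40E_QUEUE_END_OF_LIST.
 */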
2817
2818/**
2819 * i40e_enable_misc_int_causes - enable the non-queue interrupts
2820 * @hw: ptr to the hardware info
2821 **/
2822static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
2823{
2824        u32 val;
2825
2826        /* clear things first */
2827        wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
2828        rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
2829
2830        val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
2831              I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
2832              I40E_PFINT_ICR0_ENA_GRST_MASK          |
2833              I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
2834              I40E_PFINT_ICR0_ENA_GPIO_MASK          |
2835              I40E_PFINT_ICR0_ENA_TIMESYNC_MASK      |
2836              I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
2837              I40E_PFINT_ICR0_ENA_VFLR_MASK          |
2838              I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
2839
2840        wr32(hw, I40E_PFINT_ICR0_ENA, val);
2841
2842        /* SW_ITR_IDX = 0, but don't change INTENA */
2843        wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
2844                                        I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
2845
2846        /* OTHER_ITR_IDX = 0 */
2847        wr32(hw, I40E_PFINT_STAT_CTL0, 0);
2848}
2849
2850/**
2851 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
2852 * @vsi: the VSI being configured
2853 **/
2854static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2855{
2856        struct i40e_q_vector *q_vector = vsi->q_vectors[0];
2857        struct i40e_pf *pf = vsi->back;
2858        struct i40e_hw *hw = &pf->hw;
2859        u32 val;
2860
2861        /* set the ITR configuration */
2862        q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
2863        q_vector->rx.latency_range = I40E_LOW_LATENCY;
2864        wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
2865        q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
2866        q_vector->tx.latency_range = I40E_LOW_LATENCY;
2867        wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
2868
2869        i40e_enable_misc_int_causes(hw);
2870
2871        /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2872        wr32(hw, I40E_PFINT_LNKLST0, 0);
2873
2874        /* Associate the queue pair to the vector and enable the queue int */
2875        val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
2876              (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2877              (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
2878
2879        wr32(hw, I40E_QINT_RQCTL(0), val);
2880
2881        val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
2882              (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2883              (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
2884
2885        wr32(hw, I40E_QINT_TQCTL(0), val);
2886        i40e_flush(hw);
2887}
2888
2889/**
2890 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
2891 * @pf: board private structure
2892 **/
2893void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
2894{
2895        struct i40e_hw *hw = &pf->hw;
2896
2897        wr32(hw, I40E_PFINT_DYN_CTL0,
2898             I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2899        i40e_flush(hw);
2900}
2901
2902/**
2903 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
2904 * @pf: board private structure
2905 **/
2906void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
2907{
2908        struct i40e_hw *hw = &pf->hw;
2909        u32 val;
2910
2911        val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
2912              I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
2913              (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
2914
2915        wr32(hw, I40E_PFINT_DYN_CTL0, val);
2916        i40e_flush(hw);
2917}
2918
2919/**
2920 * i40e_irq_dynamic_enable - Enable default interrupt generation settings
2921 * @vsi: pointer to a vsi
2922 * @vector: enable a particular Hw Interrupt vector
2923 **/
2924void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
2925{
2926        struct i40e_pf *pf = vsi->back;
2927        struct i40e_hw *hw = &pf->hw;
2928        u32 val;
2929
2930        val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2931              I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
2932              (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
2933        wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2934        /* skip the flush */
2935}
2936
2937/**
2938 * i40e_irq_dynamic_disable - Disable default interrupt generation settings
2939 * @vsi: pointer to a vsi
2940 * @vector: disable a particular Hw Interrupt vector
2941 **/
2942void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
2943{
2944        struct i40e_pf *pf = vsi->back;
2945        struct i40e_hw *hw = &pf->hw;
2946        u32 val;
2947
2948        val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
2949        wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
2950        i40e_flush(hw);
2951}
2952
2953/**
2954 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
2955 * @irq: interrupt number
2956 * @data: pointer to a q_vector
2957 **/
2958static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
2959{
2960        struct i40e_q_vector *q_vector = data;
2961
2962        if (!q_vector->tx.ring && !q_vector->rx.ring)
2963                return IRQ_HANDLED;
2964
2965        napi_schedule(&q_vector->napi);
2966
2967        return IRQ_HANDLED;
2968}
2969
2970/**
2971 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
2972 * @vsi: the VSI being configured
2973 * @basename: name for the vector
2974 *
2975 * Requests kernel IRQs for the MSI-X vectors already assigned to this VSI.
2976 **/
2977static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
2978{
2979        int q_vectors = vsi->num_q_vectors;
2980        struct i40e_pf *pf = vsi->back;
2981        int base = vsi->base_vector;
2982        int rx_int_idx = 0;
2983        int tx_int_idx = 0;
2984        int vector, err;
2985
2986        for (vector = 0; vector < q_vectors; vector++) {
2987                struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
2988
2989                if (q_vector->tx.ring && q_vector->rx.ring) {
2990                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2991                                 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
2992                        tx_int_idx++;
2993                } else if (q_vector->rx.ring) {
2994                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2995                                 "%s-%s-%d", basename, "rx", rx_int_idx++);
2996                } else if (q_vector->tx.ring) {
2997                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2998                                 "%s-%s-%d", basename, "tx", tx_int_idx++);
2999                } else {
3000                        /* skip this unused q_vector */
3001                        continue;
3002                }
3003                err = request_irq(pf->msix_entries[base + vector].vector,
3004                                  vsi->irq_handler,
3005                                  0,
3006                                  q_vector->name,
3007                                  q_vector);
3008                if (err) {
3009                        dev_info(&pf->pdev->dev,
3010                                 "%s: request_irq failed, error: %d\n",
3011                                 __func__, err);
3012                        goto free_queue_irqs;
3013                }
3014                /* assign the mask for this irq */
3015                irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3016                                      &q_vector->affinity_mask);
3017        }
3018
3019        vsi->irqs_ready = true;
3020        return 0;
3021
3022free_queue_irqs:
3023        while (vector) {
3024                vector--;
3025                irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3026                                      NULL);
3027                free_irq(pf->msix_entries[base + vector].vector,
3028                         vsi->q_vectors[vector]);
3029        }
3030        return err;
3031}
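
/* Resulting IRQ names (illustrative; assuming the caller passes a
 * basename such as "i40e-eth0"): a vector with both rings is named
 * "i40e-eth0-TxRx-0", an rx-only vector "i40e-eth0-rx-1" and a tx-only
 * vector "i40e-eth0-tx-0", per the snprintf() formats above.
 */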
3032
3033/**
3034 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3035 * @vsi: the VSI being un-configured
3036 **/
3037static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3038{
3039        struct i40e_pf *pf = vsi->back;
3040        struct i40e_hw *hw = &pf->hw;
3041        int base = vsi->base_vector;
3042        int i;
3043
3044        for (i = 0; i < vsi->num_queue_pairs; i++) {
3045                wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
3046                wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
3047        }
3048
3049        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3050                for (i = vsi->base_vector;
3051                     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3052                        wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3053
3054                i40e_flush(hw);
3055                for (i = 0; i < vsi->num_q_vectors; i++)
3056                        synchronize_irq(pf->msix_entries[i + base].vector);
3057        } else {
3058                /* Legacy and MSI mode - this stops all interrupt handling */
3059                wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3060                wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3061                i40e_flush(hw);
3062                synchronize_irq(pf->pdev->irq);
3063        }
3064}
3065
3066/**
3067 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3068 * @vsi: the VSI being configured
3069 **/
3070static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3071{
3072        struct i40e_pf *pf = vsi->back;
3073        int i;
3074
3075        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3076                for (i = vsi->base_vector;
3077                     i < (vsi->num_q_vectors + vsi->base_vector); i++)
3078                        i40e_irq_dynamic_enable(vsi, i);
3079        } else {
3080                i40e_irq_dynamic_enable_icr0(pf);
3081        }
3082
3083        i40e_flush(&pf->hw);
3084        return 0;
3085}
3086
3087/**
3088 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3089 * @pf: board private structure
3090 **/
3091static void i40e_stop_misc_vector(struct i40e_pf *pf)
3092{
3093        /* Disable ICR 0 */
3094        wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3095        i40e_flush(&pf->hw);
3096}
3097
3098/**
3099 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3100 * @irq: interrupt number
3101 * @data: pointer to the PF structure
3102 *
3103 * This is the handler used for all MSI/Legacy interrupts, and deals
3104 * with both queue and non-queue interrupts.  This is also used in
3105 * MSIX mode to handle the non-queue interrupts.
3106 **/
3107static irqreturn_t i40e_intr(int irq, void *data)
3108{
3109        struct i40e_pf *pf = (struct i40e_pf *)data;
3110        struct i40e_hw *hw = &pf->hw;
3111        irqreturn_t ret = IRQ_NONE;
3112        u32 icr0, icr0_remaining;
3113        u32 val, ena_mask;
3114
3115        icr0 = rd32(hw, I40E_PFINT_ICR0);
3116        ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3117
3118        /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3119        if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3120                goto enable_intr;
3121
3122        /* if interrupt but no bits showing, must be SWINT */
3123        if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3124            (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3125                pf->sw_int_count++;
3126
3127        /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3128        if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3129
3130                /* temporarily disable queue cause for NAPI processing */
3131                u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
3132                qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3133                wr32(hw, I40E_QINT_RQCTL(0), qval);
3134
3135                qval = rd32(hw, I40E_QINT_TQCTL(0));
3136                qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3137                wr32(hw, I40E_QINT_TQCTL(0), qval);
3138
3139                if (!test_bit(__I40E_DOWN, &pf->state))
3140                        napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
3141        }
3142
3143        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3144                ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3145                set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
3146        }
3147
3148        if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3149                ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3150                set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
3151        }
3152
3153        if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3154                ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3155                set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
3156        }
3157
3158        if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3159                if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
3160                        set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
3161                ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3162                val = rd32(hw, I40E_GLGEN_RSTAT);
3163                val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3164                       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3165                if (val == I40E_RESET_CORER) {
3166                        pf->corer_count++;
3167                } else if (val == I40E_RESET_GLOBR) {
3168                        pf->globr_count++;
3169                } else if (val == I40E_RESET_EMPR) {
3170                        pf->empr_count++;
3171                        set_bit(__I40E_EMP_RESET_REQUESTED, &pf->state);
3172                }
3173        }
3174
3175        if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3176                icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3177                dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3178        }
3179
3180        if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3181                u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3182
3183                if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3184                        icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3185                        i40e_ptp_tx_hwtstamp(pf);
3186                }
3187        }
3188
3189        /* If a critical error is pending we have no choice but to reset the
3190         * device.
3191         * Report and mask out any remaining unexpected interrupts.
3192         */
3193        icr0_remaining = icr0 & ena_mask;
3194        if (icr0_remaining) {
3195                dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3196                         icr0_remaining);
3197                if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3198                    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3199                    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3200                        dev_info(&pf->pdev->dev, "device will be reset\n");
3201                        set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
3202                        i40e_service_event_schedule(pf);
3203                }
3204                ena_mask &= ~icr0_remaining;
3205        }
3206        ret = IRQ_HANDLED;
3207
3208enable_intr:
3209        /* re-enable interrupt causes */
3210        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3211        if (!test_bit(__I40E_DOWN, &pf->state)) {
3212                i40e_service_event_schedule(pf);
3213                i40e_irq_dynamic_enable_icr0(pf);
3214        }
3215
3216        return ret;
3217}
3218
3219/**
3220 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3221 * @tx_ring:  tx ring to clean
3222 * @budget:   how many cleans we're allowed
3223 *
3224 * Returns true if there's any budget left (i.e. the clean is finished)
3225 **/
3226static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3227{
3228        struct i40e_vsi *vsi = tx_ring->vsi;
3229        u16 i = tx_ring->next_to_clean;
3230        struct i40e_tx_buffer *tx_buf;
3231        struct i40e_tx_desc *tx_desc;
3232
3233        tx_buf = &tx_ring->tx_bi[i];
3234        tx_desc = I40E_TX_DESC(tx_ring, i);
3235        i -= tx_ring->count;
3236
3237        do {
3238                struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3239
3240                /* if next_to_watch is not set then there is no work pending */
3241                if (!eop_desc)
3242                        break;
3243
3244                /* prevent any other reads prior to eop_desc */
3245                read_barrier_depends();
3246
3247                /* if the descriptor isn't done, no work yet to do */
3248                if (!(eop_desc->cmd_type_offset_bsz &
3249                      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
3250                        break;
3251
3252                /* clear next_to_watch to prevent false hangs */
3253                tx_buf->next_to_watch = NULL;
3254
3255                tx_desc->buffer_addr = 0;
3256                tx_desc->cmd_type_offset_bsz = 0;
3257                /* move past filter desc */
3258                tx_buf++;
3259                tx_desc++;
3260                i++;
3261                if (unlikely(!i)) {
3262                        i -= tx_ring->count;
3263                        tx_buf = tx_ring->tx_bi;
3264                        tx_desc = I40E_TX_DESC(tx_ring, 0);
3265                }
3266                /* unmap skb header data */
3267                dma_unmap_single(tx_ring->dev,
3268                                 dma_unmap_addr(tx_buf, dma),
3269                                 dma_unmap_len(tx_buf, len),
3270                                 DMA_TO_DEVICE);
3271                if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
3272                        kfree(tx_buf->raw_buf);
3273
3274                tx_buf->raw_buf = NULL;
3275                tx_buf->tx_flags = 0;
3276                tx_buf->next_to_watch = NULL;
3277                dma_unmap_len_set(tx_buf, len, 0);
3278                tx_desc->buffer_addr = 0;
3279                tx_desc->cmd_type_offset_bsz = 0;
3280
3281                /* move us past the eop_desc for start of next FD desc */
3282                tx_buf++;
3283                tx_desc++;
3284                i++;
3285                if (unlikely(!i)) {
3286                        i -= tx_ring->count;
3287                        tx_buf = tx_ring->tx_bi;
3288                        tx_desc = I40E_TX_DESC(tx_ring, 0);
3289                }
3290
3291                /* update budget accounting */
3292                budget--;
3293        } while (likely(budget));
3294
3295        i += tx_ring->count;
3296        tx_ring->next_to_clean = i;
3297
3298        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
3299                i40e_irq_dynamic_enable(vsi,
3300                                tx_ring->q_vector->v_idx + vsi->base_vector);
3301        }
3302        return budget > 0;
3303}
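
/* Note on the double advance above (a sketch of the ring layout): each
 * sideband filter occupies two descriptors, a filter-program descriptor
 * followed by the data descriptor that next_to_watch points at, so one
 * unit of budget moves the clean index past both:
 *   [prog 0][data 0][prog 1][data 1] ...  -> budget 2 cleans 4 slots
 */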
3304
3305/**
3306 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3307 * @irq: interrupt number
3308 * @data: pointer to a q_vector
3309 **/
3310static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3311{
3312        struct i40e_q_vector *q_vector = data;
3313        struct i40e_vsi *vsi;
3314
3315        if (!q_vector->tx.ring)
3316                return IRQ_HANDLED;
3317
3318        vsi = q_vector->tx.ring->vsi;
3319        i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3320
3321        return IRQ_HANDLED;
3322}
3323
3324/**
3325 * map_vector_to_qp - Assigns the queue pair to the vector
3326 * @vsi: the VSI being configured
3327 * @v_idx: vector index
3328 * @qp_idx: queue pair index
3329 **/
3330static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3331{
3332        struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3333        struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3334        struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3335
3336        tx_ring->q_vector = q_vector;
3337        tx_ring->next = q_vector->tx.ring;
3338        q_vector->tx.ring = tx_ring;
3339        q_vector->tx.count++;
3340
3341        rx_ring->q_vector = q_vector;
3342        rx_ring->next = q_vector->rx.ring;
3343        q_vector->rx.ring = rx_ring;
3344        q_vector->rx.count++;
3345}
3346
3347/**
3348 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3349 * @vsi: the VSI being configured
3350 *
3351 * This function maps descriptor rings to the queue-specific vectors
3352 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3353 * one vector per queue pair, but on a constrained vector budget, we
3354 * group the queue pairs as "efficiently" as possible.
3355 **/
3356static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3357{
3358        int qp_remaining = vsi->num_queue_pairs;
3359        int q_vectors = vsi->num_q_vectors;
3360        int num_ringpairs;
3361        int v_start = 0;
3362        int qp_idx = 0;
3363
3364        /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3365         * group them so there are multiple queues per vector.
3366         * It is also important to go through all the available vectors so
3367         * that, if we don't use them all, the remaining vectors are
3368         * cleared. This is especially important when decreasing the
3369         * number of queues in use.
3370         */
3371        for (; v_start < q_vectors; v_start++) {
3372                struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3373
3374                num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3375
3376                q_vector->num_ringpairs = num_ringpairs;
3377
3378                q_vector->rx.count = 0;
3379                q_vector->tx.count = 0;
3380                q_vector->rx.ring = NULL;
3381                q_vector->tx.ring = NULL;
3382
3383                while (num_ringpairs--) {
3384                        map_vector_to_qp(vsi, v_start, qp_idx);
3385                        qp_idx++;
3386                        qp_remaining--;
3387                }
3388        }
3389}
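
/* Worked example of the spread (illustrative): 10 queue pairs on 4
 * vectors gives DIV_ROUND_UP(10, 4) = 3, DIV_ROUND_UP(7, 3) = 3,
 * DIV_ROUND_UP(4, 2) = 2 and DIV_ROUND_UP(2, 1) = 2, i.e. a 3/3/2/2
 * split with no queue pair left unmapped.
 */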
3390
3391/**
3392 * i40e_vsi_request_irq - Request IRQ from the OS
3393 * @vsi: the VSI being configured
3394 * @basename: name for the vector
3395 **/
3396static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3397{
3398        struct i40e_pf *pf = vsi->back;
3399        int err;
3400
3401        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3402                err = i40e_vsi_request_irq_msix(vsi, basename);
3403        else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3404                err = request_irq(pf->pdev->irq, i40e_intr, 0,
3405                                  pf->misc_int_name, pf);
3406        else
3407                err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3408                                  pf->misc_int_name, pf);
3409
3410        if (err)
3411                dev_info(&pf->pdev->dev, "request_irq failed, error: %d\n", err);
3412
3413        return err;
3414}
3415
3416#ifdef CONFIG_NET_POLL_CONTROLLER
3417/**
3418 * i40e_netpoll - A Polling 'interrupt' handler
3419 * @netdev: network interface device structure
3420 *
3421 * This is used by netconsole to send skbs without having to re-enable
3422 * interrupts.  It's not called while the normal interrupt routine is executing.
3423 **/
3424#ifdef I40E_FCOE
3425void i40e_netpoll(struct net_device *netdev)
3426#else
3427static void i40e_netpoll(struct net_device *netdev)
3428#endif
3429{
3430        struct i40e_netdev_priv *np = netdev_priv(netdev);
3431        struct i40e_vsi *vsi = np->vsi;
3432        struct i40e_pf *pf = vsi->back;
3433        int i;
3434
3435        /* if interface is down do nothing */
3436        if (test_bit(__I40E_DOWN, &vsi->state))
3437                return;
3438
3439        pf->flags |= I40E_FLAG_IN_NETPOLL;
3440        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3441                for (i = 0; i < vsi->num_q_vectors; i++)
3442                        i40e_msix_clean_rings(0, vsi->q_vectors[i]);
3443        } else {
3444                i40e_intr(pf->pdev->irq, pf);
3445        }
3446        pf->flags &= ~I40E_FLAG_IN_NETPOLL;
3447}
3448#endif
3449
3450/**
3451 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3452 * @pf: the PF being configured
3453 * @pf_q: the PF queue
3454 * @enable: enable or disable state of the queue
3455 *
3456 * This routine will wait for the given Tx queue of the PF to reach the
3457 * enabled or disabled state.
3458 * Returns -ETIMEDOUT if the queue fails to reach the requested state
3459 * after multiple retries, else 0 on success.
3460 **/
3461static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3462{
3463        int i;
3464        u32 tx_reg;
3465
3466        for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3467                tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3468                if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3469                        break;
3470
3471                usleep_range(10, 20);
3472        }
3473        if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3474                return -ETIMEDOUT;
3475
3476        return 0;
3477}
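
/* Timing note (an estimate, assuming a retry limit of 10 in this driver
 * version): each poll sleeps 10-20 us, so the routine gives up after
 * roughly 100-200 us if the queue never reaches the requested state.
 */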
3478
3479/**
3480 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
3481 * @vsi: the VSI being configured
3482 * @enable: start or stop the rings
3483 **/
3484static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3485{
3486        struct i40e_pf *pf = vsi->back;
3487        struct i40e_hw *hw = &pf->hw;
3488        int i, j, pf_q, ret = 0;
3489        u32 tx_reg;
3490
3491        pf_q = vsi->base_queue;
3492        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3493
3494                /* warn the TX unit of coming changes */
3495                i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
3496                if (!enable)
3497                        usleep_range(10, 20);
3498
3499                for (j = 0; j < 50; j++) {
3500                        tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3501                        if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3502                            ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3503                                break;
3504                        usleep_range(1000, 2000);
3505                }
3506                /* Skip if the queue is already in the requested state */
3507                if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3508                        continue;
3509
3510                /* turn on/off the queue */
3511                if (enable) {
3512                        wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3513                        tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3514                } else {
3515                        tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3516                }
3517
3518                wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
3519                /* No waiting for the Tx queue to disable */
3520                if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
3521                        continue;
3522
3523                /* wait for the change to finish */
3524                ret = i40e_pf_txq_wait(pf, pf_q, enable);
3525                if (ret) {
3526                        dev_info(&pf->pdev->dev,
3527                                 "%s: VSI seid %d Tx ring %d %sable timeout\n",
3528                                 __func__, vsi->seid, pf_q,
3529                                 (enable ? "en" : "dis"));
3530                        break;
3531                }
3532        }
3533
3534        if (hw->revision_id == 0)
3535                mdelay(50);
3536        return ret;
3537}
3538
3539/**
3540 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3541 * @pf: the PF being configured
3542 * @pf_q: the PF queue
3543 * @enable: enable or disable state of the queue
3544 *
3545 * This routine will wait for the given Rx queue of the PF to reach the
3546 * enabled or disabled state.
3547 * Returns -ETIMEDOUT if the queue fails to reach the requested state
3548 * after multiple retries, else 0 on success.
3549 **/
3550static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3551{
3552        int i;
3553        u32 rx_reg;
3554
3555        for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3556                rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3557                if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3558                        break;
3559
3560                usleep_range(10, 20);
3561        }
3562        if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3563                return -ETIMEDOUT;
3564
3565        return 0;
3566}
3567
3568/**
3569 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
3570 * @vsi: the VSI being configured
3571 * @enable: start or stop the rings
3572 **/
3573static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3574{
3575        struct i40e_pf *pf = vsi->back;
3576        struct i40e_hw *hw = &pf->hw;
3577        int i, j, pf_q, ret = 0;
3578        u32 rx_reg;
3579
3580        pf_q = vsi->base_queue;
3581        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3582                for (j = 0; j < 50; j++) {
3583                        rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3584                        if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3585                            ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3586                                break;
3587                        usleep_range(1000, 2000);
3588                }
3589
3590                /* Skip if the queue is already in the requested state */
3591                if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3592                        continue;
3593
3594                /* turn on/off the queue */
3595                if (enable)
3596                        rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3597                else
3598                        rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3599                wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3600
3601                /* wait for the change to finish */
3602                ret = i40e_pf_rxq_wait(pf, pf_q, enable);
3603                if (ret) {
3604                        dev_info(&pf->pdev->dev,
3605                                 "%s: VSI seid %d Rx ring %d %sable timeout\n",
3606                                 __func__, vsi->seid, pf_q,
3607                                 (enable ? "en" : "dis"));
3608                        break;
3609                }
3610        }
3611
3612        return ret;
3613}
3614
3615/**
3616 * i40e_vsi_control_rings - Start or stop a VSI's rings
3617 * @vsi: the VSI being configured
3618 * @request: start or stop the rings
3619 **/
3620int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
3621{
3622        int ret = 0;
3623
3624        /* do rx first for enable and last for disable */
3625        if (request) {
3626                ret = i40e_vsi_control_rx(vsi, request);
3627                if (ret)
3628                        return ret;
3629                ret = i40e_vsi_control_tx(vsi, request);
3630        } else {
3631                /* Ignore return value, we need to shutdown whatever we can */
3632                i40e_vsi_control_tx(vsi, request);
3633                i40e_vsi_control_rx(vsi, request);
3634        }
3635
3636        return ret;
3637}
3638
3639/**
3640 * i40e_vsi_free_irq - Free the irq association with the OS
3641 * @vsi: the VSI being configured
3642 **/
3643static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3644{
3645        struct i40e_pf *pf = vsi->back;
3646        struct i40e_hw *hw = &pf->hw;
3647        int base = vsi->base_vector;
3648        u32 val, qp;
3649        int i;
3650
3651        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3652                if (!vsi->q_vectors)
3653                        return;
3654
3655                if (!vsi->irqs_ready)
3656                        return;
3657
3658                vsi->irqs_ready = false;
3659                for (i = 0; i < vsi->num_q_vectors; i++) {
3660                        u16 vector = i + base;
3661
3662                        /* free only the irqs that were actually requested */
3663                        if (!vsi->q_vectors[i] ||
3664                            !vsi->q_vectors[i]->num_ringpairs)
3665                                continue;
3666
3667                        /* clear the affinity_mask in the IRQ descriptor */
3668                        irq_set_affinity_hint(pf->msix_entries[vector].vector,
3669                                              NULL);
3670                        free_irq(pf->msix_entries[vector].vector,
3671                                 vsi->q_vectors[i]);
3672
3673                        /* Tear down the interrupt queue link list
3674                         *
3675                         * We know that they come in pairs and always
3676                         * the Rx first, then the Tx.  To clear the
3677                         * link list, stick the EOL value into the
3678                         * next_q field of the registers.
3679                         */
3680                        val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3681                        qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3682                                >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3683                        val |= I40E_QUEUE_END_OF_LIST
3684                                << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3685                        wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3686
3687                        while (qp != I40E_QUEUE_END_OF_LIST) {
3688                                u32 next;
3689
3690                                val = rd32(hw, I40E_QINT_RQCTL(qp));
3691
3692                                val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3693                                         I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3694                                         I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3695                                         I40E_QINT_RQCTL_INTEVENT_MASK);
3696
3697                                val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3698                                         I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3699
3700                                wr32(hw, I40E_QINT_RQCTL(qp), val);
3701
3702                                val = rd32(hw, I40E_QINT_TQCTL(qp));
3703
3704                                next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3705                                        >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3706
3707                                val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3708                                         I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3709                                         I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3710                                         I40E_QINT_TQCTL_INTEVENT_MASK);
3711
3712                                val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3713                                         I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3714
3715                                wr32(hw, I40E_QINT_TQCTL(qp), val);
3716                                qp = next;
3717                        }
3718                }
3719        } else {
3720                free_irq(pf->pdev->irq, pf);
3721
3722                val = rd32(hw, I40E_PFINT_LNKLST0);
3723                qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3724                        >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3725                val |= I40E_QUEUE_END_OF_LIST
3726                        << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3727                wr32(hw, I40E_PFINT_LNKLST0, val);
3728
3729                val = rd32(hw, I40E_QINT_RQCTL(qp));
3730                val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3731                         I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3732                         I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3733                         I40E_QINT_RQCTL_INTEVENT_MASK);
3734
3735                val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3736                        I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3737
3738                wr32(hw, I40E_QINT_RQCTL(qp), val);
3739
3740                val = rd32(hw, I40E_QINT_TQCTL(qp));
3741
3742                val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3743                         I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3744                         I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3745                         I40E_QINT_TQCTL_INTEVENT_MASK);
3746
3747                val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3748                        I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3749
3750                wr32(hw, I40E_QINT_TQCTL(qp), val);
3751        }
3752}
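
/* Illustrative teardown (mirrors the setup in i40e_vsi_configure_msix):
 * for a vector chained RX q4 -> TX q4 -> RX q5 -> TX q5 -> EOL, the walk
 * above reads each TQCTL's next-queue field before overwriting it, so
 * the list is followed q4 then q5 and every RQCTL/TQCTL is reset.
 */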
3753
3754/**
3755 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3756 * @vsi: the VSI being configured
3757 * @v_idx: Index of vector to be freed
3758 *
3759 * This function frees the memory allocated to the q_vector.  In addition if
3760 * NAPI is enabled it will delete any references to the NAPI struct prior
3761 * to freeing the q_vector.
3762 **/
3763static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3764{
3765        struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3766        struct i40e_ring *ring;
3767
3768        if (!q_vector)
3769                return;
3770
3771        /* disassociate q_vector from rings */
3772        i40e_for_each_ring(ring, q_vector->tx)
3773                ring->q_vector = NULL;
3774
3775        i40e_for_each_ring(ring, q_vector->rx)
3776                ring->q_vector = NULL;
3777
3778        /* only VSI w/ an associated netdev is set up w/ NAPI */
3779        if (vsi->netdev)
3780                netif_napi_del(&q_vector->napi);
3781
3782        vsi->q_vectors[v_idx] = NULL;
3783
3784        kfree_rcu(q_vector, rcu);
3785}
3786
3787/**
3788 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3789 * @vsi: the VSI being un-configured
3790 *
3791 * This frees the memory allocated to the q_vectors and
3792 * deletes references to the NAPI struct.
3793 **/
3794static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3795{
3796        int v_idx;
3797
3798        for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3799                i40e_free_q_vector(vsi, v_idx);
3800}
3801
3802/**
3803 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3804 * @pf: board private structure
3805 **/
3806static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3807{
3808        /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3809        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3810                pci_disable_msix(pf->pdev);
3811                kfree(pf->msix_entries);
3812                pf->msix_entries = NULL;
3813        } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3814                pci_disable_msi(pf->pdev);
3815        }
3816        pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3817}
3818
3819/**
3820 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3821 * @pf: board private structure
3822 *
3823 * We go through and clear interrupt-specific resources and reset the
3824 * structure to pre-load conditions
3825 **/
3826static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3827{
3828        int i;
3829
3830        i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3831        for (i = 0; i < pf->num_alloc_vsi; i++)
3832                if (pf->vsi[i])
3833                        i40e_vsi_free_q_vectors(pf->vsi[i]);
3834        i40e_reset_interrupt_capability(pf);
3835}
3836
3837/**
3838 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3839 * @vsi: the VSI being configured
3840 **/
3841static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3842{
3843        int q_idx;
3844
3845        if (!vsi->netdev)
3846                return;
3847
3848        for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3849                napi_enable(&vsi->q_vectors[q_idx]->napi);
3850}
3851
3852/**
3853 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3854 * @vsi: the VSI being configured
3855 **/
3856static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3857{
3858        int q_idx;
3859
3860        if (!vsi->netdev)
3861                return;
3862
3863        for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3864                napi_disable(&vsi->q_vectors[q_idx]->napi);
3865}
3866
3867/**
3868 * i40e_vsi_close - Shut down a VSI
3869 * @vsi: the vsi to be quelled
3870 **/
3871static void i40e_vsi_close(struct i40e_vsi *vsi)
3872{
3873        if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
3874                i40e_down(vsi);
3875        i40e_vsi_free_irq(vsi);
3876        i40e_vsi_free_tx_resources(vsi);
3877        i40e_vsi_free_rx_resources(vsi);
3878}
3879
3880/**
3881 * i40e_quiesce_vsi - Pause a given VSI
3882 * @vsi: the VSI being paused
3883 **/
3884static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
3885{
3886        if (test_bit(__I40E_DOWN, &vsi->state))
3887                return;
3888
3889        /* No need to disable FCoE VSI when Tx suspended */
3890        if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
3891            vsi->type == I40E_VSI_FCOE) {
3892                dev_dbg(&vsi->back->pdev->dev,
3893                        "%s: VSI seid %d skipping FCoE VSI disable\n",
3894                         __func__, vsi->seid);
3895                return;
3896        }
3897
3898        set_bit(__I40E_NEEDS_RESTART, &vsi->state);
3899        if (vsi->netdev && netif_running(vsi->netdev))
3900                vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
3901        else
3902                i40e_vsi_close(vsi);
3904}
3905
3906/**
3907 * i40e_unquiesce_vsi - Resume a given VSI
3908 * @vsi: the VSI being resumed
3909 **/
3910static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
3911{
3912        if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
3913                return;
3914
3915        clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
3916        if (vsi->netdev && netif_running(vsi->netdev))
3917                vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
3918        else
3919                i40e_vsi_open(vsi);   /* this clears the DOWN bit */
3920}
3921
3922/**
3923 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
3924 * @pf: the PF
3925 **/
3926static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
3927{
3928        int v;
3929
3930        for (v = 0; v < pf->num_alloc_vsi; v++) {
3931                if (pf->vsi[v])
3932                        i40e_quiesce_vsi(pf->vsi[v]);
3933        }
3934}
3935
3936/**
3937 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
3938 * @pf: the PF
3939 **/
3940static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
3941{
3942        int v;
3943
3944        for (v = 0; v < pf->num_alloc_vsi; v++) {
3945                if (pf->vsi[v])
3946                        i40e_unquiesce_vsi(pf->vsi[v]);
3947        }
3948}
3949
3950#ifdef CONFIG_I40E_DCB
3951/**
3952 * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
3953 * @vsi: the VSI being configured
3954 *
3955 * This function waits for the given VSI's Tx queues to be disabled.
3956 **/
3957static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
3958{
3959        struct i40e_pf *pf = vsi->back;
3960        int i, pf_q, ret;
3961
3962        pf_q = vsi->base_queue;
3963        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3964                /* Check and wait for the disable status of the queue */
3965                ret = i40e_pf_txq_wait(pf, pf_q, false);
3966                if (ret) {
3967                        dev_info(&pf->pdev->dev,
3968                                 "%s: VSI seid %d Tx ring %d disable timeout\n",
3969                                 __func__, vsi->seid, pf_q);
3970                        return ret;
3971                }
3972        }
3973
3974        return 0;
3975}
3976
3977/**
3978 * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
3979 * @pf: the PF
3980 *
3981 * This function waits for the Tx queues to be in disabled state for all the
3982 * VSIs that are managed by this PF.
3983 **/
3984static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
3985{
3986        int v, ret = 0;
3987
3988        for (v = 0; v < pf->num_alloc_vsi; v++) {
3989                /* No need to wait for FCoE VSI queues */
3990                if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
3991                        ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
3992                        if (ret)
3993                                break;
3994                }
3995        }
3996
3997        return ret;
3998}
3999
4000#endif
4001/**
4002 * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4003 * @dcbcfg: the corresponding DCBx configuration structure
4004 *
4005 * Return the number of TCs from given DCBx configuration
4006 **/
4007static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4008{
4009        u8 num_tc = 0;
4010        int i;
4011
4012        /* Scan the ETS Config Priority Table to find the
4013         * traffic class enabled for each priority, and use
4014         * the highest traffic class index seen to derive
4015         * the number of enabled traffic classes
4016         */
4017        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4018                if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4019                        num_tc = dcbcfg->etscfg.prioritytable[i];
4020        }
4021
4022        /* Traffic class index starts from zero so
4023         * increment to return the actual count
4024         */
4025        return num_tc + 1;
4026}
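
/* Worked example: a priority table of {0, 0, 1, 1, 2, 0, 0, 0} maps
 * user priorities 0-7 onto TCs 0-2; the highest index seen is 2, so
 * the function returns 2 + 1 = 3 traffic classes.
 */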
4027
4028/**
4029 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4030 * @dcbcfg: the corresponding DCBx configuration structure
4031 *
4032 * Query the given DCBx configuration and return a bitmap of the
4033 * traffic classes it enables
4034 **/
4035static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4036{
4037        u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4038        u8 enabled_tc = 1;
4039        u8 i;
4040
4041        for (i = 0; i < num_tc; i++)
4042                enabled_tc |= 1 << i;
4043
4044        return enabled_tc;
4045}
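
/* Worked example: num_tc = 3 yields the bitmap 0x07 (TC0-TC2). Since
 * enabled_tc starts at 1, TC0 is always reported as enabled.
 */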
4046
4047/**
4048 * i40e_pf_get_num_tc - Get the number of enabled traffic classes for a PF
4049 * @pf: PF being queried
4050 *
4051 * Return number of traffic classes enabled for the given PF
4052 **/
4053static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4054{
4055        struct i40e_hw *hw = &pf->hw;
4056        u8 i, enabled_tc;
4057        u8 num_tc = 0;
4058        struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4059
4060        /* If DCB is not enabled then always in single TC */
4061        if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4062                return 1;
4063
4064        /* In MFP mode return the count of TCs enabled for this PF */
4065        if (pf->flags & I40E_FLAG_MFP_ENABLED) {
4066                enabled_tc = pf->hw.func_caps.enabled_tcmap;
4067                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4068                        if (enabled_tc & (1 << i))
4069                                num_tc++;
4070                }
4071                return num_tc;
4072        }
4073
4074        /* In SFP mode the PF is enabled for all TCs on the port */
4075        return i40e_dcb_get_num_tc(dcbcfg);
4076}
4077
4078/**
4079 * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4080 * @pf: PF being queried
4081 *
4082 * Return a bitmap for first enabled traffic class for this PF.
4083 **/
4084static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4085{
4086        u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4087        u8 i = 0;
4088
4089        if (!enabled_tc)
4090                return 0x1; /* TC0 */
4091
4092        /* Find the first enabled TC */
4093        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4094                if (enabled_tc & (1 << i))
4095                        break;
4096        }
4097
4098        return 1 << i;
4099}
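
/* Worked example: an enabled_tcmap of 0x0C (TC2 and TC3) stops the
 * loop at i = 2, so the function returns 1 << 2 = 0x04, the bitmap
 * for the lowest enabled traffic class.
 */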
4100
4101/**
4102 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
4103 * @pf: PF being queried
4104 *
4105 * Return a bitmap for enabled traffic classes for this PF.
4106 **/
4107static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4108{
4109        /* If DCB is not enabled for this PF then just return default TC */
4110        if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4111                return i40e_pf_get_default_tc(pf);
4112
4113        /* MFP mode will have enabled TCs set by FW */
4114        if (pf->flags & I40E_FLAG_MFP_ENABLED)
4115                return pf->hw.func_caps.enabled_tcmap;
4116
4117        /* SFP mode we want PF to be enabled for all TCs */
4118        return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4119}
4120
4121/**
4122 * i40e_vsi_get_bw_info - Query VSI BW Information
4123 * @vsi: the VSI being queried
4124 *
4125 * Returns 0 on success, negative value on failure
4126 **/
4127static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4128{
4129        struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4130        struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4131        struct i40e_pf *pf = vsi->back;
4132        struct i40e_hw *hw = &pf->hw;
4133        i40e_status aq_ret;
4134        u32 tc_bw_max;
4135        int i;
4136
4137        /* Get the VSI level BW configuration */
4138        aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4139        if (aq_ret) {
4140                dev_info(&pf->pdev->dev,
4141                         "couldn't get pf vsi bw config, err %d, aq_err %d\n",
4142                         aq_ret, pf->hw.aq.asq_last_status);
4143                return -EINVAL;
4144        }
4145
4146        /* Get the VSI level BW configuration per TC */
4147        aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4148                                                  NULL);
4149        if (aq_ret) {
4150                dev_info(&pf->pdev->dev,
4151                         "couldn't get pf vsi ets bw config, err %d, aq_err %d\n",
4152                         aq_ret, pf->hw.aq.asq_last_status);
4153                return -EINVAL;
4154        }
4155
4156        if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4157                dev_info(&pf->pdev->dev,
4158                         "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4159                         bw_config.tc_valid_bits,
4160                         bw_ets_config.tc_valid_bits);
4161                /* Still continuing */
4162        }
4163
4164        vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4165        vsi->bw_max_quanta = bw_config.max_bw;
4166        tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4167                    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4168        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4169                vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4170                vsi->bw_ets_limit_credits[i] =
4171                                        le16_to_cpu(bw_ets_config.credits[i]);
4172                /* 3 bits out of 4 for each TC */
4173                vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4174        }
4175
4176        return 0;
4177}
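
/* Worked example of the quanta unpacking above: with tc_bw_max =
 * 0x00003210, TC0's nibble is 0x0, TC1's is 0x1, TC2's is 0x2 and
 * TC3's is 0x3; masking each nibble with 0x7 keeps the low 3 of the
 * 4 bits per TC.
 */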
4178
4179/**
4180 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4181 * @vsi: the VSI being configured
4182 * @enabled_tc: TC bitmap
4183 * @bw_share: BW shared credits per TC
4184 *
4185 * Returns 0 on success, negative value on failure
4186 **/
4187static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4188                                       u8 *bw_share)
4189{
4190        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4191        i40e_status aq_ret;
4192        int i;
4193
4194        bw_data.tc_valid_bits = enabled_tc;
4195        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4196                bw_data.tc_bw_credits[i] = bw_share[i];
4197
4198        aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4199                                          NULL);
4200        if (aq_ret) {
4201                dev_info(&vsi->back->pdev->dev,
4202                         "AQ command Config VSI BW allocation per TC failed = %d\n",
4203                         vsi->back->hw.aq.asq_last_status);
4204                return -EINVAL;
4205        }
4206
4207        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4208                vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4209
4210        return 0;
4211}
4212
4213/**
4214 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4215 * @vsi: the VSI being configured
4216 * @enabled_tc: TC map to be enabled
4217 *
4218 **/
4219static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4220{
4221        struct net_device *netdev = vsi->netdev;
4222        struct i40e_pf *pf = vsi->back;
4223        struct i40e_hw *hw = &pf->hw;
4224        u8 netdev_tc = 0;
4225        int i;
4226        struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4227
4228        if (!netdev)
4229                return;
4230
4231        if (!enabled_tc) {
4232                netdev_reset_tc(netdev);
4233                return;
4234        }
4235
4236        /* Set up actual enabled TCs on the VSI */
4237        if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4238                return;
4239
4240        /* set per TC queues for the VSI */
4241        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4242                /* Only set TC queues for enabled tcs
4243                 *
4244                 * e.g. For a VSI that has TC0 and TC3 enabled the
4245                 * enabled_tc bitmap would be 0x09 (binary 1001); the
4246                 * driver will set numtc for the netdev to 2, which the
4247                 * netdev layer will reference as TC 0 and TC 1.
4248                 */
4249                if (vsi->tc_config.enabled_tc & (1 << i))
4250                        netdev_set_tc_queue(netdev,
4251                                        vsi->tc_config.tc_info[i].netdev_tc,
4252                                        vsi->tc_config.tc_info[i].qcount,
4253                                        vsi->tc_config.tc_info[i].qoffset);
4254        }
4255
4256        /* Assign UP2TC map for the VSI */
4257        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4258                /* Get the actual TC# for the UP */
4259                u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4260                /* Get the mapped netdev TC# for the UP */
4261                netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
4262                netdev_set_prio_tc_map(netdev, i, netdev_tc);
4263        }
4264}
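/* Worked example for the UP2TC mapping above (values illustrative): with
 * TC0 and TC3 enabled, tc_info[3].netdev_tc is 1 (the second netdev TC).
 * If the DCBX priority table maps user priority 5 to hardware TC3, the
 * loop ends up calling netdev_set_prio_tc_map(netdev, 5, 1) for i == 5.
 */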
4265
4266/**
4267 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
4268 * @vsi: the VSI being configured
4269 * @ctxt: the ctxt buffer returned from AQ VSI update param command
4270 **/
4271static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4272                                      struct i40e_vsi_context *ctxt)
4273{
4274        /* Copy just the sections that were touched, not the entire
4275         * info, since not all sections are valid as returned by the
4276         * update VSI params command.
4277         */
4278        vsi->info.mapping_flags = ctxt->info.mapping_flags;
4279        memcpy(&vsi->info.queue_mapping,
4280               &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4281        memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4282               sizeof(vsi->info.tc_mapping));
4283}
4284
4285/**
4286 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4287 * @vsi: VSI to be configured
4288 * @enabled_tc: TC bitmap
4289 *
4290 * This configures a particular VSI for TCs that are mapped to the
4291 * given TC bitmap, using the default (equal) bandwidth share for
4292 * the TCs across VSIs when configuring a particular VSI.
4293 *
4294 * NOTE:
4295 * It is expected that the VSI queues have been quiesced before calling
4296 * this function.
4297 **/
4298static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4299{
4300        u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4301        struct i40e_vsi_context ctxt;
4302        int ret = 0;
4303        int i;
4304
4305        /* Nothing to do if the requested TC map matches the one already enabled */
4306        if (vsi->tc_config.enabled_tc == enabled_tc)
4307                return ret;
4308
4309        /* Enable ETS TCs with equal BW Share for now across all VSIs */
4310        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4311                if (enabled_tc & (1 << i))
4312                        bw_share[i] = 1;
4313        }
4314
4315        ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4316        if (ret) {
4317                dev_info(&vsi->back->pdev->dev,
4318                         "Failed configuring TC map %d for VSI %d\n",
4319                         enabled_tc, vsi->seid);
4320                goto out;
4321        }
4322
4323        /* Update Queue Pairs Mapping for currently enabled UPs */
4324        ctxt.seid = vsi->seid;
4325        ctxt.pf_num = vsi->back->hw.pf_id;
4326        ctxt.vf_num = 0;
4327        ctxt.uplink_seid = vsi->uplink_seid;
4328        memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
4329        i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4330
4331        /* Update the VSI after updating the VSI queue-mapping information */
4332        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4333        if (ret) {
4334                dev_info(&vsi->back->pdev->dev,
4335                         "update vsi failed, aq_err=%d\n",
4336                         vsi->back->hw.aq.asq_last_status);
4337                goto out;
4338        }
4339        /* update the local VSI info with updated queue map */
4340        i40e_vsi_update_queue_map(vsi, &ctxt);
4341        vsi->info.valid_sections = 0;
4342
4343        /* Update current VSI BW information */
4344        ret = i40e_vsi_get_bw_info(vsi);
4345        if (ret) {
4346                dev_info(&vsi->back->pdev->dev,
4347                         "Failed updating vsi bw info, aq_err=%d\n",
4348                         vsi->back->hw.aq.asq_last_status);
4349                goto out;
4350        }
4351
4352        /* Update the netdev TC setup */
4353        i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4354out:
4355        return ret;
4356}
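/* Call-flow note: as required by the comment above, callers quiesce the
 * VSI queues around this call; a sketch using this file's quiesce
 * helpers, as the DCB reconfiguration path does:
 *
 *	i40e_quiesce_vsi(vsi);
 *	ret = i40e_vsi_config_tc(vsi, tc_map);
 *	i40e_unquiesce_vsi(vsi);
 */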
4357
4358/**
4359 * i40e_veb_config_tc - Configure TCs for given VEB
4360 * @veb: given VEB
4361 * @enabled_tc: TC bitmap
4362 *
4363 * Configures given TC bitmap for VEB (switching) element
4364 **/
4365int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4366{
4367        struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4368        struct i40e_pf *pf = veb->pf;
4369        int ret = 0;
4370        int i;
4371
4372        /* Nothing to do if no TCs are requested or they are already enabled */
4373        if (!enabled_tc || veb->enabled_tc == enabled_tc)
4374                return ret;
4375
4376        bw_data.tc_valid_bits = enabled_tc;
4377        /* bw_data.absolute_credits is not set (relative) */
4378
4379        /* Enable ETS TCs with equal BW Share for now */
4380        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4381                if (enabled_tc & (1 << i))
4382                        bw_data.tc_bw_share_credits[i] = 1;
4383        }
4384
4385        ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4386                                                   &bw_data, NULL);
4387        if (ret) {
4388                dev_info(&pf->pdev->dev,
4389                         "veb bw config failed, aq_err=%d\n",
4390                         pf->hw.aq.asq_last_status);
4391                goto out;
4392        }
4393
4394        /* Update the BW information */
4395        ret = i40e_veb_get_bw_info(veb);
4396        if (ret) {
4397                dev_info(&pf->pdev->dev,
4398                         "Failed getting veb bw config, aq_err=%d\n",
4399                         pf->hw.aq.asq_last_status);
4400        }
4401
4402out:
4403        return ret;
4404}
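/* Note: as with the VSI variant, the credits programmed here are relative
 * ETS weights rather than absolute rates (bw_data.absolute_credits is
 * left clear above), so two enabled TCs with a share of 1 each simply
 * split the VEB's bandwidth evenly.
 */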
4405
4406#ifdef CONFIG_I40E_DCB
4407/**
4408 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
4409 * @pf: PF struct
4410 *
4411 * Reconfigure VEB/VSIs on a given PF; it is assumed that
4412 * the caller will have quiesced all the VSIs before calling
4413 * this function
4414 **/
4415static void i40e_dcb_reconfigure(struct i40e_pf *pf)
4416{
4417        u8 tc_map = 0;
4418        int ret;
4419        u8 v;
4420
4421        /* Enable the TCs available on the PF for all VEBs */
4422        tc_map = i40e_pf_get_tc_map(pf);
4423        for (v = 0; v < I40E_MAX_VEB; v++) {
4424                if (!pf->veb[v])
4425                        continue;
4426                ret = i40e_veb_config_tc(pf->veb[v], tc_map);
4427                if (ret) {
4428                        dev_info(&pf->pdev->dev,
4429                                 "Failed configuring TC for VEB seid=%d\n",
4430                                 pf->veb[v]->seid);
4431                        /* Will try to configure as many components as possible */
4432                }
4433        }
4434
4435        /* Update each VSI */
4436        for (v = 0; v < pf->num_alloc_vsi; v++) {
4437                if (!pf->vsi[v])
4438                        continue;
4439
4440                /* - Enable all TCs for the LAN VSI
4441#ifdef I40E_FCOE
4442                 * - For FCoE VSI only enable the TC configured
4443                 *   as per the APP TLV
4444#endif
4445                 * - For all others keep them at TC0 for now
4446                 */
4447                if (v == pf->lan_vsi)
4448                        tc_map = i40e_pf_get_tc_map(pf);
4449                else
4450                        tc_map = i40e_pf_get_default_tc(pf);
4451#ifdef I40E_FCOE
4452                if (pf->vsi[v]->type == I40E_VSI_FCOE)
4453                        tc_map = i40e_get_fcoe_tc_map(pf);
4454#endif /* #ifdef I40E_FCOE */
4455
4456                ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
4457                if (ret) {
4458                        dev_info(&pf->pdev->dev,
4459                                 "Failed configuring TC for VSI seid=%d\n",
4460                                 pf->vsi[v]->seid);
4461                        /* Will try to configure as many components as possible */
4462                } else {
4463                        /* Re-configure VSI vectors based on updated TC map */
4464                        i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
4465                        if (pf->vsi[v]->netdev)
4466                                i40e_dcbnl_set_all(pf->vsi[v]);
4467                }
4468        }
4469}
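/* Example (illustrative): if the PF's DCB configuration enables TC0-TC2,
 * i40e_pf_get_tc_map() yields 0x7, which is applied to every VEB and to
 * the LAN VSI above, while the remaining VSIs stay on the default map
 * (typically just TC0, i.e. 0x1) unless the FCoE case applies.
 */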
4470
4471/**
4472 * i40e_resume_port_tx - Resume port Tx
4473 * @pf: PF struct
4474 *
4475 * Resume a port's Tx and issue a PF reset in case of failure to
4476 * resume.
4477 **/
4478static int i40e_resume_port_tx(struct i40e_pf *pf)
4479{
4480        struct i40e_hw *hw = &pf->hw;
4481        int ret;
4482
4483        ret = i40e_aq_resume_port_tx(hw, NULL);
4484        if (ret) {
4485                dev_info(&pf->pdev->dev,
4486                         "AQ command Resume Port Tx failed = %d\n",
4487                          pf->hw.aq.asq_last_status);
4488                /* Schedule PF reset to recover */
4489                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4490                i40e_service_event_schedule(pf);
4491        }
4492
4493        return ret;
4494}
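/* Note: there is no retry here; the PF reset scheduled above is the
 * recovery mechanism when the Resume Port Tx AQ command fails.
 */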
4495
4496/**
4497 * i40e_init_pf_dcb - Initialize DCB configuration
4498 * @pf: PF being configured
4499 *
4500 * Query the current DCB configuration and cache it
4501 * in the hardware structure
4502 **/
4503static int i40e_init_pf_dcb(struct i40e_pf *pf)
4504{
4505        struct i40e_hw *hw = &pf->hw;
4506        int err = 0;
4507
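        /* Skip DCB initialization when the function is NPAR
         * (multi-partition) enabled.
         */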
4508        if (pf->hw.func_caps.npar_enable)
4509                goto out;
4510
4511        /* Get the initial DCB configuration */
4512        err = i40e_init_dcb(hw);
4513        if (!err) {
4514                /* Device/Function is not DCBX capable */
4515                if ((!hw->func_caps.dcb) ||
4516                    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
4517                        dev_info(&pf->pdev->dev,
4518                                 "DCBX offload is not supported or is disabled for this PF.\n");
4519
4520                        if (pf->flags & I40E_FLAG_MFP_ENABLED)
4521                                goto out;
4522
4523                } else {
4524                        /* When status is not DISABLED, DCBX is running in FW */
4525                        pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
4526                                       DCB_CAP_DCBX_VER_IEEE;
4527
4528                        pf->flags |= I40E_FLAG_DCB_CAPABLE;
4529                        /* Enable DCB tagging only when more than one TC */
4530                        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
4531                                pf->flags |= I40E_FLAG_DCB_ENABLED;
4532                        dev_dbg(&pf->pdev->dev,
4533                                "DCBX offload is supported for this PF.\n");
4534                }
4535        } else {
4536                dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
4537                         pf->hw.aq.asq_last_status);
4538        }
4539
4540