linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
   1/*
   2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
   3 *
   4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
   5 *
   6 * This software is available to you under a choice of one of two
   7 * licenses.  You may choose to be licensed under the terms of the GNU
   8 * General Public License (GPL) Version 2, available from the file
   9 * COPYING in the main directory of this source tree, or the
  10 * OpenIB.org BSD license below:
  11 *
  12 *     Redistribution and use in source and binary forms, with or
  13 *     without modification, are permitted provided that the following
  14 *     conditions are met:
  15 *
  16 *      - Redistributions of source code must retain the above
  17 *        copyright notice, this list of conditions and the following
  18 *        disclaimer.
  19 *
  20 *      - Redistributions in binary form must reproduce the above
  21 *        copyright notice, this list of conditions and the following
  22 *        disclaimer in the documentation and/or other materials
  23 *        provided with the distribution.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32 * SOFTWARE.
  33 */
  34
  35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  36
  37#include <linux/bitmap.h>
  38#include <linux/crc32.h>
  39#include <linux/ctype.h>
  40#include <linux/debugfs.h>
  41#include <linux/err.h>
  42#include <linux/etherdevice.h>
  43#include <linux/firmware.h>
  44#include <linux/if.h>
  45#include <linux/if_vlan.h>
  46#include <linux/init.h>
  47#include <linux/log2.h>
  48#include <linux/mdio.h>
  49#include <linux/module.h>
  50#include <linux/moduleparam.h>
  51#include <linux/mutex.h>
  52#include <linux/netdevice.h>
  53#include <linux/pci.h>
  54#include <linux/aer.h>
  55#include <linux/rtnetlink.h>
  56#include <linux/sched.h>
  57#include <linux/seq_file.h>
  58#include <linux/sockios.h>
  59#include <linux/vmalloc.h>
  60#include <linux/workqueue.h>
  61#include <net/neighbour.h>
  62#include <net/netevent.h>
  63#include <net/addrconf.h>
  64#include <net/bonding.h>
  65#include <linux/uaccess.h>
  66#include <linux/crash_dump.h>
  67#include <net/udp_tunnel.h>
  68#include <net/xfrm.h>
  69#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
  70#include <net/tls.h>
  71#endif
  72
  73#include "cxgb4.h"
  74#include "cxgb4_filter.h"
  75#include "t4_regs.h"
  76#include "t4_values.h"
  77#include "t4_msg.h"
  78#include "t4fw_api.h"
  79#include "t4fw_version.h"
  80#include "cxgb4_dcb.h"
  81#include "srq.h"
  82#include "cxgb4_debugfs.h"
  83#include "clip_tbl.h"
  84#include "l2t.h"
  85#include "smt.h"
  86#include "sched.h"
  87#include "cxgb4_tc_u32.h"
  88#include "cxgb4_tc_flower.h"
  89#include "cxgb4_tc_mqprio.h"
  90#include "cxgb4_tc_matchall.h"
  91#include "cxgb4_ptp.h"
  92#include "cxgb4_cudbg.h"
  93
  94char cxgb4_driver_name[] = KBUILD_MODNAME;
  95
  96#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
  97
  98#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
  99                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
 100                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
 101
 102/* Macros needed to support the PCI Device ID Table ...
 103 */
 104#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
 105        static const struct pci_device_id cxgb4_pci_tbl[] = {
 106#define CXGB4_UNIFIED_PF 0x4
 107
 108#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
 109
 110/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 111 * called for both.
 112 */
 113#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
 114
 115#define CH_PCI_ID_TABLE_ENTRY(devid) \
 116                {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
 117
 118#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
 119                { 0, } \
 120        }
 121
 122#include "t4_pci_id_tbl.h"
 123
 124#define FW4_FNAME "cxgb4/t4fw.bin"
 125#define FW5_FNAME "cxgb4/t5fw.bin"
 126#define FW6_FNAME "cxgb4/t6fw.bin"
 127#define FW4_CFNAME "cxgb4/t4-config.txt"
 128#define FW5_CFNAME "cxgb4/t5-config.txt"
 129#define FW6_CFNAME "cxgb4/t6-config.txt"
 130#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
 131#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
 132#define PHY_AQ1202_DEVICEID 0x4409
 133#define PHY_BCM84834_DEVICEID 0x4486
 134
 135MODULE_DESCRIPTION(DRV_DESC);
 136MODULE_AUTHOR("Chelsio Communications");
 137MODULE_LICENSE("Dual BSD/GPL");
 138MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
 139MODULE_FIRMWARE(FW4_FNAME);
 140MODULE_FIRMWARE(FW5_FNAME);
 141MODULE_FIRMWARE(FW6_FNAME);
 142
 143/*
 144 * The driver uses the best interrupt scheme available on a platform in the
 145 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 146 * of these schemes the driver may consider as follows:
 147 *
 148 * msi = 2: choose from among all three options
 149 * msi = 1: only consider MSI and INTx interrupts
 150 * msi = 0: force INTx interrupts
 151 */
 152static int msi = 2;
 153
 154module_param(msi, int, 0644);
 155MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
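     /* For example, loading the driver with "modprobe cxgb4 msi=1" limits it
      * to MSI or INTx interrupts, and "msi=0" forces legacy INTx; the default
      * (msi=2) also allows MSI-X.
      */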
 156
 157/*
 158 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 159 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 160 * boundaries.  This is a requirement for many architectures which will throw
 161 * a machine check fault if an attempt is made to access one of the 4-byte IP
 162 * header fields on a non-4-byte boundary.  And it's a major performance issue
 163 * even on some architectures which allow it like some implementations of the
 164 * x86 ISA.  However, some architectures don't mind this and for some very
 165 * edge-case performance sensitive applications (like forwarding large volumes
 166 * of small packets), setting this DMA offset to 0 will decrease the number of
 167 * PCI-E Bus transfers enough to measurably affect performance.
 168 */
 169static int rx_dma_offset = 2;
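     /* For example, with the default rx_dma_offset of 2 the 14-byte Ethernet
      * header occupies buffer bytes 2-15, so the IP header starts at byte 16,
      * a 4-byte aligned offset; with an offset of 0 it would start at byte 14.
      */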
 170
  171/* TX queue select: determines which algorithm is used to pick the TX
  172 * queue. Select between the kernel-provided function (select_queue=0)
  173 * and the driver's cxgb_select_queue() function (select_queue=1).
  174 *
  175 * Default: select_queue=0
  176 */
 177static int select_queue;
 178module_param(select_queue, int, 0644);
 179MODULE_PARM_DESC(select_queue,
 180                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
 181
 182static struct dentry *cxgb4_debugfs_root;
 183
 184LIST_HEAD(adapter_list);
 185DEFINE_MUTEX(uld_mutex);
 186LIST_HEAD(uld_list);
 187
 188static int cfg_queues(struct adapter *adap);
 189
 190static void link_report(struct net_device *dev)
 191{
 192        if (!netif_carrier_ok(dev))
 193                netdev_info(dev, "link down\n");
 194        else {
 195                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
 196
 197                const char *s;
 198                const struct port_info *p = netdev_priv(dev);
 199
 200                switch (p->link_cfg.speed) {
 201                case 100:
 202                        s = "100Mbps";
 203                        break;
 204                case 1000:
 205                        s = "1Gbps";
 206                        break;
 207                case 10000:
 208                        s = "10Gbps";
 209                        break;
 210                case 25000:
 211                        s = "25Gbps";
 212                        break;
 213                case 40000:
 214                        s = "40Gbps";
 215                        break;
 216                case 50000:
 217                        s = "50Gbps";
 218                        break;
 219                case 100000:
 220                        s = "100Gbps";
 221                        break;
 222                default:
 223                        pr_info("%s: unsupported speed: %d\n",
 224                                dev->name, p->link_cfg.speed);
 225                        return;
 226                }
 227
 228                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
 229                            fc[p->link_cfg.fc]);
 230        }
 231}
 232
 233#ifdef CONFIG_CHELSIO_T4_DCB
 234/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
 235static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
 236{
 237        struct port_info *pi = netdev_priv(dev);
 238        struct adapter *adap = pi->adapter;
 239        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
 240        int i;
 241
 242        /* We use a simple mapping of Port TX Queue Index to DCB
 243         * Priority when we're enabling DCB.
 244         */
 245        for (i = 0; i < pi->nqsets; i++, txq++) {
 246                u32 name, value;
 247                int err;
 248
 249                name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
 250                        FW_PARAMS_PARAM_X_V(
 251                                FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
 252                        FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
 253                value = enable ? i : 0xffffffff;
 254
 255                /* Since we can be called while atomic (from "interrupt
  256                 * level") we need to issue the Set Parameters Command
 257                 * without sleeping (timeout < 0).
 258                 */
 259                err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
 260                                            &name, &value,
 261                                            -FW_CMD_MAX_TIMEOUT);
 262
 263                if (err)
 264                        dev_err(adap->pdev_dev,
 265                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
 266                                enable ? "set" : "unset", pi->port_id, i, -err);
 267                else
 268                        txq->dcb_prio = enable ? value : 0;
 269        }
 270}
 271
 272int cxgb4_dcb_enabled(const struct net_device *dev)
 273{
 274        struct port_info *pi = netdev_priv(dev);
 275
 276        if (!pi->dcb.enabled)
 277                return 0;
 278
 279        return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
 280                (pi->dcb.state == CXGB4_DCB_STATE_HOST));
 281}
 282#endif /* CONFIG_CHELSIO_T4_DCB */
 283
 284void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
 285{
 286        struct net_device *dev = adapter->port[port_id];
 287
 288        /* Skip changes from disabled ports. */
 289        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
 290                if (link_stat)
 291                        netif_carrier_on(dev);
 292                else {
 293#ifdef CONFIG_CHELSIO_T4_DCB
 294                        if (cxgb4_dcb_enabled(dev)) {
 295                                cxgb4_dcb_reset(dev);
 296                                dcb_tx_queue_prio_enable(dev, false);
 297                        }
 298#endif /* CONFIG_CHELSIO_T4_DCB */
 299                        netif_carrier_off(dev);
 300                }
 301
 302                link_report(dev);
 303        }
 304}
 305
 306void t4_os_portmod_changed(struct adapter *adap, int port_id)
 307{
 308        static const char *mod_str[] = {
 309                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
 310        };
 311
 312        struct net_device *dev = adap->port[port_id];
 313        struct port_info *pi = netdev_priv(dev);
 314
 315        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
 316                netdev_info(dev, "port module unplugged\n");
 317        else if (pi->mod_type < ARRAY_SIZE(mod_str))
 318                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
 319        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
 320                netdev_info(dev, "%s: unsupported port module inserted\n",
 321                            dev->name);
 322        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
 323                netdev_info(dev, "%s: unknown port module inserted\n",
 324                            dev->name);
 325        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
 326                netdev_info(dev, "%s: transceiver module error\n", dev->name);
 327        else
 328                netdev_info(dev, "%s: unknown module type %d inserted\n",
 329                            dev->name, pi->mod_type);
 330
 331        /* If the interface is running, then we'll need any "sticky" Link
 332         * Parameters redone with a new Transceiver Module.
 333         */
 334        pi->link_cfg.redo_l1cfg = netif_running(dev);
 335}
 336
 337int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
 338module_param(dbfifo_int_thresh, int, 0644);
 339MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
 340
 341/*
 342 * usecs to sleep while draining the dbfifo
 343 */
 344static int dbfifo_drain_delay = 1000;
 345module_param(dbfifo_drain_delay, int, 0644);
 346MODULE_PARM_DESC(dbfifo_drain_delay,
 347                 "usecs to sleep while draining the dbfifo");
 348
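     /* Recompute the 64-bit hash filter vector covering every address on the
      * adapter's MAC hash list and program it for this port's VI.
      */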
 349static inline int cxgb4_set_addr_hash(struct port_info *pi)
 350{
 351        struct adapter *adap = pi->adapter;
 352        u64 vec = 0;
 353        bool ucast = false;
 354        struct hash_mac_addr *entry;
 355
 356        /* Calculate the hash vector for the updated list and program it */
 357        list_for_each_entry(entry, &adap->mac_hlist, list) {
 358                ucast |= is_unicast_ether_addr(entry->addr);
 359                vec |= (1ULL << hash_mac_addr(entry->addr));
 360        }
 361        return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
 362                                vec, false);
 363}
 364
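     /* __dev_uc_sync()/__dev_mc_sync() callback: add @mac_addr to the VI's MPS
      * TCAM via cxgb4_alloc_mac_filt(). If the address lands in the hash region
      * instead (a non-zero hash is returned), remember it on the MAC hash list
      * and reprogram the hash vector.
      */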
 365static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
 366{
 367        struct port_info *pi = netdev_priv(netdev);
 368        struct adapter *adap = pi->adapter;
 369        int ret;
 370        u64 mhash = 0;
 371        u64 uhash = 0;
  372        /* idx stores the index of allocated filters; its size should
  373         * be modified based on the number of MAC addresses for which
  374         * we allocate filters.
  375         */
 376
 377        u16 idx[1] = {};
 378        bool free = false;
 379        bool ucast = is_unicast_ether_addr(mac_addr);
 380        const u8 *maclist[1] = {mac_addr};
 381        struct hash_mac_addr *new_entry;
 382
 383        ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
 384                                   idx, ucast ? &uhash : &mhash, false);
 385        if (ret < 0)
 386                goto out;
  387        /* If the hash is non-zero, add the address to the hash addr
  388         * list so that at the end we can recalculate the hash for the
  389         * whole list and program it.
  390         */
 391        if (uhash || mhash) {
 392                new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
 393                if (!new_entry)
 394                        return -ENOMEM;
 395                ether_addr_copy(new_entry->addr, mac_addr);
 396                list_add_tail(&new_entry->list, &adap->mac_hlist);
 397                ret = cxgb4_set_addr_hash(pi);
 398        }
 399out:
 400        return ret < 0 ? ret : 0;
 401}
 402
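     /* __dev_uc_unsync()/__dev_mc_unsync() callback: drop @mac_addr either from
      * the MAC hash list (and reprogram the hash vector) or from the MPS TCAM.
      */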
 403static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
 404{
 405        struct port_info *pi = netdev_priv(netdev);
 406        struct adapter *adap = pi->adapter;
 407        int ret;
 408        const u8 *maclist[1] = {mac_addr};
 409        struct hash_mac_addr *entry, *tmp;
 410
  411        /* If the MAC address to be removed is in the hash addr
  412         * list, delete it from the list and update the hash vector.
  413         */
 414        list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
 415                if (ether_addr_equal(entry->addr, mac_addr)) {
 416                        list_del(&entry->list);
 417                        kfree(entry);
 418                        return cxgb4_set_addr_hash(pi);
 419                }
 420        }
 421
 422        ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
 423        return ret < 0 ? -EINVAL : 0;
 424}
 425
 426/*
  427 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 428 * If @mtu is -1 it is left unchanged.
 429 */
 430static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
 431{
 432        struct port_info *pi = netdev_priv(dev);
 433        struct adapter *adapter = pi->adapter;
 434
 435        __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
 436        __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
 437
 438        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
 439                             mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
 440                             (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
 441                             sleep_ok);
 442}
 443
 444/**
 445 *      cxgb4_change_mac - Update match filter for a MAC address.
 446 *      @pi: the port_info
 447 *      @viid: the VI id
 448 *      @tcam_idx: TCAM index of existing filter for old value of MAC address,
 449 *                 or -1
 450 *      @addr: the new MAC address value
 451 *      @persist: whether a new MAC allocation should be persistent
 452 *      @smt_idx: the destination to store the new SMT index.
 453 *
 454 *      Modifies an MPS filter and sets it to the new MAC address if
 455 *      @tcam_idx >= 0, or adds the MAC address to a new filter if
 456 *      @tcam_idx < 0. In the latter case the address is added persistently
 457 *      if @persist is %true.
  458 *      Addresses are programmed into the hash region if the TCAM runs out of entries.
 459 *
 460 */
 461int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
 462                     int *tcam_idx, const u8 *addr, bool persist,
 463                     u8 *smt_idx)
 464{
 465        struct adapter *adapter = pi->adapter;
 466        struct hash_mac_addr *entry, *new_entry;
 467        int ret;
 468
 469        ret = t4_change_mac(adapter, adapter->mbox, viid,
 470                            *tcam_idx, addr, persist, smt_idx);
  471        /* We ran out of TCAM entries; try programming the hash region. */
 472        if (ret == -ENOMEM) {
  473                /* If the MAC address to be updated is already in the
  474                 * hash addr list, update that entry in place.
  475                 */
 476                list_for_each_entry(entry, &adapter->mac_hlist, list) {
 477                        if (entry->iface_mac) {
 478                                ether_addr_copy(entry->addr, addr);
 479                                goto set_hash;
 480                        }
 481                }
 482                new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
 483                if (!new_entry)
 484                        return -ENOMEM;
 485                ether_addr_copy(new_entry->addr, addr);
 486                new_entry->iface_mac = true;
 487                list_add_tail(&new_entry->list, &adapter->mac_hlist);
 488set_hash:
 489                ret = cxgb4_set_addr_hash(pi);
 490        } else if (ret >= 0) {
 491                *tcam_idx = ret;
 492                ret = 0;
 493        }
 494
 495        return ret;
 496}
 497
 498/*
 499 *      link_start - enable a port
 500 *      @dev: the port to enable
 501 *
 502 *      Performs the MAC and PHY actions needed to enable a port.
 503 */
 504static int link_start(struct net_device *dev)
 505{
 506        struct port_info *pi = netdev_priv(dev);
 507        unsigned int mb = pi->adapter->mbox;
 508        int ret;
 509
 510        /*
 511         * We do not set address filters and promiscuity here, the stack does
 512         * that step explicitly.
 513         */
 514        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
 515                            dev->mtu, -1, -1, -1,
 516                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
 517        if (ret == 0)
 518                ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
 519                                            dev->dev_addr, true, &pi->smt_idx);
 520        if (ret == 0)
 521                ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
 522                                    &pi->link_cfg);
 523        if (ret == 0) {
 524                local_bh_disable();
 525                ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
 526                                          true, CXGB4_DCB_ENABLED);
 527                local_bh_enable();
 528        }
 529
 530        return ret;
 531}
 532
 533#ifdef CONFIG_CHELSIO_T4_DCB
 534/* Handle a Data Center Bridging update message from the firmware. */
 535static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
 536{
 537        int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
 538        struct net_device *dev = adap->port[adap->chan_map[port]];
 539        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
 540        int new_dcb_enabled;
 541
 542        cxgb4_dcb_handle_fw_update(adap, pcmd);
 543        new_dcb_enabled = cxgb4_dcb_enabled(dev);
 544
 545        /* If the DCB has become enabled or disabled on the port then we're
 546         * going to need to set up/tear down DCB Priority parameters for the
 547         * TX Queues associated with the port.
 548         */
 549        if (new_dcb_enabled != old_dcb_enabled)
 550                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
 551}
 552#endif /* CONFIG_CHELSIO_T4_DCB */
 553
 554/* Response queue handler for the FW event queue.
 555 */
 556static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
 557                          const struct pkt_gl *gl)
 558{
 559        u8 opcode = ((const struct rss_header *)rsp)->opcode;
 560
 561        rsp++;                                          /* skip RSS header */
 562
 563        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
 564         */
 565        if (unlikely(opcode == CPL_FW4_MSG &&
 566           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
 567                rsp++;
 568                opcode = ((const struct rss_header *)rsp)->opcode;
 569                rsp++;
 570                if (opcode != CPL_SGE_EGR_UPDATE) {
  571                        dev_err(q->adap->pdev_dev,
  572                                "unexpected FW4/CPL %#x on FW event queue\n", opcode);
 573                        goto out;
 574                }
 575        }
 576
 577        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
 578                const struct cpl_sge_egr_update *p = (void *)rsp;
 579                unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
 580                struct sge_txq *txq;
 581
 582                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
 583                txq->restarts++;
 584                if (txq->q_type == CXGB4_TXQ_ETH) {
 585                        struct sge_eth_txq *eq;
 586
 587                        eq = container_of(txq, struct sge_eth_txq, q);
 588                        t4_sge_eth_txq_egress_update(q->adap, eq, -1);
 589                } else {
 590                        struct sge_uld_txq *oq;
 591
 592                        oq = container_of(txq, struct sge_uld_txq, q);
 593                        tasklet_schedule(&oq->qresume_tsk);
 594                }
 595        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
 596                const struct cpl_fw6_msg *p = (void *)rsp;
 597
 598#ifdef CONFIG_CHELSIO_T4_DCB
 599                const struct fw_port_cmd *pcmd = (const void *)p->data;
 600                unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
 601                unsigned int action =
 602                        FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
 603
 604                if (cmd == FW_PORT_CMD &&
 605                    (action == FW_PORT_ACTION_GET_PORT_INFO ||
 606                     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
 607                        int port = FW_PORT_CMD_PORTID_G(
 608                                        be32_to_cpu(pcmd->op_to_portid));
 609                        struct net_device *dev;
 610                        int dcbxdis, state_input;
 611
 612                        dev = q->adap->port[q->adap->chan_map[port]];
 613                        dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
 614                          ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
 615                          : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
 616                               & FW_PORT_CMD_DCBXDIS32_F));
 617                        state_input = (dcbxdis
 618                                       ? CXGB4_DCB_INPUT_FW_DISABLED
 619                                       : CXGB4_DCB_INPUT_FW_ENABLED);
 620
 621                        cxgb4_dcb_state_fsm(dev, state_input);
 622                }
 623
 624                if (cmd == FW_PORT_CMD &&
 625                    action == FW_PORT_ACTION_L2_DCB_CFG)
 626                        dcb_rpl(q->adap, pcmd);
 627                else
 628#endif
 629                        if (p->type == 0)
 630                                t4_handle_fw_rpl(q->adap, p->data);
 631        } else if (opcode == CPL_L2T_WRITE_RPL) {
 632                const struct cpl_l2t_write_rpl *p = (void *)rsp;
 633
 634                do_l2t_write_rpl(q->adap, p);
 635        } else if (opcode == CPL_SMT_WRITE_RPL) {
 636                const struct cpl_smt_write_rpl *p = (void *)rsp;
 637
 638                do_smt_write_rpl(q->adap, p);
 639        } else if (opcode == CPL_SET_TCB_RPL) {
 640                const struct cpl_set_tcb_rpl *p = (void *)rsp;
 641
 642                filter_rpl(q->adap, p);
 643        } else if (opcode == CPL_ACT_OPEN_RPL) {
 644                const struct cpl_act_open_rpl *p = (void *)rsp;
 645
 646                hash_filter_rpl(q->adap, p);
 647        } else if (opcode == CPL_ABORT_RPL_RSS) {
 648                const struct cpl_abort_rpl_rss *p = (void *)rsp;
 649
 650                hash_del_filter_rpl(q->adap, p);
 651        } else if (opcode == CPL_SRQ_TABLE_RPL) {
 652                const struct cpl_srq_table_rpl *p = (void *)rsp;
 653
 654                do_srq_table_rpl(q->adap, p);
 655        } else
 656                dev_err(q->adap->pdev_dev,
 657                        "unexpected CPL %#x on FW event queue\n", opcode);
 658out:
 659        return 0;
 660}
 661
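     /* Release whichever PCI interrupt mode (MSI-X or MSI) the adapter is
      * currently using and clear the corresponding flag.
      */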
 662static void disable_msi(struct adapter *adapter)
 663{
 664        if (adapter->flags & CXGB4_USING_MSIX) {
 665                pci_disable_msix(adapter->pdev);
 666                adapter->flags &= ~CXGB4_USING_MSIX;
 667        } else if (adapter->flags & CXGB4_USING_MSI) {
 668                pci_disable_msi(adapter->pdev);
 669                adapter->flags &= ~CXGB4_USING_MSI;
 670        }
 671}
 672
 673/*
 674 * Interrupt handler for non-data events used with MSI-X.
 675 */
 676static irqreturn_t t4_nondata_intr(int irq, void *cookie)
 677{
 678        struct adapter *adap = cookie;
 679        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
 680
 681        if (v & PFSW_F) {
 682                adap->swintr = 1;
 683                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
 684        }
 685        if (adap->flags & CXGB4_MASTER_PF)
 686                t4_slow_intr_handler(adap);
 687        return IRQ_HANDLED;
 688}
 689
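     /* Set a CPU affinity hint for MSI-X vector @vec, spreading queue index
      * @idx over the CPUs local to the adapter's NUMA node. The allocated
      * cpumask is returned through @aff_mask for cxgb4_clear_msix_aff().
      */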
 690int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
 691                       cpumask_var_t *aff_mask, int idx)
 692{
 693        int rv;
 694
 695        if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
 696                dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
 697                return -ENOMEM;
 698        }
 699
 700        cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
 701                        *aff_mask);
 702
 703        rv = irq_set_affinity_hint(vec, *aff_mask);
 704        if (rv)
 705                dev_warn(adap->pdev_dev,
 706                         "irq_set_affinity_hint %u failed %d\n",
 707                         vec, rv);
 708
 709        return 0;
 710}
 711
 712void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
 713{
 714        irq_set_affinity_hint(vec, NULL);
 715        free_cpumask_var(aff_mask);
 716}
 717
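     /* Request MSI-X vectors for the firmware event queue and every Ethernet
      * Rx queue, setting CPU affinity for the latter; on failure, release
      * everything acquired so far.
      */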
 718static int request_msix_queue_irqs(struct adapter *adap)
 719{
 720        struct sge *s = &adap->sge;
 721        struct msix_info *minfo;
 722        int err, ethqidx;
 723
 724        if (s->fwevtq_msix_idx < 0)
 725                return -ENOMEM;
 726
 727        err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
 728                          t4_sge_intr_msix, 0,
 729                          adap->msix_info[s->fwevtq_msix_idx].desc,
 730                          &s->fw_evtq);
 731        if (err)
 732                return err;
 733
 734        for_each_ethrxq(s, ethqidx) {
 735                minfo = s->ethrxq[ethqidx].msix;
 736                err = request_irq(minfo->vec,
 737                                  t4_sge_intr_msix, 0,
 738                                  minfo->desc,
 739                                  &s->ethrxq[ethqidx].rspq);
 740                if (err)
 741                        goto unwind;
 742
 743                cxgb4_set_msix_aff(adap, minfo->vec,
 744                                   &minfo->aff_mask, ethqidx);
 745        }
 746        return 0;
 747
 748unwind:
 749        while (--ethqidx >= 0) {
 750                minfo = s->ethrxq[ethqidx].msix;
 751                cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
 752                free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
 753        }
 754        free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
 755        return err;
 756}
 757
 758static void free_msix_queue_irqs(struct adapter *adap)
 759{
 760        struct sge *s = &adap->sge;
 761        struct msix_info *minfo;
 762        int i;
 763
 764        free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
 765        for_each_ethrxq(s, i) {
 766                minfo = s->ethrxq[i].msix;
 767                cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
 768                free_irq(minfo->vec, &s->ethrxq[i].rspq);
 769        }
 770}
 771
 772static int setup_ppod_edram(struct adapter *adap)
 773{
 774        unsigned int param, val;
 775        int ret;
 776
  777        /* The driver sends a FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command
  778         * to check whether the firmware supports the ppod edram feature.
  779         * If the firmware returns 1, the driver enables the feature by
  780         * sending a FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with
  781         * the value 1.
  782         */
 783        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
 784                FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));
 785
 786        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 787        if (ret < 0) {
 788                dev_warn(adap->pdev_dev,
 789                         "querying PPOD_EDRAM support failed: %d\n",
 790                         ret);
 791                return -1;
 792        }
 793
 794        if (val != 1)
 795                return -1;
 796
 797        ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 798        if (ret < 0) {
 799                dev_err(adap->pdev_dev,
 800                        "setting PPOD_EDRAM failed: %d\n", ret);
 801                return -1;
 802        }
 803        return 0;
 804}
 805
 806static void adap_config_hpfilter(struct adapter *adapter)
 807{
 808        u32 param, val = 0;
 809        int ret;
 810
  811        /* Enable the HP filter region. Older firmware will fail this
  812         * request, and that is fine.
  813         */
 814        param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
 815        ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
 816                            1, &param, &val);
 817
  818        /* An error means the FW doesn't know about HP filter support;
  819         * that's not a problem, so don't return an error.
  820         */
 821        if (ret < 0)
 822                dev_err(adapter->pdev_dev,
 823                        "HP filter region isn't supported by FW\n");
 824}
 825
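     /* Program @rss into the VI's slice of the RSS indirection table and
      * enable IPv4/IPv6 2-/4-tuple and UDP hashing for it, with rss[0]
      * acting as the default queue for ingress packets that are not hashed.
      */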
 826static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
 827                            u16 rss_size, u16 viid)
 828{
 829        struct adapter *adap = pi->adapter;
 830        int ret;
 831
 832        ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
 833                                  rss_size);
 834        if (ret)
 835                return ret;
 836
 837        /* If Tunnel All Lookup isn't specified in the global RSS
 838         * Configuration, then we need to specify a default Ingress
 839         * Queue for any ingress packets which aren't hashed.  We'll
 840         * use our first ingress queue ...
 841         */
 842        return t4_config_vi_rss(adap, adap->mbox, viid,
 843                                FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
 844                                FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
 845                                FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
 846                                FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
 847                                FW_RSS_VI_CONFIG_CMD_UDPEN_F,
 848                                rss[0]);
 849}
 850
 851/**
 852 *      cxgb4_write_rss - write the RSS table for a given port
 853 *      @pi: the port
 854 *      @queues: array of queue indices for RSS
 855 *
 856 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 857 *      packets to the Rx queues in @queues.
  858 *      Should never be called before setting up the SGE Ethernet Rx queues.
 859 */
 860int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
 861{
 862        struct adapter *adapter = pi->adapter;
 863        const struct sge_eth_rxq *rxq;
 864        int i, err;
 865        u16 *rss;
 866
 867        rxq = &adapter->sge.ethrxq[pi->first_qset];
 868        rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
 869        if (!rss)
 870                return -ENOMEM;
 871
 872        /* map the queue indices to queue ids */
 873        for (i = 0; i < pi->rss_size; i++, queues++)
 874                rss[i] = rxq[*queues].rspq.abs_id;
 875
 876        err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
 877        kfree(rss);
 878        return err;
 879}
 880
 881/**
 882 *      setup_rss - configure RSS
 883 *      @adap: the adapter
 884 *
 885 *      Sets up RSS for each port.
 886 */
 887static int setup_rss(struct adapter *adap)
 888{
 889        int i, j, err;
 890
 891        for_each_port(adap, i) {
 892                const struct port_info *pi = adap2pinfo(adap, i);
 893
 894                /* Fill default values with equal distribution */
 895                for (j = 0; j < pi->rss_size; j++)
 896                        pi->rss[j] = j % pi->nqsets;
 897
 898                err = cxgb4_write_rss(pi, pi->rss);
 899                if (err)
 900                        return err;
 901        }
 902        return 0;
 903}
 904
 905/*
 906 * Return the channel of the ingress queue with the given qid.
 907 */
 908static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
 909{
 910        qid -= p->ingr_start;
 911        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
 912}
 913
 914void cxgb4_quiesce_rx(struct sge_rspq *q)
 915{
 916        if (q->handler)
 917                napi_disable(&q->napi);
 918}
 919
 920/*
 921 * Wait until all NAPI handlers are descheduled.
 922 */
 923static void quiesce_rx(struct adapter *adap)
 924{
 925        int i;
 926
 927        for (i = 0; i < adap->sge.ingr_sz; i++) {
 928                struct sge_rspq *q = adap->sge.ingr_map[i];
 929
 930                if (!q)
 931                        continue;
 932
 933                cxgb4_quiesce_rx(q);
 934        }
 935}
 936
 937/* Disable interrupt and napi handler */
 938static void disable_interrupts(struct adapter *adap)
 939{
 940        struct sge *s = &adap->sge;
 941
 942        if (adap->flags & CXGB4_FULL_INIT_DONE) {
 943                t4_intr_disable(adap);
 944                if (adap->flags & CXGB4_USING_MSIX) {
 945                        free_msix_queue_irqs(adap);
 946                        free_irq(adap->msix_info[s->nd_msix_idx].vec,
 947                                 adap);
 948                } else {
 949                        free_irq(adap->pdev->irq, adap);
 950                }
 951                quiesce_rx(adap);
 952        }
 953}
 954
 955void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
 956{
 957        if (q->handler)
 958                napi_enable(&q->napi);
 959
 960        /* 0-increment GTS to start the timer and enable interrupts */
 961        t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
 962                     SEINTARM_V(q->intr_params) |
 963                     INGRESSQID_V(q->cntxt_id));
 964}
 965
 966/*
 967 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 968 */
 969static void enable_rx(struct adapter *adap)
 970{
 971        int i;
 972
 973        for (i = 0; i < adap->sge.ingr_sz; i++) {
 974                struct sge_rspq *q = adap->sge.ingr_map[i];
 975
 976                if (!q)
 977                        continue;
 978
 979                cxgb4_enable_rx(adap, q);
 980        }
 981}
 982
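     /* Reserve an MSI-X vector for the non-data (slow path) interrupt when the
      * adapter is using MSI-X; otherwise leave nd_msix_idx at -1.
      */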
 983static int setup_non_data_intr(struct adapter *adap)
 984{
 985        int msix;
 986
 987        adap->sge.nd_msix_idx = -1;
 988        if (!(adap->flags & CXGB4_USING_MSIX))
 989                return 0;
 990
 991        /* Request MSI-X vector for non-data interrupt */
 992        msix = cxgb4_get_msix_idx_from_bmap(adap);
 993        if (msix < 0)
 994                return -ENOMEM;
 995
 996        snprintf(adap->msix_info[msix].desc,
 997                 sizeof(adap->msix_info[msix].desc),
 998                 "%s", adap->port[0]->name);
 999
1000        adap->sge.nd_msix_idx = msix;
1001        return 0;
1002}
1003
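     /* Allocate the firmware event queue and, when not using MSI-X, the
      * forwarded-interrupt queue that the FW event queue is attached to.
      */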
1004static int setup_fw_sge_queues(struct adapter *adap)
1005{
1006        struct sge *s = &adap->sge;
1007        int msix, err = 0;
1008
1009        bitmap_zero(s->starving_fl, s->egr_sz);
1010        bitmap_zero(s->txq_maperr, s->egr_sz);
1011
1012        if (adap->flags & CXGB4_USING_MSIX) {
1013                s->fwevtq_msix_idx = -1;
1014                msix = cxgb4_get_msix_idx_from_bmap(adap);
1015                if (msix < 0)
1016                        return -ENOMEM;
1017
1018                snprintf(adap->msix_info[msix].desc,
1019                         sizeof(adap->msix_info[msix].desc),
1020                         "%s-FWeventq", adap->port[0]->name);
1021        } else {
1022                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1023                                       NULL, NULL, NULL, -1);
1024                if (err)
1025                        return err;
1026                msix = -((int)s->intrq.abs_id + 1);
1027        }
1028
1029        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1030                               msix, NULL, fwevtq_handler, NULL, -1);
1031        if (err && msix >= 0)
1032                cxgb4_free_msix_idx_in_bmap(adap, msix);
1033
1034        s->fwevtq_msix_idx = msix;
1035        return err;
1036}
1037
1038/**
1039 *      setup_sge_queues - configure SGE Tx/Rx/response queues
1040 *      @adap: the adapter
1041 *
1042 *      Determines how many sets of SGE queues to use and initializes them.
1043 *      We support multiple queue sets per port if we have MSI-X, otherwise
1044 *      just one queue set per port.
1045 */
1046static int setup_sge_queues(struct adapter *adap)
1047{
1048        struct sge_uld_rxq_info *rxq_info = NULL;
1049        struct sge *s = &adap->sge;
1050        unsigned int cmplqid = 0;
1051        int err, i, j, msix = 0;
1052
1053        if (is_uld(adap))
1054                rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
1055
1056        if (!(adap->flags & CXGB4_USING_MSIX))
1057                msix = -((int)s->intrq.abs_id + 1);
1058
1059        for_each_port(adap, i) {
1060                struct net_device *dev = adap->port[i];
1061                struct port_info *pi = netdev_priv(dev);
1062                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1063                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1064
1065                for (j = 0; j < pi->nqsets; j++, q++) {
1066                        if (msix >= 0) {
1067                                msix = cxgb4_get_msix_idx_from_bmap(adap);
1068                                if (msix < 0) {
1069                                        err = msix;
1070                                        goto freeout;
1071                                }
1072
1073                                snprintf(adap->msix_info[msix].desc,
1074                                         sizeof(adap->msix_info[msix].desc),
1075                                         "%s-Rx%d", dev->name, j);
1076                                q->msix = &adap->msix_info[msix];
1077                        }
1078
1079                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1080                                               msix, &q->fl,
1081                                               t4_ethrx_handler,
1082                                               NULL,
1083                                               t4_get_tp_ch_map(adap,
1084                                                                pi->tx_chan));
1085                        if (err)
1086                                goto freeout;
1087                        q->rspq.idx = j;
1088                        memset(&q->stats, 0, sizeof(q->stats));
1089                }
1090
1091                q = &s->ethrxq[pi->first_qset];
1092                for (j = 0; j < pi->nqsets; j++, t++, q++) {
1093                        err = t4_sge_alloc_eth_txq(adap, t, dev,
1094                                        netdev_get_tx_queue(dev, j),
1095                                        q->rspq.cntxt_id,
1096                                        !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
1097                        if (err)
1098                                goto freeout;
1099                }
1100        }
1101
1102        for_each_port(adap, i) {
1103                /* Note that cmplqid below is 0 if we don't
1104                 * have RDMA queues, and that's the right value.
1105                 */
1106                if (rxq_info)
1107                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
1108
1109                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1110                                            s->fw_evtq.cntxt_id, cmplqid);
1111                if (err)
1112                        goto freeout;
1113        }
1114
1115        if (!is_t4(adap->params.chip)) {
1116                err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
 1117                                           netdev_get_tx_queue(adap->port[0], 0),
 1118                                           s->fw_evtq.cntxt_id, false);
1119                if (err)
1120                        goto freeout;
1121        }
1122
1123        t4_write_reg(adap, is_t4(adap->params.chip) ?
1124                                MPS_TRC_RSS_CONTROL_A :
1125                                MPS_T5_TRC_RSS_CONTROL_A,
1126                     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1127                     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1128        return 0;
1129freeout:
1130        dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
1131        t4_free_sge_resources(adap);
1132        return err;
1133}
1134
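     /* ndo_select_queue() handler: on DCB-enabled links map the VLAN priority
      * to a TX queue; with TC/mqprio configured, steer traffic the offload
      * queues can't handle back to the normal NIC queues; honour the
      * select_queue module parameter; otherwise fall back to netdev_pick_tx().
      */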
1135static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1136                             struct net_device *sb_dev)
1137{
1138        int txq;
1139
1140#ifdef CONFIG_CHELSIO_T4_DCB
1141        /* If a Data Center Bridging has been successfully negotiated on this
1142         * link then we'll use the skb's priority to map it to a TX Queue.
1143         * The skb's priority is determined via the VLAN Tag Priority Code
1144         * Point field.
1145         */
1146        if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
1147                u16 vlan_tci;
1148                int err;
1149
1150                err = vlan_get_tag(skb, &vlan_tci);
1151                if (unlikely(err)) {
1152                        if (net_ratelimit())
1153                                netdev_warn(dev,
1154                                            "TX Packet without VLAN Tag on DCB Link\n");
1155                        txq = 0;
1156                } else {
1157                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1158#ifdef CONFIG_CHELSIO_T4_FCOE
1159                        if (skb->protocol == htons(ETH_P_FCOE))
1160                                txq = skb->priority & 0x7;
1161#endif /* CONFIG_CHELSIO_T4_FCOE */
1162                }
1163                return txq;
1164        }
1165#endif /* CONFIG_CHELSIO_T4_DCB */
1166
1167        if (dev->num_tc) {
1168                struct port_info *pi = netdev2pinfo(dev);
1169                u8 ver, proto;
1170
1171                ver = ip_hdr(skb)->version;
1172                proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
1173                                     ip_hdr(skb)->protocol;
1174
1175                /* Send unsupported traffic pattern to normal NIC queues. */
1176                txq = netdev_pick_tx(dev, skb, sb_dev);
1177                if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
1178                    skb->encapsulation ||
1179                    cxgb4_is_ktls_skb(skb) ||
1180                    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
1181                        txq = txq % pi->nqsets;
1182
1183                return txq;
1184        }
1185
1186        if (select_queue) {
1187                txq = (skb_rx_queue_recorded(skb)
1188                        ? skb_get_rx_queue(skb)
1189                        : smp_processor_id());
1190
1191                while (unlikely(txq >= dev->real_num_tx_queues))
1192                        txq -= dev->real_num_tx_queues;
1193
1194                return txq;
1195        }
1196
1197        return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
1198}
1199
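     /* Return the index of the SGE holdoff timer value closest to @time;
      * closest_thres() below does the same for packet-count thresholds.
      */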
1200static int closest_timer(const struct sge *s, int time)
1201{
1202        int i, delta, match = 0, min_delta = INT_MAX;
1203
1204        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1205                delta = time - s->timer_val[i];
1206                if (delta < 0)
1207                        delta = -delta;
1208                if (delta < min_delta) {
1209                        min_delta = delta;
1210                        match = i;
1211                }
1212        }
1213        return match;
1214}
1215
1216static int closest_thres(const struct sge *s, int thres)
1217{
1218        int i, delta, match = 0, min_delta = INT_MAX;
1219
1220        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1221                delta = thres - s->counter_val[i];
1222                if (delta < 0)
1223                        delta = -delta;
1224                if (delta < min_delta) {
1225                        min_delta = delta;
1226                        match = i;
1227                }
1228        }
1229        return match;
1230}
1231
1232/**
1233 *      cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1234 *      @q: the Rx queue
1235 *      @us: the hold-off time in us, or 0 to disable timer
1236 *      @cnt: the hold-off packet count, or 0 to disable counter
1237 *
1238 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
1239 *      one of the two needs to be enabled for the queue to generate interrupts.
1240 */
1241int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1242                               unsigned int us, unsigned int cnt)
1243{
1244        struct adapter *adap = q->adap;
1245
1246        if ((us | cnt) == 0)
1247                cnt = 1;
1248
1249        if (cnt) {
1250                int err;
1251                u32 v, new_idx;
1252
1253                new_idx = closest_thres(&adap->sge, cnt);
1254                if (q->desc && q->pktcnt_idx != new_idx) {
1255                        /* the queue has already been created, update it */
1256                        v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1257                            FW_PARAMS_PARAM_X_V(
1258                                        FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1259                            FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1260                        err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1261                                            &v, &new_idx);
1262                        if (err)
1263                                return err;
1264                }
1265                q->pktcnt_idx = new_idx;
1266        }
1267
1268        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1269        q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1270        return 0;
1271}
1272
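     /* ndo_set_features() handler: only a NETIF_F_HW_VLAN_CTAG_RX change needs
      * hardware action, which is pushed to the VI via t4_set_rxmode(); the
      * feature flag is rolled back if that fails.
      */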
1273static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1274{
1275        netdev_features_t changed = dev->features ^ features;
1276        const struct port_info *pi = netdev_priv(dev);
1277        int err;
1278
1279        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1280                return 0;
1281
1282        err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
1283                            pi->viid_mirror, -1, -1, -1, -1,
1284                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1285        if (unlikely(err))
1286                dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1287        return err;
1288}
1289
1290static int setup_debugfs(struct adapter *adap)
1291{
1292        if (IS_ERR_OR_NULL(adap->debugfs_root))
1293                return -1;
1294
1295#ifdef CONFIG_DEBUG_FS
1296        t4_setup_debugfs(adap);
1297#endif
1298        return 0;
1299}
1300
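     /* Quiesce and free a single mirror Rx queue, releasing its MSI-X vector,
      * affinity mask and bitmap slot when MSI-X is in use.
      */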
1301static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
1302                                       struct sge_eth_rxq *mirror_rxq)
1303{
1304        if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
1305            !(adap->flags & CXGB4_SHUTTING_DOWN))
1306                cxgb4_quiesce_rx(&mirror_rxq->rspq);
1307
1308        if (adap->flags & CXGB4_USING_MSIX) {
1309                cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
1310                                     mirror_rxq->msix->aff_mask);
1311                free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
1312                cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1313        }
1314
1315        free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1316}
1317
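     /* Allocate one mirror Rx queue per mirror queue set on this port, hook up
      * MSI-X vectors and NAPI for them, and spread the mirror VI's RSS table
      * across their absolute queue IDs.
      */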
1318static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
1319{
1320        struct port_info *pi = netdev2pinfo(dev);
1321        struct adapter *adap = netdev2adap(dev);
1322        struct sge_eth_rxq *mirror_rxq;
1323        struct sge *s = &adap->sge;
1324        int ret = 0, msix = 0;
1325        u16 i, rxqid;
1326        u16 *rss;
1327
1328        if (!pi->vi_mirror_count)
1329                return 0;
1330
1331        if (s->mirror_rxq[pi->port_id])
1332                return 0;
1333
1334        mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
1335        if (!mirror_rxq)
1336                return -ENOMEM;
1337
1338        s->mirror_rxq[pi->port_id] = mirror_rxq;
1339
1340        if (!(adap->flags & CXGB4_USING_MSIX))
1341                msix = -((int)adap->sge.intrq.abs_id + 1);
1342
1343        for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
1344                mirror_rxq = &s->mirror_rxq[pi->port_id][i];
1345
1346                /* Allocate Mirror Rxqs */
1347                if (msix >= 0) {
1348                        msix = cxgb4_get_msix_idx_from_bmap(adap);
1349                        if (msix < 0) {
1350                                ret = msix;
1351                                goto out_free_queues;
1352                        }
1353
1354                        mirror_rxq->msix = &adap->msix_info[msix];
1355                        snprintf(mirror_rxq->msix->desc,
1356                                 sizeof(mirror_rxq->msix->desc),
1357                                 "%s-mirrorrxq%d", dev->name, i);
1358                }
1359
1360                init_rspq(adap, &mirror_rxq->rspq,
1361                          CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
1362                          CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
1363                          CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
1364                          CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
1365
1366                mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
1367
1368                ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
1369                                       dev, msix, &mirror_rxq->fl,
1370                                       t4_ethrx_handler, NULL, 0);
1371                if (ret)
1372                        goto out_free_msix_idx;
1373
1374                /* Setup MSI-X vectors for Mirror Rxqs */
1375                if (adap->flags & CXGB4_USING_MSIX) {
1376                        ret = request_irq(mirror_rxq->msix->vec,
1377                                          t4_sge_intr_msix, 0,
1378                                          mirror_rxq->msix->desc,
1379                                          &mirror_rxq->rspq);
1380                        if (ret)
1381                                goto out_free_rxq;
1382
1383                        cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
1384                                           &mirror_rxq->msix->aff_mask, i);
1385                }
1386
1387                /* Start NAPI for Mirror Rxqs */
1388                cxgb4_enable_rx(adap, &mirror_rxq->rspq);
1389        }
1390
1391        /* Setup RSS for Mirror Rxqs */
1392        rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
1393        if (!rss) {
1394                ret = -ENOMEM;
1395                goto out_free_queues;
1396        }
1397
1398        mirror_rxq = &s->mirror_rxq[pi->port_id][0];
1399        for (i = 0; i < pi->rss_size; i++)
1400                rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
1401
1402        ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
1403        kfree(rss);
1404        if (ret)
1405                goto out_free_queues;
1406
1407        return 0;
1408
1409out_free_rxq:
1410        free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1411
1412out_free_msix_idx:
1413        cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1414
1415out_free_queues:
1416        while (rxqid-- > 0)
1417                cxgb4_port_mirror_free_rxq(adap,
1418                                           &s->mirror_rxq[pi->port_id][rxqid]);
1419
1420        kfree(s->mirror_rxq[pi->port_id]);
1421        s->mirror_rxq[pi->port_id] = NULL;
1422        return ret;
1423}
1424
1425static void cxgb4_port_mirror_free_queues(struct net_device *dev)
1426{
1427        struct port_info *pi = netdev2pinfo(dev);
1428        struct adapter *adap = netdev2adap(dev);
1429        struct sge *s = &adap->sge;
1430        u16 i;
1431
1432        if (!pi->vi_mirror_count)
1433                return;
1434
1435        if (!s->mirror_rxq[pi->port_id])
1436                return;
1437
1438        for (i = 0; i < pi->nmirrorqsets; i++)
1439                cxgb4_port_mirror_free_rxq(adap,
1440                                           &s->mirror_rxq[pi->port_id][i]);
1441
1442        kfree(s->mirror_rxq[pi->port_id]);
1443        s->mirror_rxq[pi->port_id] = NULL;
1444}
1445
1446static int cxgb4_port_mirror_start(struct net_device *dev)
1447{
1448        struct port_info *pi = netdev2pinfo(dev);
1449        struct adapter *adap = netdev2adap(dev);
1450        int ret, idx = -1;
1451
1452        if (!pi->vi_mirror_count)
1453                return 0;
1454
1455         /* Mirror VIs can be created dynamically after the stack has
1456          * already set up Rx modes like MTU, promisc, allmulti, etc.
1457          * on the main VI. So, parse what the stack has set up on the
1458          * main VI and apply the same settings to the mirror VI.
1459          */
1460        ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
1461                            dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
1462                            (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
1463                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
1464        if (ret) {
1465                dev_err(adap->pdev_dev,
1466                         "Failed to start up Rx mode for Mirror VI 0x%x, ret: %d\n",
1467                        pi->viid_mirror, ret);
1468                return ret;
1469        }
1470
1471         /* Enable the replication bit for the device's MAC address
1472          * in the MPS TCAM, so that packets for the main VI are
1473          * also replicated to the mirror VI.
1474          */
1475        ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
1476                                    dev->dev_addr, true, NULL);
1477        if (ret) {
1478                dev_err(adap->pdev_dev,
1479                        "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
1480                        pi->viid_mirror, ret);
1481                return ret;
1482        }
1483
1484        /* Enabling a Virtual Interface can result in an interrupt
1485         * during the processing of the VI Enable command and, in some
1486         * paths, result in an attempt to issue another command in the
1487         * interrupt context. Thus, we disable interrupts during the
1488         * course of the VI Enable command ...
1489         */
1490        local_bh_disable();
1491        ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
1492                                  false);
1493        local_bh_enable();
1494        if (ret)
1495                dev_err(adap->pdev_dev,
1496                        "Failed starting Mirror VI 0x%x, ret: %d\n",
1497                        pi->viid_mirror, ret);
1498
1499        return ret;
1500}
1501
1502static void cxgb4_port_mirror_stop(struct net_device *dev)
1503{
1504        struct port_info *pi = netdev2pinfo(dev);
1505        struct adapter *adap = netdev2adap(dev);
1506
1507        if (!pi->vi_mirror_count)
1508                return;
1509
1510        t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
1511                            false);
1512}
1513
1514int cxgb4_port_mirror_alloc(struct net_device *dev)
1515{
1516        struct port_info *pi = netdev2pinfo(dev);
1517        struct adapter *adap = netdev2adap(dev);
1518        int ret = 0;
1519
1520        if (!pi->nmirrorqsets)
1521                return -EOPNOTSUPP;
1522
1523        mutex_lock(&pi->vi_mirror_mutex);
1524        if (pi->viid_mirror) {
1525                pi->vi_mirror_count++;
1526                goto out_unlock;
1527        }
1528
1529        ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
1530                                  &pi->viid_mirror);
1531        if (ret)
1532                goto out_unlock;
1533
1534        pi->vi_mirror_count = 1;
1535
1536        if (adap->flags & CXGB4_FULL_INIT_DONE) {
1537                ret = cxgb4_port_mirror_alloc_queues(dev);
1538                if (ret)
1539                        goto out_free_vi;
1540
1541                ret = cxgb4_port_mirror_start(dev);
1542                if (ret)
1543                        goto out_free_queues;
1544        }
1545
1546        mutex_unlock(&pi->vi_mirror_mutex);
1547        return 0;
1548
1549out_free_queues:
1550        cxgb4_port_mirror_free_queues(dev);
1551
1552out_free_vi:
1553        pi->vi_mirror_count = 0;
1554        t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1555        pi->viid_mirror = 0;
1556
1557out_unlock:
1558        mutex_unlock(&pi->vi_mirror_mutex);
1559        return ret;
1560}
1561
1562void cxgb4_port_mirror_free(struct net_device *dev)
1563{
1564        struct port_info *pi = netdev2pinfo(dev);
1565        struct adapter *adap = netdev2adap(dev);
1566
1567        mutex_lock(&pi->vi_mirror_mutex);
1568        if (!pi->viid_mirror)
1569                goto out_unlock;
1570
1571        if (pi->vi_mirror_count > 1) {
1572                pi->vi_mirror_count--;
1573                goto out_unlock;
1574        }
1575
1576        cxgb4_port_mirror_stop(dev);
1577        cxgb4_port_mirror_free_queues(dev);
1578
1579        pi->vi_mirror_count = 0;
1580        t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1581        pi->viid_mirror = 0;
1582
1583out_unlock:
1584        mutex_unlock(&pi->vi_mirror_mutex);
1585}
1586
1587/*
1588 * upper-layer driver support
1589 */
1590
1591/*
1592 * Allocate an active-open TID and set it to the supplied value.
1593 */
1594int cxgb4_alloc_atid(struct tid_info *t, void *data)
1595{
1596        int atid = -1;
1597
1598        spin_lock_bh(&t->atid_lock);
1599        if (t->afree) {
1600                union aopen_entry *p = t->afree;
1601
1602                atid = (p - t->atid_tab) + t->atid_base;
1603                t->afree = p->next;
1604                p->data = data;
1605                t->atids_in_use++;
1606        }
1607        spin_unlock_bh(&t->atid_lock);
1608        return atid;
1609}
1610EXPORT_SYMBOL(cxgb4_alloc_atid);
1611
1612/*
1613 * Release an active-open TID.
1614 */
1615void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1616{
1617        union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1618
1619        spin_lock_bh(&t->atid_lock);
1620        p->next = t->afree;
1621        t->afree = p;
1622        t->atids_in_use--;
1623        spin_unlock_bh(&t->atid_lock);
1624}
1625EXPORT_SYMBOL(cxgb4_free_atid);
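
/* Usage sketch (illustrative only): an upper-layer driver associates its own
 * connection context with an atid and releases the atid when the connection
 * goes away.  Free entries form a singly-linked list threaded through the
 * same union that otherwise holds the caller's data pointer, so both calls
 * are O(1) under atid_lock.  "my_conn" below is a hypothetical ULD object.
 *
 *	int atid = cxgb4_alloc_atid(&adap->tids, my_conn);
 *	if (atid < 0)
 *		return -ENOMEM;		(atid table exhausted)
 *	...
 *	cxgb4_free_atid(&adap->tids, atid);
 */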
1626
1627/*
1628 * Allocate a server TID and set it to the supplied value.
1629 */
1630int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1631{
1632        int stid;
1633
1634        spin_lock_bh(&t->stid_lock);
1635        if (family == PF_INET) {
1636                stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1637                if (stid < t->nstids)
1638                        __set_bit(stid, t->stid_bmap);
1639                else
1640                        stid = -1;
1641        } else {
1642                stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1643                if (stid < 0)
1644                        stid = -1;
1645        }
1646        if (stid >= 0) {
1647                t->stid_tab[stid].data = data;
1648                stid += t->stid_base;
1649                 /* IPv6 requires a max of 520 bits or 16 cells in the
1650                  * TCAM. This is equivalent to 4 TIDs. With CLIP enabled
1651                  * it needs only 2 TIDs.
1652                  */
1653                if (family == PF_INET6) {
1654                        t->stids_in_use += 2;
1655                        t->v6_stids_in_use += 2;
1656                } else {
1657                        t->stids_in_use++;
1658                }
1659        }
1660        spin_unlock_bh(&t->stid_lock);
1661        return stid;
1662}
1663EXPORT_SYMBOL(cxgb4_alloc_stid);
1664
1665/* Allocate a server filter TID and set it to the supplied value.
1666 */
1667int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1668{
1669        int stid;
1670
1671        spin_lock_bh(&t->stid_lock);
1672        if (family == PF_INET) {
1673                stid = find_next_zero_bit(t->stid_bmap,
1674                                t->nstids + t->nsftids, t->nstids);
1675                if (stid < (t->nstids + t->nsftids))
1676                        __set_bit(stid, t->stid_bmap);
1677                else
1678                        stid = -1;
1679        } else {
1680                stid = -1;
1681        }
1682        if (stid >= 0) {
1683                t->stid_tab[stid].data = data;
1684                stid -= t->nstids;
1685                stid += t->sftid_base;
1686                t->sftids_in_use++;
1687        }
1688        spin_unlock_bh(&t->stid_lock);
1689        return stid;
1690}
1691EXPORT_SYMBOL(cxgb4_alloc_sftid);
1692
1693/* Release a server TID.
1694 */
1695void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1696{
1697        /* Is it a server filter TID? */
1698        if (t->nsftids && (stid >= t->sftid_base)) {
1699                stid -= t->sftid_base;
1700                stid += t->nstids;
1701        } else {
1702                stid -= t->stid_base;
1703        }
1704
1705        spin_lock_bh(&t->stid_lock);
1706        if (family == PF_INET)
1707                __clear_bit(stid, t->stid_bmap);
1708        else
1709                bitmap_release_region(t->stid_bmap, stid, 1);
1710        t->stid_tab[stid].data = NULL;
1711        if (stid < t->nstids) {
1712                if (family == PF_INET6) {
1713                        t->stids_in_use -= 2;
1714                        t->v6_stids_in_use -= 2;
1715                } else {
1716                        t->stids_in_use--;
1717                }
1718        } else {
1719                t->sftids_in_use--;
1720        }
1721
1722        spin_unlock_bh(&t->stid_lock);
1723}
1724EXPORT_SYMBOL(cxgb4_free_stid);
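
/* Worked example of the stid/sftid index mapping used above (all numbers
 * hypothetical): with nstids = 64, stid_base = 0x200 and sftid_base = 0x400,
 * a cxgb4_alloc_sftid() call that grabs bitmap slot 64 (the first slot past
 * the regular stids) returns 64 - 64 + 0x400 = 0x400.  On release,
 * cxgb4_free_stid(t, 0x400, PF_INET) maps that back to bitmap slot
 * 0x400 - 0x400 + 64 = 64 and, because the slot is >= nstids, decrements
 * sftids_in_use rather than stids_in_use.
 */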
1725
1726/*
1727 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
1728 */
1729static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1730                           unsigned int tid)
1731{
1732        struct cpl_tid_release *req;
1733
1734        set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1735        req = __skb_put(skb, sizeof(*req));
1736        INIT_TP_WR(req, tid);
1737        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1738}
1739
1740/*
1741 * Queue a TID release request and if necessary schedule a work queue to
1742 * process it.
1743 */
1744static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1745                                    unsigned int tid)
1746{
1747        struct adapter *adap = container_of(t, struct adapter, tids);
1748        void **p = &t->tid_tab[tid - t->tid_base];
1749
1750        spin_lock_bh(&adap->tid_release_lock);
1751        *p = adap->tid_release_head;
1752        /* Low 2 bits encode the Tx channel number */
1753        adap->tid_release_head = (void **)((uintptr_t)p | chan);
1754        if (!adap->tid_release_task_busy) {
1755                adap->tid_release_task_busy = true;
1756                queue_work(adap->workq, &adap->tid_release_task);
1757        }
1758        spin_unlock_bh(&adap->tid_release_lock);
1759}
1760
1761/*
1762 * Process the list of pending TID release requests.
1763 */
1764static void process_tid_release_list(struct work_struct *work)
1765{
1766        struct sk_buff *skb;
1767        struct adapter *adap;
1768
1769        adap = container_of(work, struct adapter, tid_release_task);
1770
1771        spin_lock_bh(&adap->tid_release_lock);
1772        while (adap->tid_release_head) {
1773                void **p = adap->tid_release_head;
1774                unsigned int chan = (uintptr_t)p & 3;
1775                p = (void *)p - chan;
1776
1777                adap->tid_release_head = *p;
1778                *p = NULL;
1779                spin_unlock_bh(&adap->tid_release_lock);
1780
1781                while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1782                                         GFP_KERNEL)))
1783                        schedule_timeout_uninterruptible(1);
1784
1785                mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1786                t4_ofld_send(adap, skb);
1787                spin_lock_bh(&adap->tid_release_lock);
1788        }
1789        adap->tid_release_task_busy = false;
1790        spin_unlock_bh(&adap->tid_release_lock);
1791}
1792
1793/*
1794 * Release a TID and inform HW.  If we are unable to allocate the release
1795 * message we defer to a work queue.
1796 */
1797void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
1798                      unsigned short family)
1799{
1800        struct adapter *adap = container_of(t, struct adapter, tids);
1801        struct sk_buff *skb;
1802
1803        WARN_ON(tid_out_of_range(&adap->tids, tid));
1804
1805        if (t->tid_tab[tid - adap->tids.tid_base]) {
1806                t->tid_tab[tid - adap->tids.tid_base] = NULL;
1807                atomic_dec(&t->conns_in_use);
1808                if (t->hash_base && (tid >= t->hash_base)) {
1809                        if (family == AF_INET6)
1810                                atomic_sub(2, &t->hash_tids_in_use);
1811                        else
1812                                atomic_dec(&t->hash_tids_in_use);
1813                } else {
1814                        if (family == AF_INET6)
1815                                atomic_sub(2, &t->tids_in_use);
1816                        else
1817                                atomic_dec(&t->tids_in_use);
1818                }
1819        }
1820
1821        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1822        if (likely(skb)) {
1823                mk_tid_release(skb, chan, tid);
1824                t4_ofld_send(adap, skb);
1825        } else
1826                cxgb4_queue_tid_release(t, chan, tid);
1827}
1828EXPORT_SYMBOL(cxgb4_remove_tid);
1829
1830/*
1831 * Allocate and initialize the TID tables.  Returns 0 on success.
1832 */
1833static int tid_init(struct tid_info *t)
1834{
1835        struct adapter *adap = container_of(t, struct adapter, tids);
1836        unsigned int max_ftids = t->nftids + t->nsftids;
1837        unsigned int natids = t->natids;
1838        unsigned int hpftid_bmap_size;
1839        unsigned int eotid_bmap_size;
1840        unsigned int stid_bmap_size;
1841        unsigned int ftid_bmap_size;
1842        size_t size;
1843
1844        stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1845        ftid_bmap_size = BITS_TO_LONGS(t->nftids);
1846        hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
1847        eotid_bmap_size = BITS_TO_LONGS(t->neotids);
1848        size = t->ntids * sizeof(*t->tid_tab) +
1849               natids * sizeof(*t->atid_tab) +
1850               t->nstids * sizeof(*t->stid_tab) +
1851               t->nsftids * sizeof(*t->stid_tab) +
1852               stid_bmap_size * sizeof(long) +
1853               t->nhpftids * sizeof(*t->hpftid_tab) +
1854               hpftid_bmap_size * sizeof(long) +
1855               max_ftids * sizeof(*t->ftid_tab) +
1856               ftid_bmap_size * sizeof(long) +
1857               t->neotids * sizeof(*t->eotid_tab) +
1858               eotid_bmap_size * sizeof(long);
1859
1860        t->tid_tab = kvzalloc(size, GFP_KERNEL);
1861        if (!t->tid_tab)
1862                return -ENOMEM;
1863
1864        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1865        t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1866        t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1867        t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1868        t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
1869        t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
1870        t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1871        t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
1872        t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
1873        spin_lock_init(&t->stid_lock);
1874        spin_lock_init(&t->atid_lock);
1875        spin_lock_init(&t->ftid_lock);
1876
1877        t->stids_in_use = 0;
1878        t->v6_stids_in_use = 0;
1879        t->sftids_in_use = 0;
1880        t->afree = NULL;
1881        t->atids_in_use = 0;
1882        atomic_set(&t->tids_in_use, 0);
1883        atomic_set(&t->conns_in_use, 0);
1884        atomic_set(&t->hash_tids_in_use, 0);
1885        atomic_set(&t->eotids_in_use, 0);
1886
1887        /* Setup the free list for atid_tab and clear the stid bitmap. */
1888        if (natids) {
1889                while (--natids)
1890                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1891                t->afree = t->atid_tab;
1892        }
1893
1894        if (is_offload(adap)) {
1895                bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1896                /* Reserve stid 0 for T4/T5 adapters */
1897                if (!t->stid_base &&
1898                    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1899                        __set_bit(0, t->stid_bmap);
1900
1901                if (t->neotids)
1902                        bitmap_zero(t->eotid_bmap, t->neotids);
1903        }
1904
1905        if (t->nhpftids)
1906                bitmap_zero(t->hpftid_bmap, t->nhpftids);
1907        bitmap_zero(t->ftid_bmap, t->nftids);
1908        return 0;
1909}
1910
1911/**
1912 *      cxgb4_create_server - create an IP server
1913 *      @dev: the device
1914 *      @stid: the server TID
1915 *      @sip: local IP address to bind server to
1916 *      @sport: the server's TCP port
1917 *      @vlan: the VLAN header information
1918 *      @queue: queue to direct messages from this server to
1919 *
1920 *      Create an IP server for the given port and address.
1921 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1922 */
1923int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1924                        __be32 sip, __be16 sport, __be16 vlan,
1925                        unsigned int queue)
1926{
1927        unsigned int chan;
1928        struct sk_buff *skb;
1929        struct adapter *adap;
1930        struct cpl_pass_open_req *req;
1931        int ret;
1932
1933        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1934        if (!skb)
1935                return -ENOMEM;
1936
1937        adap = netdev2adap(dev);
1938        req = __skb_put(skb, sizeof(*req));
1939        INIT_TP_WR(req, 0);
1940        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1941        req->local_port = sport;
1942        req->peer_port = htons(0);
1943        req->local_ip = sip;
1944        req->peer_ip = htonl(0);
1945        chan = rxq_to_chan(&adap->sge, queue);
1946        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1947        req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1948                                SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1949        ret = t4_mgmt_tx(adap, skb);
1950        return net_xmit_eval(ret);
1951}
1952EXPORT_SYMBOL(cxgb4_create_server);
1953
1954/*      cxgb4_create_server6 - create an IPv6 server
1955 *      @dev: the device
1956 *      @stid: the server TID
1957 *      @sip: local IPv6 address to bind server to
1958 *      @sport: the server's TCP port
1959 *      @queue: queue to direct messages from this server to
1960 *
1961 *      Create an IPv6 server for the given port and address.
1962 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1963 */
1964int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1965                         const struct in6_addr *sip, __be16 sport,
1966                         unsigned int queue)
1967{
1968        unsigned int chan;
1969        struct sk_buff *skb;
1970        struct adapter *adap;
1971        struct cpl_pass_open_req6 *req;
1972        int ret;
1973
1974        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1975        if (!skb)
1976                return -ENOMEM;
1977
1978        adap = netdev2adap(dev);
1979        req = __skb_put(skb, sizeof(*req));
1980        INIT_TP_WR(req, 0);
1981        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1982        req->local_port = sport;
1983        req->peer_port = htons(0);
1984        req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1985        req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1986        req->peer_ip_hi = cpu_to_be64(0);
1987        req->peer_ip_lo = cpu_to_be64(0);
1988        chan = rxq_to_chan(&adap->sge, queue);
1989        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1990        req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1991                                SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1992        ret = t4_mgmt_tx(adap, skb);
1993        return net_xmit_eval(ret);
1994}
1995EXPORT_SYMBOL(cxgb4_create_server6);
1996
1997int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1998                        unsigned int queue, bool ipv6)
1999{
2000        struct sk_buff *skb;
2001        struct adapter *adap;
2002        struct cpl_close_listsvr_req *req;
2003        int ret;
2004
2005        adap = netdev2adap(dev);
2006
2007        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2008        if (!skb)
2009                return -ENOMEM;
2010
2011        req = __skb_put(skb, sizeof(*req));
2012        INIT_TP_WR(req, 0);
2013        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
2014        req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
2015                                LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
2016        ret = t4_mgmt_tx(adap, skb);
2017        return net_xmit_eval(ret);
2018}
2019EXPORT_SYMBOL(cxgb4_remove_server);
2020
2021/**
2022 *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2023 *      @mtus: the HW MTU table
2024 *      @mtu: the target MTU
2025 *      @idx: index of selected entry in the MTU table
2026 *
2027 *      Returns the index and the value in the HW MTU table that is closest to
2028 *      but does not exceed @mtu, unless @mtu is smaller than any value in the
2029 *      table, in which case that smallest available value is selected.
2030 */
2031unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2032                            unsigned int *idx)
2033{
2034        unsigned int i = 0;
2035
2036        while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2037                ++i;
2038        if (idx)
2039                *idx = i;
2040        return mtus[i];
2041}
2042EXPORT_SYMBOL(cxgb4_best_mtu);
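
/* Worked example (hypothetical table contents): if the relevant slice of the
 * HW MTU table is ... 1280, 1488, 1500 ... and @mtu is 1492, the loop stops
 * with mtus[i] == 1488, the largest entry that does not exceed 1492.  Only
 * when @mtu is smaller than mtus[0] does the function return an entry larger
 * than the requested MTU, namely mtus[0] itself.
 */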
2043
2044/**
2045 *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2046 *     @mtus: the HW MTU table
2047 *     @header_size: Header Size
2048 *     @data_size_max: maximum Data Segment Size
2049 *     @data_size_align: desired Data Segment Size Alignment (2^N)
2050 *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
2051 *
2052 *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
2053 *     MTU Table based solely on a Maximum MTU parameter, we break that
2054 *     parameter up into a Header Size and Maximum Data Segment Size, and
2055 *     provide a desired Data Segment Size Alignment.  If we find an MTU in
2056 *     the Hardware MTU Table which will result in a Data Segment Size with
2057 *     the requested alignment _and_ that MTU isn't "too far" from the
2058 *     closest MTU, then we'll return that rather than the closest MTU.
2059 */
2060unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
2061                                    unsigned short header_size,
2062                                    unsigned short data_size_max,
2063                                    unsigned short data_size_align,
2064                                    unsigned int *mtu_idxp)
2065{
2066        unsigned short max_mtu = header_size + data_size_max;
2067        unsigned short data_size_align_mask = data_size_align - 1;
2068        int mtu_idx, aligned_mtu_idx;
2069
2070        /* Scan the MTU Table till we find an MTU which is larger than our
2071         * Maximum MTU or we reach the end of the table.  Along the way,
2072         * record the last MTU found, if any, which will result in a Data
2073         * Segment Length matching the requested alignment.
2074         */
2075        for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
2076                unsigned short data_size = mtus[mtu_idx] - header_size;
2077
2078                /* If this MTU minus the Header Size would result in a
2079                 * Data Segment Size of the desired alignment, remember it.
2080                 */
2081                if ((data_size & data_size_align_mask) == 0)
2082                        aligned_mtu_idx = mtu_idx;
2083
2084                /* If we're not at the end of the Hardware MTU Table and the
2085                 * next element is larger than our Maximum MTU, drop out of
2086                 * the loop.
2087                 */
2088                 if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
2089                        break;
2090        }
2091
2092        /* If we fell out of the loop because we ran to the end of the table,
2093         * then we just have to use the last [largest] entry.
2094         */
2095        if (mtu_idx == NMTUS)
2096                mtu_idx--;
2097
2098        /* If we found an MTU which resulted in the requested Data Segment
2099         * Length alignment and that's "not far" from the largest MTU which is
2100         * less than or equal to the maximum MTU, then use that.
2101         */
2102        if (aligned_mtu_idx >= 0 &&
2103            mtu_idx - aligned_mtu_idx <= 1)
2104                mtu_idx = aligned_mtu_idx;
2105
2106        /* If the caller has passed in an MTU Index pointer, pass the
2107         * MTU Index back.  Return the MTU value.
2108         */
2109        if (mtu_idxp)
2110                *mtu_idxp = mtu_idx;
2111        return mtus[mtu_idx];
2112}
2113EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
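
/* Worked example (all numbers hypothetical): with header_size = 40,
 * data_size_max = 1460 (so max_mtu = 1500), data_size_align = 8 and a table
 * containing ... 1488, 1500, 9000 ...: 1500 - 40 = 1460 is not a multiple of
 * 8, but 1488 - 40 = 1448 is.  The scan ends with mtu_idx on 1500 and
 * aligned_mtu_idx one entry earlier on 1488, which is "not far" (difference
 * <= 1), so 1488 is returned in preference to the closer but unaligned 1500.
 */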
2114
2115/**
2116 *      cxgb4_port_chan - get the HW channel of a port
2117 *      @dev: the net device for the port
2118 *
2119 *      Return the HW Tx channel of the given port.
2120 */
2121unsigned int cxgb4_port_chan(const struct net_device *dev)
2122{
2123        return netdev2pinfo(dev)->tx_chan;
2124}
2125EXPORT_SYMBOL(cxgb4_port_chan);
2126
2127/**
2128 *      cxgb4_port_e2cchan - get the HW c-channel of a port
2129 *      @dev: the net device for the port
2130 *
2131 *      Return the HW RX c-channel of the given port.
2132 */
2133unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
2134{
2135        return netdev2pinfo(dev)->rx_cchan;
2136}
2137EXPORT_SYMBOL(cxgb4_port_e2cchan);
2138
2139unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2140{
2141        struct adapter *adap = netdev2adap(dev);
2142        u32 v1, v2, lp_count, hp_count;
2143
2144        v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2145        v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2146        if (is_t4(adap->params.chip)) {
2147                lp_count = LP_COUNT_G(v1);
2148                hp_count = HP_COUNT_G(v1);
2149        } else {
2150                lp_count = LP_COUNT_T5_G(v1);
2151                hp_count = HP_COUNT_T5_G(v2);
2152        }
2153        return lpfifo ? lp_count : hp_count;
2154}
2155EXPORT_SYMBOL(cxgb4_dbfifo_count);
2156
2157/**
2158 *      cxgb4_port_viid - get the VI id of a port
2159 *      @dev: the net device for the port
2160 *
2161 *      Return the VI id of the given port.
2162 */
2163unsigned int cxgb4_port_viid(const struct net_device *dev)
2164{
2165        return netdev2pinfo(dev)->viid;
2166}
2167EXPORT_SYMBOL(cxgb4_port_viid);
2168
2169/**
2170 *      cxgb4_port_idx - get the index of a port
2171 *      @dev: the net device for the port
2172 *
2173 *      Return the index of the given port.
2174 */
2175unsigned int cxgb4_port_idx(const struct net_device *dev)
2176{
2177        return netdev2pinfo(dev)->port_id;
2178}
2179EXPORT_SYMBOL(cxgb4_port_idx);
2180
2181void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2182                         struct tp_tcp_stats *v6)
2183{
2184        struct adapter *adap = pci_get_drvdata(pdev);
2185
2186        spin_lock(&adap->stats_lock);
2187        t4_tp_get_tcp_stats(adap, v4, v6, false);
2188        spin_unlock(&adap->stats_lock);
2189}
2190EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2191
2192void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2193                      const unsigned int *pgsz_order)
2194{
2195        struct adapter *adap = netdev2adap(dev);
2196
2197        t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2198        t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2199                     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2200                     HPZ3_V(pgsz_order[3]));
2201}
2202EXPORT_SYMBOL(cxgb4_iscsi_init);
2203
2204int cxgb4_flush_eq_cache(struct net_device *dev)
2205{
2206        struct adapter *adap = netdev2adap(dev);
2207
2208        return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
2209}
2210EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2211
2212static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2213{
2214        u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
2215        __be64 indices;
2216        int ret;
2217
2218        spin_lock(&adap->win0_lock);
2219        ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2220                           sizeof(indices), (__be32 *)&indices,
2221                           T4_MEMORY_READ);
2222        spin_unlock(&adap->win0_lock);
2223        if (!ret) {
2224                *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2225                *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2226        }
2227        return ret;
2228}
2229
2230int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2231                        u16 size)
2232{
2233        struct adapter *adap = netdev2adap(dev);
2234        u16 hw_pidx, hw_cidx;
2235        int ret;
2236
2237        ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2238        if (ret)
2239                goto out;
2240
2241        if (pidx != hw_pidx) {
2242                u16 delta;
2243                u32 val;
2244
2245                if (pidx >= hw_pidx)
2246                        delta = pidx - hw_pidx;
2247                else
2248                        delta = size - hw_pidx + pidx;
2249
2250                if (is_t4(adap->params.chip))
2251                        val = PIDX_V(delta);
2252                else
2253                        val = PIDX_T5_V(delta);
2254                wmb();
2255                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2256                             QID_V(qid) | val);
2257        }
2258out:
2259        return ret;
2260}
2261EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
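
/* The producer-index delta above accounts for wrap-around of the circular
 * egress queue.  Worked example (hypothetical values): with size = 1024,
 * hw_pidx = 1000 and a software pidx of 8, the hardware is behind by
 * 1024 - 1000 + 8 = 32 descriptors, so PIDX is advanced by 32 through the
 * SGE kernel doorbell register.
 */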
2262
2263int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2264{
2265        u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
2266        u32 edc0_end, edc1_end, mc0_end, mc1_end;
2267        u32 offset, memtype, memaddr;
2268        struct adapter *adap;
2269        u32 hma_size = 0;
2270        int ret;
2271
2272        adap = netdev2adap(dev);
2273
2274        offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2275
2276        /* Figure out where the offset lands in the Memory Type/Address scheme.
2277         * This code assumes that the memory is laid out starting at offset 0
2278         * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
2279         * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
2280         * MC0, and some have both MC0 and MC1.
2281         */
2282        size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2283        edc0_size = EDRAM0_SIZE_G(size) << 20;
2284        size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2285        edc1_size = EDRAM1_SIZE_G(size) << 20;
2286        size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2287        mc0_size = EXT_MEM0_SIZE_G(size) << 20;
2288
2289        if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
2290                size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2291                hma_size = EXT_MEM1_SIZE_G(size) << 20;
2292        }
2293        edc0_end = edc0_size;
2294        edc1_end = edc0_end + edc1_size;
2295        mc0_end = edc1_end + mc0_size;
2296
2297        if (offset < edc0_end) {
2298                memtype = MEM_EDC0;
2299                memaddr = offset;
2300        } else if (offset < edc1_end) {
2301                memtype = MEM_EDC1;
2302                memaddr = offset - edc0_end;
2303        } else {
2304                if (hma_size && (offset < (edc1_end + hma_size))) {
2305                        memtype = MEM_HMA;
2306                        memaddr = offset - edc1_end;
2307                } else if (offset < mc0_end) {
2308                        memtype = MEM_MC0;
2309                        memaddr = offset - edc1_end;
2310                } else if (is_t5(adap->params.chip)) {
2311                        size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2312                        mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2313                        mc1_end = mc0_end + mc1_size;
2314                        if (offset < mc1_end) {
2315                                memtype = MEM_MC1;
2316                                memaddr = offset - mc0_end;
2317                        } else {
2318                                /* offset beyond the end of any memory */
2319                                goto err;
2320                        }
2321                } else {
2322                        /* T4/T6 only has a single memory channel */
2323                        goto err;
2324                }
2325        }
2326
2327        spin_lock(&adap->win0_lock);
2328        ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2329        spin_unlock(&adap->win0_lock);
2330        return ret;
2331
2332err:
2333        dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2334                stag, offset);
2335        return -EINVAL;
2336}
2337EXPORT_SYMBOL(cxgb4_read_tpte);
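
/* Worked example of the memory-map walk above (sizes hypothetical): with
 * EDC0 = 256 MB, EDC1 = 256 MB, no HMA and MC0 = 4 GB, a stag whose computed
 * offset is 300 MB lies past edc0_end (256 MB) but before edc1_end (512 MB),
 * so the TPTE is read from MEM_EDC1 at address 300 MB - 256 MB = 44 MB.  An
 * offset beyond every configured region takes the err path and returns
 * -EINVAL.
 */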
2338
2339u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2340{
2341        u32 hi, lo;
2342        struct adapter *adap;
2343
2344        adap = netdev2adap(dev);
2345        lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2346        hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2347
2348        return ((u64)hi << 32) | (u64)lo;
2349}
2350EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2351
2352int cxgb4_bar2_sge_qregs(struct net_device *dev,
2353                         unsigned int qid,
2354                         enum cxgb4_bar2_qtype qtype,
2355                         int user,
2356                         u64 *pbar2_qoffset,
2357                         unsigned int *pbar2_qid)
2358{
2359        return t4_bar2_sge_qregs(netdev2adap(dev),
2360                                 qid,
2361                                 (qtype == CXGB4_BAR2_QTYPE_EGRESS
2362                                  ? T4_BAR2_QTYPE_EGRESS
2363                                  : T4_BAR2_QTYPE_INGRESS),
2364                                 user,
2365                                 pbar2_qoffset,
2366                                 pbar2_qid);
2367}
2368EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2369
2370static struct pci_driver cxgb4_driver;
2371
2372static void check_neigh_update(struct neighbour *neigh)
2373{
2374        const struct device *parent;
2375        const struct net_device *netdev = neigh->dev;
2376
2377        if (is_vlan_dev(netdev))
2378                netdev = vlan_dev_real_dev(netdev);
2379        parent = netdev->dev.parent;
2380        if (parent && parent->driver == &cxgb4_driver.driver)
2381                t4_l2t_update(dev_get_drvdata(parent), neigh);
2382}
2383
2384static int netevent_cb(struct notifier_block *nb, unsigned long event,
2385                       void *data)
2386{
2387        switch (event) {
2388        case NETEVENT_NEIGH_UPDATE:
2389                check_neigh_update(data);
2390                break;
2391        case NETEVENT_REDIRECT:
2392        default:
2393                break;
2394        }
2395        return 0;
2396}
2397
2398static bool netevent_registered;
2399static struct notifier_block cxgb4_netevent_nb = {
2400        .notifier_call = netevent_cb
2401};
2402
2403static void drain_db_fifo(struct adapter *adap, int usecs)
2404{
2405        u32 v1, v2, lp_count, hp_count;
2406
2407        do {
2408                v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2409                v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2410                if (is_t4(adap->params.chip)) {
2411                        lp_count = LP_COUNT_G(v1);
2412                        hp_count = HP_COUNT_G(v1);
2413                } else {
2414                        lp_count = LP_COUNT_T5_G(v1);
2415                        hp_count = HP_COUNT_T5_G(v2);
2416                }
2417
2418                if (lp_count == 0 && hp_count == 0)
2419                        break;
2420                set_current_state(TASK_UNINTERRUPTIBLE);
2421                schedule_timeout(usecs_to_jiffies(usecs));
2422        } while (1);
2423}
2424
2425static void disable_txq_db(struct sge_txq *q)
2426{
2427        unsigned long flags;
2428
2429        spin_lock_irqsave(&q->db_lock, flags);
2430        q->db_disabled = 1;
2431        spin_unlock_irqrestore(&q->db_lock, flags);
2432}
2433
2434static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2435{
2436        spin_lock_irq(&q->db_lock);
2437        if (q->db_pidx_inc) {
2438                /* Make sure that all writes to the TX descriptors
2439                 * are committed before we tell HW about them.
2440                 */
2441                wmb();
2442                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2443                             QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2444                q->db_pidx_inc = 0;
2445        }
2446        q->db_disabled = 0;
2447        spin_unlock_irq(&q->db_lock);
2448}
2449
2450static void disable_dbs(struct adapter *adap)
2451{
2452        int i;
2453
2454        for_each_ethrxq(&adap->sge, i)
2455                disable_txq_db(&adap->sge.ethtxq[i].q);
2456        if (is_offload(adap)) {
2457                struct sge_uld_txq_info *txq_info =
2458                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2459
2460                if (txq_info) {
2461                        for_each_ofldtxq(&adap->sge, i) {
2462                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2463
2464                                disable_txq_db(&txq->q);
2465                        }
2466                }
2467        }
2468        for_each_port(adap, i)
2469                disable_txq_db(&adap->sge.ctrlq[i].q);
2470}
2471
2472static void enable_dbs(struct adapter *adap)
2473{
2474        int i;
2475
2476        for_each_ethrxq(&adap->sge, i)
2477                enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2478        if (is_offload(adap)) {
2479                struct sge_uld_txq_info *txq_info =
2480                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2481
2482                if (txq_info) {
2483                        for_each_ofldtxq(&adap->sge, i) {
2484                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2485
2486                                enable_txq_db(adap, &txq->q);
2487                        }
2488                }
2489        }
2490        for_each_port(adap, i)
2491                enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2492}
2493
2494static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2495{
2496        enum cxgb4_uld type = CXGB4_ULD_RDMA;
2497
2498        if (adap->uld && adap->uld[type].handle)
2499                adap->uld[type].control(adap->uld[type].handle, cmd);
2500}
2501
2502static void process_db_full(struct work_struct *work)
2503{
2504        struct adapter *adap;
2505
2506        adap = container_of(work, struct adapter, db_full_task);
2507
2508        drain_db_fifo(adap, dbfifo_drain_delay);
2509        enable_dbs(adap);
2510        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2511        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2512                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2513                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2514                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2515        else
2516                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2517                                 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2518}
2519
2520static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2521{
2522        u16 hw_pidx, hw_cidx;
2523        int ret;
2524
2525        spin_lock_irq(&q->db_lock);
2526        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2527        if (ret)
2528                goto out;
2529        if (q->db_pidx != hw_pidx) {
2530                u16 delta;
2531                u32 val;
2532
2533                if (q->db_pidx >= hw_pidx)
2534                        delta = q->db_pidx - hw_pidx;
2535                else
2536                        delta = q->size - hw_pidx + q->db_pidx;
2537
2538                if (is_t4(adap->params.chip))
2539                        val = PIDX_V(delta);
2540                else
2541                        val = PIDX_T5_V(delta);
2542                wmb();
2543                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2544                             QID_V(q->cntxt_id) | val);
2545        }
2546out:
2547        q->db_disabled = 0;
2548        q->db_pidx_inc = 0;
2549        spin_unlock_irq(&q->db_lock);
2550        if (ret)
2551                CH_WARN(adap, "DB drop recovery failed.\n");
2552}
2553
2554static void recover_all_queues(struct adapter *adap)
2555{
2556        int i;
2557
2558        for_each_ethrxq(&adap->sge, i)
2559                sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2560        if (is_offload(adap)) {
2561                struct sge_uld_txq_info *txq_info =
2562                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2563                if (txq_info) {
2564                        for_each_ofldtxq(&adap->sge, i) {
2565                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2566
2567                                sync_txq_pidx(adap, &txq->q);
2568                        }
2569                }
2570        }
2571        for_each_port(adap, i)
2572                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2573}
2574
2575static void process_db_drop(struct work_struct *work)
2576{
2577        struct adapter *adap;
2578
2579        adap = container_of(work, struct adapter, db_drop_task);
2580
2581        if (is_t4(adap->params.chip)) {
2582                drain_db_fifo(adap, dbfifo_drain_delay);
2583                notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2584                drain_db_fifo(adap, dbfifo_drain_delay);
2585                recover_all_queues(adap);
2586                drain_db_fifo(adap, dbfifo_drain_delay);
2587                enable_dbs(adap);
2588                notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2589        } else if (is_t5(adap->params.chip)) {
2590                u32 dropped_db = t4_read_reg(adap, 0x010ac);
2591                u16 qid = (dropped_db >> 15) & 0x1ffff;
2592                u16 pidx_inc = dropped_db & 0x1fff;
2593                u64 bar2_qoffset;
2594                unsigned int bar2_qid;
2595                int ret;
2596
2597                ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2598                                        0, &bar2_qoffset, &bar2_qid);
2599                if (ret)
2600                         dev_err(adap->pdev_dev,
2601                                 "doorbell drop recovery: qid=%d, pidx_inc=%d\n", qid, pidx_inc);
2602                else
2603                        writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2604                               adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2605
2606                /* Re-enable BAR2 WC */
2607                 t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
2608        }
2609
2610        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2611                t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2612}
2613
2614void t4_db_full(struct adapter *adap)
2615{
2616        if (is_t4(adap->params.chip)) {
2617                disable_dbs(adap);
2618                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2619                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2620                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2621                queue_work(adap->workq, &adap->db_full_task);
2622        }
2623}
2624
2625void t4_db_dropped(struct adapter *adap)
2626{
2627        if (is_t4(adap->params.chip)) {
2628                disable_dbs(adap);
2629                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2630        }
2631        queue_work(adap->workq, &adap->db_drop_task);
2632}
2633
2634void t4_register_netevent_notifier(void)
2635{
2636        if (!netevent_registered) {
2637                register_netevent_notifier(&cxgb4_netevent_nb);
2638                netevent_registered = true;
2639        }
2640}
2641
2642static void detach_ulds(struct adapter *adap)
2643{
2644        unsigned int i;
2645
2646        if (!is_uld(adap))
2647                return;
2648
2649        mutex_lock(&uld_mutex);
2650        list_del(&adap->list_node);
2651
2652        for (i = 0; i < CXGB4_ULD_MAX; i++)
2653                if (adap->uld && adap->uld[i].handle)
2654                        adap->uld[i].state_change(adap->uld[i].handle,
2655                                             CXGB4_STATE_DETACH);
2656
2657        if (netevent_registered && list_empty(&adapter_list)) {
2658                unregister_netevent_notifier(&cxgb4_netevent_nb);
2659                netevent_registered = false;
2660        }
2661        mutex_unlock(&uld_mutex);
2662}
2663
2664static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2665{
2666        unsigned int i;
2667
2668        mutex_lock(&uld_mutex);
2669        for (i = 0; i < CXGB4_ULD_MAX; i++)
2670                if (adap->uld && adap->uld[i].handle)
2671                        adap->uld[i].state_change(adap->uld[i].handle,
2672                                                  new_state);
2673        mutex_unlock(&uld_mutex);
2674}
2675
2676#if IS_ENABLED(CONFIG_IPV6)
2677static int cxgb4_inet6addr_handler(struct notifier_block *this,
2678                                   unsigned long event, void *data)
2679{
2680        struct inet6_ifaddr *ifa = data;
2681        struct net_device *event_dev = ifa->idev->dev;
2682        const struct device *parent = NULL;
2683#if IS_ENABLED(CONFIG_BONDING)
2684        struct adapter *adap;
2685#endif
2686        if (is_vlan_dev(event_dev))
2687                event_dev = vlan_dev_real_dev(event_dev);
2688#if IS_ENABLED(CONFIG_BONDING)
2689        if (event_dev->flags & IFF_MASTER) {
2690                list_for_each_entry(adap, &adapter_list, list_node) {
2691                        switch (event) {
2692                        case NETDEV_UP:
2693                                cxgb4_clip_get(adap->port[0],
2694                                               (const u32 *)ifa, 1);
2695                                break;
2696                        case NETDEV_DOWN:
2697                                cxgb4_clip_release(adap->port[0],
2698                                                   (const u32 *)ifa, 1);
2699                                break;
2700                        default:
2701                                break;
2702                        }
2703                }
2704                return NOTIFY_OK;
2705        }
2706#endif
2707
2708        if (event_dev)
2709                parent = event_dev->dev.parent;
2710
2711        if (parent && parent->driver == &cxgb4_driver.driver) {
2712                switch (event) {
2713                case NETDEV_UP:
2714                        cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2715                        break;
2716                case NETDEV_DOWN:
2717                        cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2718                        break;
2719                default:
2720                        break;
2721                }
2722        }
2723        return NOTIFY_OK;
2724}
2725
2726static bool inet6addr_registered;
2727static struct notifier_block cxgb4_inet6addr_notifier = {
2728        .notifier_call = cxgb4_inet6addr_handler
2729};
2730
2731static void update_clip(const struct adapter *adap)
2732{
2733        int i;
2734        struct net_device *dev;
2735        int ret;
2736
2737        rcu_read_lock();
2738
2739        for (i = 0; i < MAX_NPORTS; i++) {
2740                dev = adap->port[i];
2741                ret = 0;
2742
2743                if (dev)
2744                        ret = cxgb4_update_root_dev_clip(dev);
2745
2746                if (ret < 0)
2747                        break;
2748        }
2749        rcu_read_unlock();
2750}
2751#endif /* IS_ENABLED(CONFIG_IPV6) */
2752
2753/**
2754 *      cxgb_up - enable the adapter
2755 *      @adap: adapter being enabled
2756 *
2757 *      Called when the first port is enabled, this function performs the
2758 *      actions necessary to make an adapter operational, such as completing
2759 *      the initialization of HW modules, and enabling interrupts.
2760 *
2761 *      Must be called with the rtnl lock held.
2762 */
2763static int cxgb_up(struct adapter *adap)
2764{
2765        struct sge *s = &adap->sge;
2766        int err;
2767
2768        mutex_lock(&uld_mutex);
2769        err = setup_sge_queues(adap);
2770        if (err)
2771                goto rel_lock;
2772        err = setup_rss(adap);
2773        if (err)
2774                goto freeq;
2775
2776        if (adap->flags & CXGB4_USING_MSIX) {
2777                if (s->nd_msix_idx < 0) {
2778                        err = -ENOMEM;
2779                        goto irq_err;
2780                }
2781
2782                err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2783                                  t4_nondata_intr, 0,
2784                                  adap->msix_info[s->nd_msix_idx].desc, adap);
2785                if (err)
2786                        goto irq_err;
2787
2788                err = request_msix_queue_irqs(adap);
2789                if (err)
2790                        goto irq_err_free_nd_msix;
2791        } else {
2792                err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2793                                  (adap->flags & CXGB4_USING_MSI) ? 0
2794                                                                  : IRQF_SHARED,
2795                                  adap->port[0]->name, adap);
2796                if (err)
2797                        goto irq_err;
2798        }
2799
2800        enable_rx(adap);
2801        t4_sge_start(adap);
2802        t4_intr_enable(adap);
2803        adap->flags |= CXGB4_FULL_INIT_DONE;
2804        mutex_unlock(&uld_mutex);
2805
2806        notify_ulds(adap, CXGB4_STATE_UP);
2807#if IS_ENABLED(CONFIG_IPV6)
2808        update_clip(adap);
2809#endif
2810        return err;
2811
2812irq_err_free_nd_msix:
2813        free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2814irq_err:
2815        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2816freeq:
2817        t4_free_sge_resources(adap);
2818rel_lock:
2819        mutex_unlock(&uld_mutex);
2820        return err;
2821}
2822
2823static void cxgb_down(struct adapter *adapter)
2824{
2825        cancel_work_sync(&adapter->tid_release_task);
2826        cancel_work_sync(&adapter->db_full_task);
2827        cancel_work_sync(&adapter->db_drop_task);
2828        adapter->tid_release_task_busy = false;
2829        adapter->tid_release_head = NULL;
2830
2831        t4_sge_stop(adapter);
2832        t4_free_sge_resources(adapter);
2833
2834        adapter->flags &= ~CXGB4_FULL_INIT_DONE;
2835}
2836
2837/*
2838 * net_device operations
2839 */
2840static int cxgb_open(struct net_device *dev)
2841{
2842        struct port_info *pi = netdev_priv(dev);
2843        struct adapter *adapter = pi->adapter;
2844        int err;
2845
2846        netif_carrier_off(dev);
2847
2848        if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
2849                err = cxgb_up(adapter);
2850                if (err < 0)
2851                        return err;
2852        }
2853
2854        /* It's possible that the basic port information could have
2855         * changed since we first read it.
2856         */
2857        err = t4_update_port_info(pi);
2858        if (err < 0)
2859                return err;
2860
2861        err = link_start(dev);
2862        if (err)
2863                return err;
2864
2865        if (pi->nmirrorqsets) {
2866                mutex_lock(&pi->vi_mirror_mutex);
2867                err = cxgb4_port_mirror_alloc_queues(dev);
2868                if (err)
2869                        goto out_unlock;
2870
2871                err = cxgb4_port_mirror_start(dev);
2872                if (err)
2873                        goto out_free_queues;
2874                mutex_unlock(&pi->vi_mirror_mutex);
2875        }
2876
2877        netif_tx_start_all_queues(dev);
2878        return 0;
2879
2880out_free_queues:
2881        cxgb4_port_mirror_free_queues(dev);
2882
2883out_unlock:
2884        mutex_unlock(&pi->vi_mirror_mutex);
2885        return err;
2886}
2887
2888static int cxgb_close(struct net_device *dev)
2889{
2890        struct port_info *pi = netdev_priv(dev);
2891        struct adapter *adapter = pi->adapter;
2892        int ret;
2893
2894        netif_tx_stop_all_queues(dev);
2895        netif_carrier_off(dev);
2896        ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2897                                  false, false, false);
2898#ifdef CONFIG_CHELSIO_T4_DCB
2899        cxgb4_dcb_reset(dev);
2900        dcb_tx_queue_prio_enable(dev, false);
2901#endif
2902        if (ret)
2903                return ret;
2904
2905        if (pi->nmirrorqsets) {
2906                mutex_lock(&pi->vi_mirror_mutex);
2907                cxgb4_port_mirror_stop(dev);
2908                cxgb4_port_mirror_free_queues(dev);
2909                mutex_unlock(&pi->vi_mirror_mutex);
2910        }
2911
2912        return 0;
2913}
2914
2915int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2916                __be32 sip, __be16 sport, __be16 vlan,
2917                unsigned int queue, unsigned char port, unsigned char mask)
2918{
2919        int ret;
2920        struct filter_entry *f;
2921        struct adapter *adap;
2922        int i;
2923        u8 *val;
2924
2925        adap = netdev2adap(dev);
2926
2927        /* Adjust stid to correct filter index */
2928        stid -= adap->tids.sftid_base;
2929        stid += adap->tids.nftids;
2930
2931        /* Check to make sure the filter requested is writable ...
2932         */
2933        f = &adap->tids.ftid_tab[stid];
2934        ret = writable_filter(f);
2935        if (ret)
2936                return ret;
2937
2938        /* Clear out any old resources being used by the filter before
2939         * we start constructing the new filter.
2940         */
2941        if (f->valid)
2942                clear_filter(adap, f);
2943
2944        /* Clear out filter specifications */
2945        memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2946        f->fs.val.lport = be16_to_cpu(sport);
2947        f->fs.mask.lport  = ~0;
2948        val = (u8 *)&sip;
2949        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2950                for (i = 0; i < 4; i++) {
2951                        f->fs.val.lip[i] = val[i];
2952                        f->fs.mask.lip[i] = ~0;
2953                }
2954                if (adap->params.tp.vlan_pri_map & PORT_F) {
2955                        f->fs.val.iport = port;
2956                        f->fs.mask.iport = mask;
2957                }
2958        }
2959
2960        if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2961                f->fs.val.proto = IPPROTO_TCP;
2962                f->fs.mask.proto = ~0;
2963        }
2964
2965        f->fs.dirsteer = 1;
2966        f->fs.iq = queue;
2967        /* Mark filter as locked */
2968        f->locked = 1;
2969        f->fs.rpttid = 1;
2970
2971        /* Save the actual tid. We need this to get the corresponding
2972         * filter entry structure in filter_rpl.
2973         */
2974        f->tid = stid + adap->tids.ftid_base;
2975        ret = set_filter_wr(adap, stid);
2976        if (ret) {
2977                clear_filter(adap, f);
2978                return ret;
2979        }
2980
2981        return 0;
2982}
2983EXPORT_SYMBOL(cxgb4_create_server_filter);
2984
2985int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2986                unsigned int queue, bool ipv6)
2987{
2988        struct filter_entry *f;
2989        struct adapter *adap;
2990
2991        adap = netdev2adap(dev);
2992
2993        /* Adjust stid to correct filter index */
2994        stid -= adap->tids.sftid_base;
2995        stid += adap->tids.nftids;
2996
2997        f = &adap->tids.ftid_tab[stid];
2998        /* Unlock the filter */
2999        f->locked = 0;
3000
3001        return delete_filter(adap, stid);
3002}
3003EXPORT_SYMBOL(cxgb4_remove_server_filter);
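
    /* Illustrative sketch (not called anywhere in this driver): how an upper
     * layer might use the two exported helpers above to steer a server's
     * traffic to a chosen ingress queue and later remove the filter again.
     * The function name and the stid/address/port/queue values a caller would
     * pass are hypothetical.
     */
    static inline int __maybe_unused
    cxgb4_example_server_steering(const struct net_device *dev,
                                  unsigned int stid, __be32 sip, __be16 sport,
                                  unsigned int rxq)
    {
            int ret;

            /* vlan = 0: no VLAN match; port/mask = 0: match any ingress port */
            ret = cxgb4_create_server_filter(dev, stid, sip, sport, 0,
                                             rxq, 0, 0);
            if (ret)
                    return ret;

            /* traffic to (sip, sport) is now steered to ingress queue rxq */

            return cxgb4_remove_server_filter(dev, stid, rxq, false);
    }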
3004
3005static void cxgb_get_stats(struct net_device *dev,
3006                           struct rtnl_link_stats64 *ns)
3007{
3008        struct port_stats stats;
3009        struct port_info *p = netdev_priv(dev);
3010        struct adapter *adapter = p->adapter;
3011
3012        /* Block retrieving statistics during EEH error
3013         * recovery. Otherwise, the recovery might fail
3014         * and the PCI device will be removed permanently.
3015         */
3016        spin_lock(&adapter->stats_lock);
3017        if (!netif_device_present(dev)) {
3018                spin_unlock(&adapter->stats_lock);
3019                return;
3020        }
3021        t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
3022                                 &p->stats_base);
3023        spin_unlock(&adapter->stats_lock);
3024
3025        ns->tx_bytes   = stats.tx_octets;
3026        ns->tx_packets = stats.tx_frames;
3027        ns->rx_bytes   = stats.rx_octets;
3028        ns->rx_packets = stats.rx_frames;
3029        ns->multicast  = stats.rx_mcast_frames;
3030
3031        /* detailed rx_errors */
3032        ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3033                               stats.rx_runt;
3034        ns->rx_over_errors   = 0;
3035        ns->rx_crc_errors    = stats.rx_fcs_err;
3036        ns->rx_frame_errors  = stats.rx_symbol_err;
3037        ns->rx_dropped       = stats.rx_ovflow0 + stats.rx_ovflow1 +
3038                               stats.rx_ovflow2 + stats.rx_ovflow3 +
3039                               stats.rx_trunc0 + stats.rx_trunc1 +
3040                               stats.rx_trunc2 + stats.rx_trunc3;
3041        ns->rx_missed_errors = 0;
3042
3043        /* detailed tx_errors */
3044        ns->tx_aborted_errors   = 0;
3045        ns->tx_carrier_errors   = 0;
3046        ns->tx_fifo_errors      = 0;
3047        ns->tx_heartbeat_errors = 0;
3048        ns->tx_window_errors    = 0;
3049
3050        ns->tx_errors = stats.tx_error_frames;
3051        ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3052                ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3053}
3054
3055static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3056{
3057        unsigned int mbox;
3058        int ret = 0, prtad, devad;
3059        struct port_info *pi = netdev_priv(dev);
3060        struct adapter *adapter = pi->adapter;
3061        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3062
3063        switch (cmd) {
3064        case SIOCGMIIPHY:
3065                if (pi->mdio_addr < 0)
3066                        return -EOPNOTSUPP;
3067                data->phy_id = pi->mdio_addr;
3068                break;
3069        case SIOCGMIIREG:
3070        case SIOCSMIIREG:
3071                if (mdio_phy_id_is_c45(data->phy_id)) {
3072                        prtad = mdio_phy_id_prtad(data->phy_id);
3073                        devad = mdio_phy_id_devad(data->phy_id);
3074                } else if (data->phy_id < 32) {
3075                        prtad = data->phy_id;
3076                        devad = 0;
3077                        data->reg_num &= 0x1f;
3078                } else
3079                        return -EINVAL;
3080
3081                mbox = pi->adapter->pf;
3082                if (cmd == SIOCGMIIREG)
3083                        ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3084                                         data->reg_num, &data->val_out);
3085                else
3086                        ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3087                                         data->reg_num, data->val_in);
3088                break;
3089        case SIOCGHWTSTAMP:
3090                return copy_to_user(req->ifr_data, &pi->tstamp_config,
3091                                    sizeof(pi->tstamp_config)) ?
3092                        -EFAULT : 0;
3093        case SIOCSHWTSTAMP:
3094                if (copy_from_user(&pi->tstamp_config, req->ifr_data,
3095                                   sizeof(pi->tstamp_config)))
3096                        return -EFAULT;
3097
3098                if (!is_t4(adapter->params.chip)) {
3099                        switch (pi->tstamp_config.tx_type) {
3100                        case HWTSTAMP_TX_OFF:
3101                        case HWTSTAMP_TX_ON:
3102                                break;
3103                        default:
3104                                return -ERANGE;
3105                        }
3106
3107                        switch (pi->tstamp_config.rx_filter) {
3108                        case HWTSTAMP_FILTER_NONE:
3109                                pi->rxtstamp = false;
3110                                break;
3111                        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3112                        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3113                                cxgb4_ptprx_timestamping(pi, pi->port_id,
3114                                                         PTP_TS_L4);
3115                                break;
3116                        case HWTSTAMP_FILTER_PTP_V2_EVENT:
3117                                cxgb4_ptprx_timestamping(pi, pi->port_id,
3118                                                         PTP_TS_L2_L4);
3119                                break;
3120                        case HWTSTAMP_FILTER_ALL:
3121                        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3122                        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3123                        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3124                        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3125                                pi->rxtstamp = true;
3126                                break;
3127                        default:
3128                                pi->tstamp_config.rx_filter =
3129                                        HWTSTAMP_FILTER_NONE;
3130                                return -ERANGE;
3131                        }
3132
3133                        if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
3134                            (pi->tstamp_config.rx_filter ==
3135                                HWTSTAMP_FILTER_NONE)) {
3136                                if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
3137                                        pi->ptp_enable = false;
3138                        }
3139
3140                        if (pi->tstamp_config.rx_filter !=
3141                                HWTSTAMP_FILTER_NONE) {
3142                                if (cxgb4_ptp_redirect_rx_packet(adapter,
3143                                                                 pi) >= 0)
3144                                        pi->ptp_enable = true;
3145                        }
3146                } else {
3147                        /* For T4 Adapters */
3148                        switch (pi->tstamp_config.rx_filter) {
3149                        case HWTSTAMP_FILTER_NONE:
3150                                pi->rxtstamp = false;
3151                                break;
3152                        case HWTSTAMP_FILTER_ALL:
3153                                pi->rxtstamp = true;
3154                                break;
3155                        default:
3156                                pi->tstamp_config.rx_filter =
3157                                        HWTSTAMP_FILTER_NONE;
3158                                return -ERANGE;
3159                        }
3160                }
3161                return copy_to_user(req->ifr_data, &pi->tstamp_config,
3162                                    sizeof(pi->tstamp_config)) ?
3163                        -EFAULT : 0;
3164        default:
3165                return -EOPNOTSUPP;
3166        }
3167        return ret;
3168}
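
    /* For reference, a minimal user-space sketch of how the SIOCSHWTSTAMP path
     * above is normally exercised.  This is the generic kernel timestamping
     * UAPI (<linux/net_tstamp.h>), not something specific to cxgb4; the
     * interface name, filter choice and socket fd are examples only:
     *
     *      struct hwtstamp_config cfg = {
     *              .tx_type   = HWTSTAMP_TX_ON,
     *              .rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
     *      };
     *      struct ifreq ifr = { };
     *
     *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
     *      ifr.ifr_data = (void *)&cfg;
     *      ioctl(fd, SIOCSHWTSTAMP, &ifr);
     */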
3169
3170static void cxgb_set_rxmode(struct net_device *dev)
3171{
3172        /* unfortunately we can't return errors to the stack */
3173        set_rxmode(dev, -1, false);
3174}
3175
3176static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3177{
3178        struct port_info *pi = netdev_priv(dev);
3179        int ret;
3180
3181        ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
3182                            pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
3183        if (!ret)
3184                dev->mtu = new_mtu;
3185        return ret;
3186}
3187
3188#ifdef CONFIG_PCI_IOV
3189static int cxgb4_mgmt_open(struct net_device *dev)
3190{
3191        /* Turn carrier off since we don't have to transmit anything on this
3192         * interface.
3193         */
3194        netif_carrier_off(dev);
3195        return 0;
3196}
3197
3198/* Fill the per-VF MAC addresses that will be assigned by the FW */
3199static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
3200{
3201        u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
3202        unsigned int i, vf, nvfs;
3203        u16 a, b;
3204        int err;
3205        u8 *na;
3206
3207        err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
3208        if (err)
3209                return;
3210
3211        na = adap->params.vpd.na;
3212        for (i = 0; i < ETH_ALEN; i++)
3213                hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
3214                              hex2val(na[2 * i + 1]));
3215
3216        a = (hw_addr[0] << 8) | hw_addr[1];
3217        b = (hw_addr[1] << 8) | hw_addr[2];
3218        a ^= b;
3219        a |= 0x0200;    /* locally assigned Ethernet MAC address */
3220        a &= ~0x0100;   /* not a multicast Ethernet MAC address */
3221        macaddr[0] = a >> 8;
3222        macaddr[1] = a & 0xff;
3223
3224        for (i = 2; i < 5; i++)
3225                macaddr[i] = hw_addr[i + 1];
3226
3227        for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
3228                vf < nvfs; vf++) {
3229                macaddr[5] = adap->pf * nvfs + vf;
3230                ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
3231        }
3232}
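
    /* Worked example for the address synthesis above (the VPD bytes are
     * hypothetical): with hw_addr = 00:07:43:12:34:56,
     *
     *      a = 0x0007, b = 0x0743, a ^= b  ->  a = 0x0744
     *      a |= 0x0200 (locally administered bit, already set here)
     *      a &= ~0x0100 (clear the multicast bit)  ->  a = 0x0644
     *
     * so macaddr[0..4] = 06:44:12:34:56 and, with pf = 0 and 16 total VFs,
     * VF 3 gets the station address 06:44:12:34:56:03.
     */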
3233
3234static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3235{
3236        struct port_info *pi = netdev_priv(dev);
3237        struct adapter *adap = pi->adapter;
3238        int ret;
3239
3240        /* verify MAC addr is valid */
3241        if (!is_valid_ether_addr(mac)) {
3242                dev_err(pi->adapter->pdev_dev,
3243                        "Invalid Ethernet address %pM for VF %d\n",
3244                        mac, vf);
3245                return -EINVAL;
3246        }
3247
3248        dev_info(pi->adapter->pdev_dev,
3249                 "Setting MAC %pM on VF %d\n", mac, vf);
3250        ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
3251        if (!ret)
3252                ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
3253        return ret;
3254}
3255
3256static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
3257                                    int vf, struct ifla_vf_info *ivi)
3258{
3259        struct port_info *pi = netdev_priv(dev);
3260        struct adapter *adap = pi->adapter;
3261        struct vf_info *vfinfo;
3262
3263        if (vf >= adap->num_vfs)
3264                return -EINVAL;
3265        vfinfo = &adap->vfinfo[vf];
3266
3267        ivi->vf = vf;
3268        ivi->max_tx_rate = vfinfo->tx_rate;
3269        ivi->min_tx_rate = 0;
3270        ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
3271        ivi->vlan = vfinfo->vlan;
3272        ivi->linkstate = vfinfo->link_state;
3273        return 0;
3274}
3275
3276static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
3277                                       struct netdev_phys_item_id *ppid)
3278{
3279        struct port_info *pi = netdev_priv(dev);
3280        unsigned int phy_port_id;
3281
3282        phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
3283        ppid->id_len = sizeof(phy_port_id);
3284        memcpy(ppid->id, &phy_port_id, ppid->id_len);
3285        return 0;
3286}
3287
3288static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
3289                                  int min_tx_rate, int max_tx_rate)
3290{
3291        struct port_info *pi = netdev_priv(dev);
3292        struct adapter *adap = pi->adapter;
3293        unsigned int link_ok, speed, mtu;
3294        u32 fw_pfvf, fw_class;
3295        int class_id = vf;
3296        int ret;
3297        u16 pktsize;
3298
3299        if (vf >= adap->num_vfs)
3300                return -EINVAL;
3301
3302        if (min_tx_rate) {
3303                dev_err(adap->pdev_dev,
3304                        "Min tx rate %d for VF %d is invalid; only 0 is supported.\n",
3305                        min_tx_rate, vf);
3306                return -EINVAL;
3307        }
3308
3309        if (max_tx_rate == 0) {
3310                /* unbind VF from any Traffic Class */
3311                fw_pfvf =
3312                    (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3313                     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3314                fw_class = 0xffffffff;
3315                ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3316                                    &fw_pfvf, &fw_class);
3317                if (ret) {
3318                        dev_err(adap->pdev_dev,
3319                                "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
3320                                ret, adap->pf, vf);
3321                        return -EINVAL;
3322                }
3323                dev_info(adap->pdev_dev,
3324                         "PF %d VF %d is unbound from TX Rate Limiting\n",
3325                         adap->pf, vf);
3326                adap->vfinfo[vf].tx_rate = 0;
3327                return 0;
3328        }
3329
3330        ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
3331        if (ret != FW_SUCCESS) {
3332                dev_err(adap->pdev_dev,
3333                        "Failed to get link information for VF %d\n", vf);
3334                return -EINVAL;
3335        }
3336
3337        if (!link_ok) {
3338                dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3339                return -EINVAL;
3340        }
3341
3342        if (max_tx_rate > speed) {
3343                dev_err(adap->pdev_dev,
3344                        "Max tx rate %d for VF %d can't be > link-speed %u\n",
3345                        max_tx_rate, vf, speed);
3346                return -EINVAL;
3347        }
3348
3349        pktsize = mtu;
3350        /* subtract ethhdr size and 4 bytes CRC, since the f/w appends them */
3351        pktsize = pktsize - sizeof(struct ethhdr) - 4;
3352        /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
3353        pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
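            /* Worked example (assuming the firmware reports mtu = 1500):
             * 1500 - 14 (ethhdr) - 4 (CRC) - 20 (iphdr) - 20 (tcphdr) = 1442,
             * which is the typical packet size handed to t4_sched_params()
             * below for the rate-limiting math.
             */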
3354        /* configure Traffic Class for rate-limiting */
3355        ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
3356                              SCHED_CLASS_LEVEL_CL_RL,
3357                              SCHED_CLASS_MODE_CLASS,
3358                              SCHED_CLASS_RATEUNIT_BITS,
3359                              SCHED_CLASS_RATEMODE_ABS,
3360                              pi->tx_chan, class_id, 0,
3361                              max_tx_rate * 1000, 0, pktsize, 0);
3362        if (ret) {
3363                dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3364                        ret);
3365                return -EINVAL;
3366        }
3367        dev_info(adap->pdev_dev,
3368                 "Class %d with MSS %u configured with rate %u\n",
3369                 class_id, pktsize, max_tx_rate);
3370
3371        /* bind VF to configured Traffic Class */
3372        fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3373                   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3374        fw_class = class_id;
3375        ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3376                            &fw_class);
3377        if (ret) {
3378                dev_err(adap->pdev_dev,
3379                        "Err %d in binding PF %d VF %d to Traffic Class %d\n",
3380                        ret, adap->pf, vf, class_id);
3381                return -EINVAL;
3382        }
3383        dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3384                 adap->pf, vf, class_id);
3385        adap->vfinfo[vf].tx_rate = max_tx_rate;
3386        return 0;
3387}
3388
3389static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
3390                                  u16 vlan, u8 qos, __be16 vlan_proto)
3391{
3392        struct port_info *pi = netdev_priv(dev);
3393        struct adapter *adap = pi->adapter;
3394        int ret;
3395
3396        if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
3397                return -EINVAL;
3398
3399        if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
3400                return -EPROTONOSUPPORT;
3401
3402        ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
3403        if (!ret) {
3404                adap->vfinfo[vf].vlan = vlan;
3405                return 0;
3406        }
3407
3408        dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
3409                ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
3410        return ret;
3411}
3412
3413static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3414                                        int link)
3415{
3416        struct port_info *pi = netdev_priv(dev);
3417        struct adapter *adap = pi->adapter;
3418        u32 param, val;
3419        int ret = 0;
3420
3421        if (vf >= adap->num_vfs)
3422                return -EINVAL;
3423
3424        switch (link) {
3425        case IFLA_VF_LINK_STATE_AUTO:
3426                val = FW_VF_LINK_STATE_AUTO;
3427                break;
3428
3429        case IFLA_VF_LINK_STATE_ENABLE:
3430                val = FW_VF_LINK_STATE_ENABLE;
3431                break;
3432
3433        case IFLA_VF_LINK_STATE_DISABLE:
3434                val = FW_VF_LINK_STATE_DISABLE;
3435                break;
3436
3437        default:
3438                return -EINVAL;
3439        }
3440
3441        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3442                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3443        ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3444                            &param, &val);
3445        if (ret) {
3446                dev_err(adap->pdev_dev,
3447                        "Error %d in setting PF %d VF %d link state\n",
3448                        ret, adap->pf, vf);
3449                return -EINVAL;
3450        }
3451
3452        adap->vfinfo[vf].link_state = link;
3453        return ret;
3454}
3455#endif /* CONFIG_PCI_IOV */
3456
3457static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3458{
3459        int ret;
3460        struct sockaddr *addr = p;
3461        struct port_info *pi = netdev_priv(dev);
3462
3463        if (!is_valid_ether_addr(addr->sa_data))
3464                return -EADDRNOTAVAIL;
3465
3466        ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3467                                    addr->sa_data, true, &pi->smt_idx);
3468        if (ret < 0)
3469                return ret;
3470
3471        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3472        return 0;
3473}
3474
3475#ifdef CONFIG_NET_POLL_CONTROLLER
3476static void cxgb_netpoll(struct net_device *dev)
3477{
3478        struct port_info *pi = netdev_priv(dev);
3479        struct adapter *adap = pi->adapter;
3480
3481        if (adap->flags & CXGB4_USING_MSIX) {
3482                int i;
3483                struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3484
3485                for (i = pi->nqsets; i; i--, rx++)
3486                        t4_sge_intr_msix(0, &rx->rspq);
3487        } else
3488                t4_intr_handler(adap)(0, adap);
3489}
3490#endif
3491
3492static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
3493{
3494        struct port_info *pi = netdev_priv(dev);
3495        struct adapter *adap = pi->adapter;
3496        struct ch_sched_queue qe = { 0 };
3497        struct ch_sched_params p = { 0 };
3498        struct sched_class *e;
3499        u32 req_rate;
3500        int err = 0;
3501
3502        if (!can_sched(dev))
3503                return -ENOTSUPP;
3504
3505        if (index < 0 || index > pi->nqsets - 1)
3506                return -EINVAL;
3507
3508        if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3509                dev_err(adap->pdev_dev,
3510                        "Failed to rate limit on queue %d. Link Down?\n",
3511                        index);
3512                return -EINVAL;
3513        }
3514
3515        qe.queue = index;
3516        e = cxgb4_sched_queue_lookup(dev, &qe);
3517        if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3518                dev_err(adap->pdev_dev,
3519                        "Queue %u already bound to class %u of type: %u\n",
3520                        index, e->idx, e->info.u.params.level);
3521                return -EBUSY;
3522        }
3523
3524        /* Convert from Mbps to Kbps */
3525        req_rate = rate * 1000;
3526
3527        /* Max rate is 100 Gbps */
3528        if (req_rate > SCHED_MAX_RATE_KBPS) {
3529                dev_err(adap->pdev_dev,
3530                        "Invalid rate %u Mbps, Max rate is %u Mbps\n",
3531                        rate, SCHED_MAX_RATE_KBPS / 1000);
3532                return -ERANGE;
3533        }
3534
3535        /* First unbind the queue from any existing class */
3536        memset(&qe, 0, sizeof(qe));
3537        qe.queue = index;
3538        qe.class = SCHED_CLS_NONE;
3539
3540        err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
3541        if (err) {
3542                dev_err(adap->pdev_dev,
3543                        "Unbinding Queue %d on port %d failed. Err: %d\n",
3544                        index, pi->port_id, err);
3545                return err;
3546        }
3547
3548        /* Queue already unbound */
3549        if (!req_rate)
3550                return 0;
3551
3552        /* Fetch any available unused or matching scheduling class */
3553        p.type = SCHED_CLASS_TYPE_PACKET;
3554        p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
3555        p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
3556        p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
3557        p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
3558        p.u.params.channel  = pi->tx_chan;
3559        p.u.params.class    = SCHED_CLS_NONE;
3560        p.u.params.minrate  = 0;
3561        p.u.params.maxrate  = req_rate;
3562        p.u.params.weight   = 0;
3563        p.u.params.pktsize  = dev->mtu;
3564
3565        e = cxgb4_sched_class_alloc(dev, &p);
3566        if (!e)
3567                return -ENOMEM;
3568
3569        /* Bind the queue to a scheduling class */
3570        memset(&qe, 0, sizeof(qe));
3571        qe.queue = index;
3572        qe.class = e->idx;
3573
3574        err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
3575        if (err)
3576                dev_err(adap->pdev_dev,
3577                        "Queue rate limiting failed. Err: %d\n", err);
3578        return err;
3579}
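
    /* This ndo is typically reached through the standard per-queue sysfs knob,
     * e.g. (interface name and value are examples only; the rate is in Mbps):
     *
     *      echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
     *
     * Writing 0 unbinds the queue from its rate-limiting class again.
     */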
3580
3581static int cxgb_setup_tc_flower(struct net_device *dev,
3582                                struct flow_cls_offload *cls_flower)
3583{
3584        switch (cls_flower->command) {
3585        case FLOW_CLS_REPLACE:
3586                return cxgb4_tc_flower_replace(dev, cls_flower);
3587        case FLOW_CLS_DESTROY:
3588                return cxgb4_tc_flower_destroy(dev, cls_flower);
3589        case FLOW_CLS_STATS:
3590                return cxgb4_tc_flower_stats(dev, cls_flower);
3591        default:
3592                return -EOPNOTSUPP;
3593        }
3594}
3595
3596static int cxgb_setup_tc_cls_u32(struct net_device *dev,
3597                                 struct tc_cls_u32_offload *cls_u32)
3598{
3599        switch (cls_u32->command) {
3600        case TC_CLSU32_NEW_KNODE:
3601        case TC_CLSU32_REPLACE_KNODE:
3602                return cxgb4_config_knode(dev, cls_u32);
3603        case TC_CLSU32_DELETE_KNODE:
3604                return cxgb4_delete_knode(dev, cls_u32);
3605        default:
3606                return -EOPNOTSUPP;
3607        }
3608}
3609
3610static int cxgb_setup_tc_matchall(struct net_device *dev,
3611                                  struct tc_cls_matchall_offload *cls_matchall,
3612                                  bool ingress)
3613{
3614        struct adapter *adap = netdev2adap(dev);
3615
3616        if (!adap->tc_matchall)
3617                return -ENOMEM;
3618
3619        switch (cls_matchall->command) {
3620        case TC_CLSMATCHALL_REPLACE:
3621                return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
3622        case TC_CLSMATCHALL_DESTROY:
3623                return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
3624        case TC_CLSMATCHALL_STATS:
3625                if (ingress)
3626                        return cxgb4_tc_matchall_stats(dev, cls_matchall);
3627                break;
3628        default:
3629                break;
3630        }
3631
3632        return -EOPNOTSUPP;
3633}
3634
3635static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
3636                                          void *type_data, void *cb_priv)
3637{
3638        struct net_device *dev = cb_priv;
3639        struct port_info *pi = netdev2pinfo(dev);
3640        struct adapter *adap = netdev2adap(dev);
3641
3642        if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3643                dev_err(adap->pdev_dev,
3644                        "Failed to setup tc on port %d. Link Down?\n",
3645                        pi->port_id);
3646                return -EINVAL;
3647        }
3648
3649        if (!tc_cls_can_offload_and_chain0(dev, type_data))
3650                return -EOPNOTSUPP;
3651
3652        switch (type) {
3653        case TC_SETUP_CLSU32:
3654                return cxgb_setup_tc_cls_u32(dev, type_data);
3655        case TC_SETUP_CLSFLOWER:
3656                return cxgb_setup_tc_flower(dev, type_data);
3657        case TC_SETUP_CLSMATCHALL:
3658                return cxgb_setup_tc_matchall(dev, type_data, true);
3659        default:
3660                return -EOPNOTSUPP;
3661        }
3662}
3663
3664static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
3665                                         void *type_data, void *cb_priv)
3666{
3667        struct net_device *dev = cb_priv;
3668        struct port_info *pi = netdev2pinfo(dev);
3669        struct adapter *adap = netdev2adap(dev);
3670
3671        if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3672                dev_err(adap->pdev_dev,
3673                        "Failed to setup tc on port %d. Link Down?\n",
3674                        pi->port_id);
3675                return -EINVAL;
3676        }
3677
3678        if (!tc_cls_can_offload_and_chain0(dev, type_data))
3679                return -EOPNOTSUPP;
3680
3681        switch (type) {
3682        case TC_SETUP_CLSMATCHALL:
3683                return cxgb_setup_tc_matchall(dev, type_data, false);
3684        default:
3685                break;
3686        }
3687
3688        return -EOPNOTSUPP;
3689}
3690
3691static int cxgb_setup_tc_mqprio(struct net_device *dev,
3692                                struct tc_mqprio_qopt_offload *mqprio)
3693{
3694        struct adapter *adap = netdev2adap(dev);
3695
3696        if (!is_ethofld(adap) || !adap->tc_mqprio)
3697                return -ENOMEM;
3698
3699        return cxgb4_setup_tc_mqprio(dev, mqprio);
3700}
3701
3702static LIST_HEAD(cxgb_block_cb_list);
3703
3704static int cxgb_setup_tc_block(struct net_device *dev,
3705                               struct flow_block_offload *f)
3706{
3707        struct port_info *pi = netdev_priv(dev);
3708        flow_setup_cb_t *cb;
3709        bool ingress_only;
3710
3711        pi->tc_block_shared = f->block_shared;
3712        if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3713                cb = cxgb_setup_tc_block_egress_cb;
3714                ingress_only = false;
3715        } else {
3716                cb = cxgb_setup_tc_block_ingress_cb;
3717                ingress_only = true;
3718        }
3719
3720        return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
3721                                          cb, pi, dev, ingress_only);
3722}
3723
3724static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3725                         void *type_data)
3726{
3727        switch (type) {
3728        case TC_SETUP_QDISC_MQPRIO:
3729                return cxgb_setup_tc_mqprio(dev, type_data);
3730        case TC_SETUP_BLOCK:
3731                return cxgb_setup_tc_block(dev, type_data);
3732        default:
3733                return -EOPNOTSUPP;
3734        }
3735}
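
    /* Example of how these offload hooks are driven from user space (device
     * name and match are illustrative only):
     *
     *      tc qdisc add dev eth0 ingress
     *      tc filter add dev eth0 ingress protocol ip flower skip_sw \
     *              dst_ip 192.168.1.1 action drop
     *
     * Adding the ingress qdisc binds the flow block (TC_SETUP_BLOCK above);
     * the flower rule is then delivered to cxgb_setup_tc_block_ingress_cb()
     * as TC_SETUP_CLSFLOWER and handled by cxgb_setup_tc_flower().
     */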
3736
3737static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
3738                                      unsigned int table, unsigned int entry,
3739                                      struct udp_tunnel_info *ti)
3740{
3741        struct port_info *pi = netdev_priv(netdev);
3742        struct adapter *adapter = pi->adapter;
3743        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3744        int ret = 0, i;
3745
3746        switch (ti->type) {
3747        case UDP_TUNNEL_TYPE_VXLAN:
3748                adapter->vxlan_port = 0;
3749                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3750                break;
3751        case UDP_TUNNEL_TYPE_GENEVE:
3752                adapter->geneve_port = 0;
3753                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3754                break;
3755        default:
3756                return -EINVAL;
3757        }
3758
3759        /* Matchall MAC entries can be deleted only after all tunnel ports
3760         * are brought down or removed.
3761         */
3762        if (!adapter->rawf_cnt)
3763                return 0;
3764        for_each_port(adapter, i) {
3765                pi = adap2pinfo(adapter, i);
3766                ret = t4_free_raw_mac_filt(adapter, pi->viid,
3767                                           match_all_mac, match_all_mac,
3768                                           adapter->rawf_start + pi->port_id,
3769                                           1, pi->port_id, false);
3770                if (ret < 0) {
3771                        netdev_info(netdev, "Failed to free MAC filter entry for port %d\n",
3772                                    i);
3773                        return ret;
3774                }
3775        }
3776
3777        return 0;
3778}
3779
3780static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
3781                                    unsigned int table, unsigned int entry,
3782                                    struct udp_tunnel_info *ti)
3783{
3784        struct port_info *pi = netdev_priv(netdev);
3785        struct adapter *adapter = pi->adapter;
3786        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3787        int i, ret;
3788
3789        switch (ti->type) {
3790        case UDP_TUNNEL_TYPE_VXLAN:
3791                adapter->vxlan_port = ti->port;
3792                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3793                             VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3794                break;
3795        case UDP_TUNNEL_TYPE_GENEVE:
3796                adapter->geneve_port = ti->port;
3797                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3798                             GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3799                break;
3800        default:
3801                return -EINVAL;
3802        }
3803
3804        /* Create a 'match all' MAC filter entry for the inner MAC, if the
3805         * raw MAC interface is supported. Once the Linux kernel provides
3806         * driver entry points for adding/deleting the inner MAC addresses,
3807         * we will remove this 'match all' entry and fall back to adding
3808         * exact match filters.
3809         */
3810        for_each_port(adapter, i) {
3811                pi = adap2pinfo(adapter, i);
3812
3813                ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3814                                            match_all_mac,
3815                                            match_all_mac,
3816                                            adapter->rawf_start + pi->port_id,
3817                                            1, pi->port_id, false);
3818                if (ret < 0) {
3819                        netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3820                                    be16_to_cpu(ti->port));
3821                        return ret;
3822                }
3823        }
3824
3825        return 0;
3826}
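
    /* The udp_tunnel_nic core calls .set_port/.unset_port above when a VXLAN
     * or GENEVE UDP port comes or goes on this device, e.g. after something
     * like (names and IDs are illustrative only):
     *
     *      ip link add vxlan0 type vxlan id 42 dstport 4789 dev eth0
     *
     * which programs the port into MPS_RX_VXLAN_TYPE_A and installs the
     * 'match all' inner-MAC filters described above.
     */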
3827
3828static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
3829        .set_port       = cxgb_udp_tunnel_set_port,
3830        .unset_port     = cxgb_udp_tunnel_unset_port,
3831        .tables         = {
3832                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
3833                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },