linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_tc_mqprio.h"
#include "cxgb4_tc_matchall.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
        static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
                {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
                { 0, } \
        }

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
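
/* Usage sketch (illustrative, not part of the upstream source): "msi" is a
 * writable module parameter, so the interrupt scheme can be chosen at module
 * load time, e.g. restricting the driver to MSI/INTx on a platform with
 * broken MSI-X support:
 *
 *     modprobe cxgb4 msi=1
 *
 * or persistently via a modprobe config file: "options cxgb4 msi=1".
 */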

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;

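/* Worked example of the offset above (illustrative): an Ethernet header is
 * 14 bytes, so DMA'ing a frame into a 4-byte aligned buffer at offset 2
 * places the IP header at offset 2 + 14 = 16, which is 4-byte aligned as
 * required.  With rx_dma_offset = 0 the IP header would land at offset 14
 * and be misaligned.
 */
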
/* TX Queue select determines which algorithm is used to pick a TX queue:
 * the kernel-provided function (select_queue=0) or the driver's
 * cxgb_select_queue() function (select_queue=1).
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between the kernel-provided method (0) or the driver's method (1) of selecting the TX queue. Default is the kernel method.");
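
/* Note (summary of cxgb_select_queue() below): with select_queue=1, a flow
 * is mapped back to the TX queue matching the RX queue it was received on,
 * or to the current CPU if no RX queue was recorded, with the index wrapped
 * into the range [0, real_num_tx_queues).
 */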

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);
LIST_HEAD(uld_list);

static int cfg_queues(struct adapter *adap);

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s;
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case 100:
                        s = "100Mbps";
                        break;
                case 1000:
                        s = "1Gbps";
                        break;
                case 10000:
                        s = "10Gbps";
                        break;
                case 25000:
                        s = "25Gbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
                case 50000:
                        s = "50Gbps";
                        break;
                case 100000:
                        s = "100Gbps";
                        break;
                default:
                        pr_info("%s: unsupported speed: %d\n",
                                dev->name, p->link_cfg.speed);
                        return;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X_V(
                                FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
                                            &name, &value,
                                            -FW_CMD_MAX_TIMEOUT);

                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = enable ? value : 0;
        }
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        if (!pi->dcb.enabled)
                return 0;

        return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
                (pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        if (cxgb4_dcb_enabled(dev)) {
                                cxgb4_dcb_reset(dev);
                                dcb_tx_queue_prio_enable(dev, false);
                        }
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }

                link_report(dev);
        }
}

void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                netdev_info(dev, "%s: unsupported port module inserted\n",
                            dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                netdev_info(dev, "%s: unknown port module inserted\n",
                            dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
                netdev_info(dev, "%s: transceiver module error\n", dev->name);
        else
                netdev_info(dev, "%s: unknown module type %d inserted\n",
                            dev->name, pi->mod_type);

        /* If the interface is running, then we'll need any "sticky" Link
         * Parameters redone with a new Transceiver Module.
         */
        pi->link_cfg.redo_l1cfg = netif_running(dev);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
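
/* The threshold above is expressed in units of 64 doorbell FIFO entries,
 * hence the default of 10 corresponding to the 640-entry threshold noted in
 * the comment next to the declaration.
 */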

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

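/* Illustration (assuming hash_mac_addr() folds a MAC address down to a
 * 6-bit bucket index, which the 64-bit vector below implies): each address
 * in adap->mac_hlist sets one bit in the vector, so two addresses hashing
 * to buckets 3 and 17 yield vec = (1ULL << 3) | (1ULL << 17).  Any incoming
 * address that hashes to a set bucket is accepted, which is why hash
 * matches are inexact compared to TCAM matches.
 */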
static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
        struct adapter *adap = pi->adapter;
        u64 vec = 0;
        bool ucast = false;
        struct hash_mac_addr *entry;

        /* Calculate the hash vector for the updated list and program it */
        list_for_each_entry(entry, &adap->mac_hlist, list) {
                ucast |= is_unicast_ether_addr(entry->addr);
                vec |= (1ULL << hash_mac_addr(entry->addr));
        }
        return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
                                vec, false);
}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adap = pi->adapter;
        int ret;
        u64 mhash = 0;
        u64 uhash = 0;
        /* idx stores the index of the allocated filter; its size should be
         * increased if filters are allocated for more than one MAC address
         * at a time
         */

        u16 idx[1] = {};
        bool free = false;
        bool ucast = is_unicast_ether_addr(mac_addr);
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *new_entry;

        ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
                                   idx, ucast ? &uhash : &mhash, false);
        if (ret < 0)
                goto out;
        /* If the address was hashed (uhash/mhash != 0), add it to the hash
         * address list so that at the end we can calculate the hash for the
         * whole list and program it
         */
        if (uhash || mhash) {
                new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
                if (!new_entry)
                        return -ENOMEM;
                ether_addr_copy(new_entry->addr, mac_addr);
                list_add_tail(&new_entry->list, &adap->mac_hlist);
                ret = cxgb4_set_addr_hash(pi);
        }
out:
        return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adap = pi->adapter;
        int ret;
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *entry, *tmp;

        /* If the MAC address to be removed is in the hash addr
         * list, delete it from the list and update hash vector
         */
        list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
                if (ether_addr_equal(entry->addr, mac_addr)) {
                        list_del(&entry->list);
                        kfree(entry);
                        return cxgb4_set_addr_hash(pi);
                }
        }

        ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
        return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
        __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
                             mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
                             (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                             sleep_ok);
}

/**
 *      cxgb4_change_mac - Update match filter for a MAC address.
 *      @pi: the port_info
 *      @viid: the VI id
 *      @tcam_idx: TCAM index of existing filter for old value of MAC address,
 *                 or -1
 *      @addr: the new MAC address value
 *      @persist: whether a new MAC allocation should be persistent
 *      @smt_idx: the destination to store the new SMT index.
 *
 *      Modifies an MPS filter and sets it to the new MAC address if
 *      @tcam_idx >= 0, or adds the MAC address to a new filter if
 *      @tcam_idx < 0. In the latter case the address is added persistently
 *      if @persist is %true.
 *      Addresses are programmed to the hash region if the TCAM runs out of
 *      entries.
 */
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
                     int *tcam_idx, const u8 *addr, bool persist,
                     u8 *smt_idx)
{
        struct adapter *adapter = pi->adapter;
        struct hash_mac_addr *entry, *new_entry;
        int ret;

        ret = t4_change_mac(adapter, adapter->mbox, viid,
                            *tcam_idx, addr, persist, smt_idx);
        /* We ran out of TCAM entries. Try programming the hash region. */
        if (ret == -ENOMEM) {
                /* If the MAC address to be updated is in the hash addr
                 * list, update it from the list
                 */
                list_for_each_entry(entry, &adapter->mac_hlist, list) {
                        if (entry->iface_mac) {
                                ether_addr_copy(entry->addr, addr);
                                goto set_hash;
                        }
                }
                new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
                if (!new_entry)
                        return -ENOMEM;
                ether_addr_copy(new_entry->addr, addr);
                new_entry->iface_mac = true;
                list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
                ret = cxgb4_set_addr_hash(pi);
        } else if (ret >= 0) {
                *tcam_idx = ret;
                ret = 0;
        }

        return ret;
}

/*
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->mbox;
        int ret;

        /*
         * We do not set address filters and promiscuity here; the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
                            dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0)
                ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
                                            dev->dev_addr, true, &pi->smt_idx);
        if (ret == 0)
                ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                local_bh_disable();
                ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
                                          true, CXGB4_DCB_ENABLED);
                local_bh_enable();
        }

        return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[adap->chan_map[port]];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if (txq->q_type == CXGB4_TXQ_ETH) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        t4_sge_eth_txq_egress_update(q->adap, eq, -1);
                } else {
                        struct sge_uld_txq *oq;

                        oq = container_of(txq, struct sge_uld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    (action == FW_PORT_ACTION_GET_PORT_INFO ||
                     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
                        int port = FW_PORT_CMD_PORTID_G(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev;
                        int dcbxdis, state_input;

                        dev = q->adap->port[q->adap->chan_map[port]];
                        dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
                          ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
                          : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
                               & FW_PORT_CMD_DCBXDIS32_F));
                        state_input = (dcbxdis
                                       ? CXGB4_DCB_INPUT_FW_DISABLED
                                       : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        if (p->type == 0)
                                t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SMT_WRITE_RPL) {
                const struct cpl_smt_write_rpl *p = (void *)rsp;

                do_smt_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else if (opcode == CPL_ACT_OPEN_RPL) {
                const struct cpl_act_open_rpl *p = (void *)rsp;

                hash_filter_rpl(q->adap, p);
        } else if (opcode == CPL_ABORT_RPL_RSS) {
                const struct cpl_abort_rpl_rss *p = (void *)rsp;

                hash_del_filter_rpl(q->adap, p);
        } else if (opcode == CPL_SRQ_TABLE_RPL) {
                const struct cpl_srq_table_rpl *p = (void *)rsp;

                do_srq_table_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & CXGB4_USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~CXGB4_USING_MSIX;
        } else if (adapter->flags & CXGB4_USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~CXGB4_USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;
        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

        if (v & PFSW_F) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
        }
        if (adap->flags & CXGB4_MASTER_PF)
                t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
                       cpumask_var_t *aff_mask, int idx)
{
        int rv;

        if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
                dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
                return -ENOMEM;
        }

        cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
                        *aff_mask);

        rv = irq_set_affinity_hint(vec, *aff_mask);
        if (rv)
                dev_warn(adap->pdev_dev,
                         "irq_set_affinity_hint %u failed %d\n",
                         vec, rv);

        return 0;
}

void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
{
        irq_set_affinity_hint(vec, NULL);
        free_cpumask_var(aff_mask);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        struct msix_info *minfo;
        int err, ethqidx;

        if (s->fwevtq_msix_idx < 0)
                return -ENOMEM;

        err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
                          t4_sge_intr_msix, 0,
                          adap->msix_info[s->fwevtq_msix_idx].desc,
                          &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                minfo = s->ethrxq[ethqidx].msix;
                err = request_irq(minfo->vec,
                                  t4_sge_intr_msix, 0,
                                  minfo->desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;

                cxgb4_set_msix_aff(adap, minfo->vec,
                                   &minfo->aff_mask, ethqidx);
        }
        return 0;

unwind:
        while (--ethqidx >= 0) {
                minfo = s->ethrxq[ethqidx].msix;
                cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
                free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
        }
        free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        struct msix_info *minfo;
        int i;

        free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
        for_each_ethrxq(s, i) {
                minfo = s->ethrxq[i].msix;
                cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
                free_irq(minfo->vec, &s->ethrxq[i].rspq);
        }
}

static int setup_ppod_edram(struct adapter *adap)
{
        unsigned int param, val;
        int ret;

        /* The driver sends a FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to
         * check whether the firmware supports the ppod edram feature. If
         * the firmware returns 1, the driver can enable the feature by
         * sending a FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with
         * value 1.
         */
        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));

        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
        if (ret < 0) {
                dev_warn(adap->pdev_dev,
                         "querying PPOD_EDRAM support failed: %d\n",
                         ret);
                return -1;
        }

        if (val != 1)
                return -1;

        ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
        if (ret < 0) {
                dev_err(adap->pdev_dev,
                        "setting PPOD_EDRAM failed: %d\n", ret);
                return -1;
        }
        return 0;
}

static void adap_config_hpfilter(struct adapter *adapter)
{
        u32 param, val = 0;
        int ret;

        /* Enable the HP filter region. Older firmware will fail this
         * request, and that is fine.
         */
        param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
        ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
                            1, &param, &val);

        /* An error means the FW doesn't know about HP filter support;
         * it's not a problem, so don't return an error.
         */
        if (ret < 0)
                dev_err(adapter->pdev_dev,
                        "HP filter region isn't supported by FW\n");
}

static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
                            u16 rss_size, u16 viid)
{
        struct adapter *adap = pi->adapter;
        int ret;

        ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
                                  rss_size);
        if (ret)
                return ret;

        /* If Tunnel All Lookup isn't specified in the global RSS
         * Configuration, then we need to specify a default Ingress
         * Queue for any ingress packets which aren't hashed.  We'll
         * use our first ingress queue ...
         */
        return t4_config_vi_rss(adap, adap->mbox, viid,
                                FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
                                FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
                                FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
                                FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
                                FW_RSS_VI_CONFIG_CMD_UDPEN_F,
                                rss[0]);
}

/**
 *      cxgb4_write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 *      Should never be called before the SGE Ethernet Rx queues are set up.
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;
        int i, err;
        u16 *rss;

        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;

        err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, j, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                /* Fill default values with equal distribution */
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = j % pi->nqsets;

                err = cxgb4_write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

void cxgb4_quiesce_rx(struct sge_rspq *q)
{
        if (q->handler)
                napi_disable(&q->napi);
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;

                cxgb4_quiesce_rx(q);
        }
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
        struct sge *s = &adap->sge;

        if (adap->flags & CXGB4_FULL_INIT_DONE) {
                t4_intr_disable(adap);
                if (adap->flags & CXGB4_USING_MSIX) {
                        free_msix_queue_irqs(adap);
                        free_irq(adap->msix_info[s->nd_msix_idx].vec,
                                 adap);
                } else {
                        free_irq(adap->pdev->irq, adap);
                }
                quiesce_rx(adap);
        }
}

void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
{
        if (q->handler)
                napi_enable(&q->napi);

        /* 0-increment GTS to start the timer and enable interrupts */
        t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                     SEINTARM_V(q->intr_params) |
                     INGRESSQID_V(q->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;

                cxgb4_enable_rx(adap, q);
        }
}

static int setup_non_data_intr(struct adapter *adap)
{
        int msix;

        adap->sge.nd_msix_idx = -1;
        if (!(adap->flags & CXGB4_USING_MSIX))
                return 0;

        /* Request MSI-X vector for non-data interrupt */
        msix = cxgb4_get_msix_idx_from_bmap(adap);
        if (msix < 0)
                return -ENOMEM;

        snprintf(adap->msix_info[msix].desc,
                 sizeof(adap->msix_info[msix].desc),
                 "%s", adap->port[0]->name);

        adap->sge.nd_msix_idx = msix;
        return 0;
}

static int setup_fw_sge_queues(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int msix, err = 0;

        bitmap_zero(s->starving_fl, s->egr_sz);
        bitmap_zero(s->txq_maperr, s->egr_sz);

        if (adap->flags & CXGB4_USING_MSIX) {
                s->fwevtq_msix_idx = -1;
                msix = cxgb4_get_msix_idx_from_bmap(adap);
                if (msix < 0)
                        return -ENOMEM;

                snprintf(adap->msix_info[msix].desc,
                         sizeof(adap->msix_info[msix].desc),
                         "%s-FWeventq", adap->port[0]->name);
        } else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL, NULL, -1);
                if (err)
                        return err;
                msix = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msix, NULL, fwevtq_handler, NULL, -1);
        if (err && msix >= 0)
                cxgb4_free_msix_idx_in_bmap(adap, msix);

        s->fwevtq_msix_idx = msix;
        return err;
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        struct sge_uld_rxq_info *rxq_info = NULL;
        struct sge *s = &adap->sge;
        unsigned int cmplqid = 0;
        int err, i, j, msix = 0;

        if (is_uld(adap))
                rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

        if (!(adap->flags & CXGB4_USING_MSIX))
                msix = -((int)s->intrq.abs_id + 1);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msix >= 0) {
                                msix = cxgb4_get_msix_idx_from_bmap(adap);
                                if (msix < 0) {
                                        err = msix;
                                        goto freeout;
                                }

                                snprintf(adap->msix_info[msix].desc,
                                         sizeof(adap->msix_info[msix].desc),
                                         "%s-Rx%d", dev->name, j);
                                q->msix = &adap->msix_info[msix];
                        }

                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msix, &q->fl,
                                               t4_ethrx_handler,
                                               NULL,
                                               t4_get_tp_ch_map(adap,
                                                                pi->tx_chan));
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }

                q = &s->ethrxq[pi->first_qset];
                for (j = 0; j < pi->nqsets; j++, t++, q++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        q->rspq.cntxt_id,
                                        !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
                        if (err)
                                goto freeout;
                }
        }

        for_each_port(adap, i) {
                /* Note that cmplqid below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                if (rxq_info)
                        cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id, cmplqid);
                if (err)
                        goto freeout;
        }

        if (!is_t4(adap->params.chip)) {
                err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
                                           netdev_get_tx_queue(adap->port[0], 0),
                                           s->fw_evtq.cntxt_id, false);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, is_t4(adap->params.chip) ?
                                MPS_TRC_RSS_CONTROL_A :
                                MPS_T5_TRC_RSS_CONTROL_A,
                     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
        return 0;
freeout:
        dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
        t4_free_sge_resources(adap);
        return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                             struct net_device *sb_dev)
{
        int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
        /* If Data Center Bridging has been successfully negotiated on this
         * link then we'll use the skb's priority to map it to a TX Queue.
         * The skb's priority is determined via the VLAN Tag Priority Code
         * Point field.
         */
        if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
                u16 vlan_tci;
                int err;

                err = vlan_get_tag(skb, &vlan_tci);
                if (unlikely(err)) {
                        if (net_ratelimit())
                                netdev_warn(dev,
                                            "TX Packet without VLAN Tag on DCB Link\n");
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
                        if (skb->protocol == htons(ETH_P_FCOE))
                                txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
                }
                return txq;
        }
#endif /* CONFIG_CHELSIO_T4_DCB */

        if (dev->num_tc) {
                struct port_info *pi = netdev2pinfo(dev);
                u8 ver, proto;

                ver = ip_hdr(skb)->version;
                proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
                                     ip_hdr(skb)->protocol;

                /* Send unsupported traffic pattern to normal NIC queues. */
                txq = netdev_pick_tx(dev, skb, sb_dev);
                if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
                    skb->encapsulation ||
                    cxgb4_is_ktls_skb(skb) ||
                    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
                        txq = txq % pi->nqsets;

                return txq;
        }

        if (select_queue) {
                txq = (skb_rx_queue_recorded(skb)
                        ? skb_get_rx_queue(skb)
                        : smp_processor_id());

                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;

                return txq;
        }

        return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}

static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

/**
 *      cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *      @q: the Rx queue
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *      one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
                               unsigned int us, unsigned int cnt)
{
        struct adapter *adap = q->adap;

        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X_V(
                                        FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
                        err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
                                            &v, &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
        return 0;
}
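
/* Usage sketch (illustrative): cxgb4_set_rspq_intr_params(q, 5, 8) asks the
 * hardware to hold off interrupts for roughly 5us and/or until 8 packets
 * have accumulated; both values are first rounded to the nearest supported
 * entries in sge->timer_val[] / sge->counter_val[] by closest_timer() and
 * closest_thres() above.
 */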

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
        netdev_features_t changed = dev->features ^ features;
        const struct port_info *pi = netdev_priv(dev);
        int err;

        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
                return 0;

        err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
                            pi->viid_mirror, -1, -1, -1, -1,
                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (unlikely(err))
                dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
        return err;
}

static int setup_debugfs(struct adapter *adap)
{
        if (IS_ERR_OR_NULL(adap->debugfs_root))
                return -1;

#ifdef CONFIG_DEBUG_FS
        t4_setup_debugfs(adap);
#endif
        return 0;
}

static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
                                       struct sge_eth_rxq *mirror_rxq)
{
        if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
            !(adap->flags & CXGB4_SHUTTING_DOWN))
                cxgb4_quiesce_rx(&mirror_rxq->rspq);

        if (adap->flags & CXGB4_USING_MSIX) {
                cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
                                     mirror_rxq->msix->aff_mask);
                free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
                cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
        }

        free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
}

static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge_eth_rxq *mirror_rxq;
        struct sge *s = &adap->sge;
        int ret = 0, msix = 0;
        u16 i, rxqid;
        u16 *rss;

        if (!pi->vi_mirror_count)
                return 0;

        if (s->mirror_rxq[pi->port_id])
                return 0;

        mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
        if (!mirror_rxq)
                return -ENOMEM;

        s->mirror_rxq[pi->port_id] = mirror_rxq;

        if (!(adap->flags & CXGB4_USING_MSIX))
                msix = -((int)adap->sge.intrq.abs_id + 1);

        for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
                mirror_rxq = &s->mirror_rxq[pi->port_id][i];

                /* Allocate Mirror Rxqs */
                if (msix >= 0) {
                        msix = cxgb4_get_msix_idx_from_bmap(adap);
                        if (msix < 0) {
                                ret = msix;
                                goto out_free_queues;
                        }

                        mirror_rxq->msix = &adap->msix_info[msix];
                        snprintf(mirror_rxq->msix->desc,
                                 sizeof(mirror_rxq->msix->desc),
                                 "%s-mirrorrxq%d", dev->name, i);
                }

                init_rspq(adap, &mirror_rxq->rspq,
                          CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
                          CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
                          CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
                          CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);

                mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;

                ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
                                       dev, msix, &mirror_rxq->fl,
                                       t4_ethrx_handler, NULL, 0);
                if (ret)
                        goto out_free_msix_idx;

                /* Setup MSI-X vectors for Mirror Rxqs */
                if (adap->flags & CXGB4_USING_MSIX) {
                        ret = request_irq(mirror_rxq->msix->vec,
                                          t4_sge_intr_msix, 0,
                                          mirror_rxq->msix->desc,
                                          &mirror_rxq->rspq);
                        if (ret)
                                goto out_free_rxq;

                        cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
                                           &mirror_rxq->msix->aff_mask, i);
                }

                /* Start NAPI for Mirror Rxqs */
                cxgb4_enable_rx(adap, &mirror_rxq->rspq);
        }

        /* Setup RSS for Mirror Rxqs */
        rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
        if (!rss) {
                ret = -ENOMEM;
                goto out_free_queues;
        }

        mirror_rxq = &s->mirror_rxq[pi->port_id][0];
        for (i = 0; i < pi->rss_size; i++)
                rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;

        ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
        kfree(rss);
        if (ret)
                goto out_free_queues;

        return 0;

out_free_rxq:
        free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);

out_free_msix_idx:
        cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);

out_free_queues:
        while (rxqid-- > 0)
                cxgb4_port_mirror_free_rxq(adap,
                                           &s->mirror_rxq[pi->port_id][rxqid]);

        kfree(s->mirror_rxq[pi->port_id]);
        s->mirror_rxq[pi->port_id] = NULL;
        return ret;
}

static void cxgb4_port_mirror_free_queues(struct net_device *dev)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adap = netdev2adap(dev);
        struct sge *s = &adap->sge;
        u16 i;

        if (!pi->vi_mirror_count)
                return;

        if (!s->mirror_rxq[pi->port_id])
                return;

        for (i = 0; i < pi->nmirrorqsets; i++)
                cxgb4_port_mirror_free_rxq(adap,
                                           &s->mirror_rxq[pi->port_id][i]);

        kfree(s->mirror_rxq[pi->port_id]);
        s->mirror_rxq[pi->port_id] = NULL;
}
1445
1446static int cxgb4_port_mirror_start(struct net_device *dev)
1447{
1448        struct port_info *pi = netdev2pinfo(dev);
1449        struct adapter *adap = netdev2adap(dev);
1450        int ret, idx = -1;
1451
1452        if (!pi->vi_mirror_count)
1453                return 0;
1454
1455        /* Mirror VIs can be created dynamically after the stack has
1456         * already set up Rx modes like MTU, promisc, allmulti, etc.
1457         * on the main VI. So, parse what the stack set up on the
1458         * main VI and apply the same settings on the mirror VI.
1459         */
1460        ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
1461                            dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
1462                            (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
1463                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
1464        if (ret) {
1465                dev_err(adap->pdev_dev,
1466                        "Failed to start Rx mode for Mirror VI 0x%x, ret: %d\n",
1467                        pi->viid_mirror, ret);
1468                return ret;
1469        }
1470
1471        /* Enable replication bit for the device's MAC address
1472         * in MPS TCAM, so that the packets for the main VI are
1473         * replicated to mirror VI.
1474         */
1475        ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
1476                                    dev->dev_addr, true, NULL);
1477        if (ret) {
1478                dev_err(adap->pdev_dev,
1479                        "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
1480                        pi->viid_mirror, ret);
1481                return ret;
1482        }
1483
1484        /* Enabling a Virtual Interface can result in an interrupt
1485         * during the processing of the VI Enable command and, in some
1486         * paths, result in an attempt to issue another command in the
1487         * interrupt context. Thus, we disable bottom halves for the
1488         * duration of the VI Enable command ...
1489         */
1490        local_bh_disable();
1491        ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
1492                                  false);
1493        local_bh_enable();
1494        if (ret)
1495                dev_err(adap->pdev_dev,
1496                        "Failed starting Mirror VI 0x%x, ret: %d\n",
1497                        pi->viid_mirror, ret);
1498
1499        return ret;
1500}
1501
1502static void cxgb4_port_mirror_stop(struct net_device *dev)
1503{
1504        struct port_info *pi = netdev2pinfo(dev);
1505        struct adapter *adap = netdev2adap(dev);
1506
1507        if (!pi->vi_mirror_count)
1508                return;
1509
1510        t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
1511                            false);
1512}
1513
1514int cxgb4_port_mirror_alloc(struct net_device *dev)
1515{
1516        struct port_info *pi = netdev2pinfo(dev);
1517        struct adapter *adap = netdev2adap(dev);
1518        int ret = 0;
1519
1520        if (!pi->nmirrorqsets)
1521                return -EOPNOTSUPP;
1522
1523        mutex_lock(&pi->vi_mirror_mutex);
1524        if (pi->viid_mirror) {
1525                pi->vi_mirror_count++;
1526                goto out_unlock;
1527        }
1528
1529        ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
1530                                  &pi->viid_mirror);
1531        if (ret)
1532                goto out_unlock;
1533
1534        pi->vi_mirror_count = 1;
1535
1536        if (adap->flags & CXGB4_FULL_INIT_DONE) {
1537                ret = cxgb4_port_mirror_alloc_queues(dev);
1538                if (ret)
1539                        goto out_free_vi;
1540
1541                ret = cxgb4_port_mirror_start(dev);
1542                if (ret)
1543                        goto out_free_queues;
1544        }
1545
1546        mutex_unlock(&pi->vi_mirror_mutex);
1547        return 0;
1548
1549out_free_queues:
1550        cxgb4_port_mirror_free_queues(dev);
1551
1552out_free_vi:
1553        pi->vi_mirror_count = 0;
1554        t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1555        pi->viid_mirror = 0;
1556
1557out_unlock:
1558        mutex_unlock(&pi->vi_mirror_mutex);
1559        return ret;
1560}
1561
1562void cxgb4_port_mirror_free(struct net_device *dev)
1563{
1564        struct port_info *pi = netdev2pinfo(dev);
1565        struct adapter *adap = netdev2adap(dev);
1566
1567        mutex_lock(&pi->vi_mirror_mutex);
1568        if (!pi->viid_mirror)
1569                goto out_unlock;
1570
1571        if (pi->vi_mirror_count > 1) {
1572                pi->vi_mirror_count--;
1573                goto out_unlock;
1574        }
1575
1576        cxgb4_port_mirror_stop(dev);
1577        cxgb4_port_mirror_free_queues(dev);
1578
1579        pi->vi_mirror_count = 0;
1580        t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1581        pi->viid_mirror = 0;
1582
1583out_unlock:
1584        mutex_unlock(&pi->vi_mirror_mutex);
1585}
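
/* Illustrative sketch, not part of the driver: callers (e.g. the TC
 * matchall mirror offload) are expected to treat alloc/free as a
 * refcount on the per-port mirror VI:
 *
 *	err = cxgb4_port_mirror_alloc(dev);
 *	if (err)
 *		return err;
 *	...
 *	cxgb4_port_mirror_free(dev);
 *
 * The first alloc creates the mirror VI (and, once the adapter is fully
 * initialized, its queues); later allocs only bump vi_mirror_count, and
 * only the final free stops the queues and releases the VI.
 */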
1586
1587/*
1588 * upper-layer driver support
1589 */
1590
1591/*
1592 * Allocate an active-open TID and set it to the supplied value.
1593 */
1594int cxgb4_alloc_atid(struct tid_info *t, void *data)
1595{
1596        int atid = -1;
1597
1598        spin_lock_bh(&t->atid_lock);
1599        if (t->afree) {
1600                union aopen_entry *p = t->afree;
1601
1602                atid = (p - t->atid_tab) + t->atid_base;
1603                t->afree = p->next;
1604                p->data = data;
1605                t->atids_in_use++;
1606        }
1607        spin_unlock_bh(&t->atid_lock);
1608        return atid;
1609}
1610EXPORT_SYMBOL(cxgb4_alloc_atid);
1611
1612/*
1613 * Release an active-open TID.
1614 */
1615void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1616{
1617        union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1618
1619        spin_lock_bh(&t->atid_lock);
1620        p->next = t->afree;
1621        t->afree = p;
1622        t->atids_in_use--;
1623        spin_unlock_bh(&t->atid_lock);
1624}
1625EXPORT_SYMBOL(cxgb4_free_atid);
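
/* Illustrative sketch, not part of the driver: atid_tab is a union-based
 * free list, so a free slot holds the pointer to the next free slot and
 * an allocated slot holds the caller's cookie.  A hypothetical ULD
 * active-open path would look like:
 *
 *	atid = cxgb4_alloc_atid(&adap->tids, my_conn);
 *	if (atid < 0)
 *		return -ENOMEM;
 *	... send the active-open request carrying atid ...
 *	cxgb4_free_atid(&adap->tids, atid);
 *
 * where my_conn is a hypothetical per-connection context later recovered
 * via lookup_atid().
 */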
1626
1627/*
1628 * Allocate a server TID and set it to the supplied value.
1629 */
1630int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1631{
1632        int stid;
1633
1634        spin_lock_bh(&t->stid_lock);
1635        if (family == PF_INET) {
1636                stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1637                if (stid < t->nstids)
1638                        __set_bit(stid, t->stid_bmap);
1639                else
1640                        stid = -1;
1641        } else {
1642                stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1643                if (stid < 0)
1644                        stid = -1;
1645        }
1646        if (stid >= 0) {
1647                t->stid_tab[stid].data = data;
1648                stid += t->stid_base;
1649                /* IPv6 requires a maximum of 520 bits, or 16 cells, in
1650                 * the TCAM. This is equivalent to 4 TIDs. With CLIP
1651                 * enabled, it needs only 2 TIDs.
1652                 */
1653                if (family == PF_INET6) {
1654                        t->stids_in_use += 2;
1655                        t->v6_stids_in_use += 2;
1656                } else {
1657                        t->stids_in_use++;
1658                }
1659        }
1660        spin_unlock_bh(&t->stid_lock);
1661        return stid;
1662}
1663EXPORT_SYMBOL(cxgb4_alloc_stid);
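
/* Illustrative sketch, not part of the driver: an IPv4 listener consumes
 * a single stid bit, while an IPv6 listener reserves an order-1 (2-bit)
 * aligned region via bitmap_find_free_region(), matching the 2-TID TCAM
 * cost noted above.  For example, assuming stid_base = 64 and bits 2-3
 * free:
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET6, my_ctx);
 *	(returns 66, i.e. bitmap index 2 + stid_base)
 *
 * my_ctx is a hypothetical caller cookie stored in stid_tab[].data.
 */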
1664
1665/* Allocate a server filter TID and set it to the supplied value.
1666 */
1667int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1668{
1669        int stid;
1670
1671        spin_lock_bh(&t->stid_lock);
1672        if (family == PF_INET) {
1673                stid = find_next_zero_bit(t->stid_bmap,
1674                                t->nstids + t->nsftids, t->nstids);
1675                if (stid < (t->nstids + t->nsftids))
1676                        __set_bit(stid, t->stid_bmap);
1677                else
1678                        stid = -1;
1679        } else {
1680                stid = -1;
1681        }
1682        if (stid >= 0) {
1683                t->stid_tab[stid].data = data;
1684                stid -= t->nstids;
1685                stid += t->sftid_base;
1686                t->sftids_in_use++;
1687        }
1688        spin_unlock_bh(&t->stid_lock);
1689        return stid;
1690}
1691EXPORT_SYMBOL(cxgb4_alloc_sftid);
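
/* Illustrative example, hypothetical sizes: server filter TIDs share the
 * stid bitmap but occupy the bits above the first nstids, and are handed
 * out in their own sftid_base-relative space.  With nstids = 2048 and
 * sftid_base = 10240, a free bit found at bitmap index 2050 is returned
 * as
 *
 *	2050 - nstids + sftid_base = 2 + 10240 = 10242
 *
 * and cxgb4_free_stid() applies the inverse mapping before clearing the
 * bit.
 */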
1692
1693/* Release a server TID.
1694 */
1695void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1696{
1697        /* Is it a server filter TID? */
1698        if (t->nsftids && (stid >= t->sftid_base)) {
1699                stid -= t->sftid_base;
1700                stid += t->nstids;
1701        } else {
1702                stid -= t->stid_base;
1703        }
1704
1705        spin_lock_bh(&t->stid_lock);
1706        if (family == PF_INET)
1707                __clear_bit(stid, t->stid_bmap);
1708        else
1709                bitmap_release_region(t->stid_bmap, stid, 1);
1710        t->stid_tab[stid].data = NULL;
1711        if (stid < t->nstids) {
1712                if (family == PF_INET6) {
1713                        t->stids_in_use -= 2;
1714                        t->v6_stids_in_use -= 2;
1715                } else {
1716                        t->stids_in_use--;
1717                }
1718        } else {
1719                t->sftids_in_use--;
1720        }
1721
1722        spin_unlock_bh(&t->stid_lock);
1723}
1724EXPORT_SYMBOL(cxgb4_free_stid);
1725
1726/*
1727 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
1728 */
1729static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1730                           unsigned int tid)
1731{
1732        struct cpl_tid_release *req;
1733
1734        set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1735        req = __skb_put(skb, sizeof(*req));
1736        INIT_TP_WR(req, tid);
1737        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1738}
1739
1740/*
1741 * Queue a TID release request and if necessary schedule a work queue to
1742 * process it.
1743 */
1744static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1745                                    unsigned int tid)
1746{
1747        struct adapter *adap = container_of(t, struct adapter, tids);
1748        void **p = &t->tid_tab[tid - t->tid_base];
1749
1750        spin_lock_bh(&adap->tid_release_lock);
1751        *p = adap->tid_release_head;
1752        /* Low 2 bits encode the Tx channel number */
1753        adap->tid_release_head = (void **)((uintptr_t)p | chan);
1754        if (!adap->tid_release_task_busy) {
1755                adap->tid_release_task_busy = true;
1756                queue_work(adap->workq, &adap->tid_release_task);
1757        }
1758        spin_unlock_bh(&adap->tid_release_lock);
1759}
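
/* Illustrative sketch, not part of the driver: tid_release_head is a
 * singly linked list threaded through the tid_tab[] slots themselves,
 * with the Tx channel tagged into the two low-order bits of each node
 * pointer (tid_tab entries are pointer-aligned, so those bits are
 * otherwise zero).  Decoding a node is simply:
 *
 *	chan = (uintptr_t)node & 3;
 *	p = (void **)((uintptr_t)node & ~3UL);
 *
 * which is exactly what process_tid_release_list() below does.
 */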
1760
1761/*
1762 * Process the list of pending TID release requests.
1763 */
1764static void process_tid_release_list(struct work_struct *work)
1765{
1766        struct sk_buff *skb;
1767        struct adapter *adap;
1768
1769        adap = container_of(work, struct adapter, tid_release_task);
1770
1771        spin_lock_bh(&adap->tid_release_lock);
1772        while (adap->tid_release_head) {
1773                void **p = adap->tid_release_head;
1774                unsigned int chan = (uintptr_t)p & 3;
1775                p = (void *)p - chan;
1776
1777                adap->tid_release_head = *p;
1778                *p = NULL;
1779                spin_unlock_bh(&adap->tid_release_lock);
1780
1781                while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1782                                         GFP_KERNEL)))
1783                        schedule_timeout_uninterruptible(1);
1784
1785                mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1786                t4_ofld_send(adap, skb);
1787                spin_lock_bh(&adap->tid_release_lock);
1788        }
1789        adap->tid_release_task_busy = false;
1790        spin_unlock_bh(&adap->tid_release_lock);
1791}
1792
1793/*
1794 * Release a TID and inform HW.  If we are unable to allocate the release
1795 * message we defer to a work queue.
1796 */
1797void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
1798                      unsigned short family)
1799{
1800        struct adapter *adap = container_of(t, struct adapter, tids);
1801        struct sk_buff *skb;
1802
1803        WARN_ON(tid_out_of_range(&adap->tids, tid));
1804
1805        if (t->tid_tab[tid - adap->tids.tid_base]) {
1806                t->tid_tab[tid - adap->tids.tid_base] = NULL;
1807                atomic_dec(&t->conns_in_use);
1808                if (t->hash_base && (tid >= t->hash_base)) {
1809                        if (family == AF_INET6)
1810                                atomic_sub(2, &t->hash_tids_in_use);
1811                        else
1812                                atomic_dec(&t->hash_tids_in_use);
1813                } else {
1814                        if (family == AF_INET6)
1815                                atomic_sub(2, &t->tids_in_use);
1816                        else
1817                                atomic_dec(&t->tids_in_use);
1818                }
1819        }
1820
1821        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1822        if (likely(skb)) {
1823                mk_tid_release(skb, chan, tid);
1824                t4_ofld_send(adap, skb);
1825        } else {
1826                cxgb4_queue_tid_release(t, chan, tid);
1827        }
1827}
1828EXPORT_SYMBOL(cxgb4_remove_tid);
1829
1830/*
1831 * Allocate and initialize the TID tables.  Returns 0 on success.
1832 */
1833static int tid_init(struct tid_info *t)
1834{
1835        struct adapter *adap = container_of(t, struct adapter, tids);
1836        unsigned int max_ftids = t->nftids + t->nsftids;
1837        unsigned int natids = t->natids;
1838        unsigned int hpftid_bmap_size;
1839        unsigned int eotid_bmap_size;
1840        unsigned int stid_bmap_size;
1841        unsigned int ftid_bmap_size;
1842        size_t size;
1843
1844        stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1845        ftid_bmap_size = BITS_TO_LONGS(t->nftids);
1846        hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
1847        eotid_bmap_size = BITS_TO_LONGS(t->neotids);
1848        size = t->ntids * sizeof(*t->tid_tab) +
1849               natids * sizeof(*t->atid_tab) +
1850               t->nstids * sizeof(*t->stid_tab) +
1851               t->nsftids * sizeof(*t->stid_tab) +
1852               stid_bmap_size * sizeof(long) +
1853               t->nhpftids * sizeof(*t->hpftid_tab) +
1854               hpftid_bmap_size * sizeof(long) +
1855               max_ftids * sizeof(*t->ftid_tab) +
1856               ftid_bmap_size * sizeof(long) +
1857               t->neotids * sizeof(*t->eotid_tab) +
1858               eotid_bmap_size * sizeof(long);
1859
1860        t->tid_tab = kvzalloc(size, GFP_KERNEL);
1861        if (!t->tid_tab)
1862                return -ENOMEM;
1863
1864        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1865        t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1866        t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1867        t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1868        t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
1869        t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
1870        t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1871        t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
1872        t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
1873        spin_lock_init(&t->stid_lock);
1874        spin_lock_init(&t->atid_lock);
1875        spin_lock_init(&t->ftid_lock);
1876
1877        t->stids_in_use = 0;
1878        t->v6_stids_in_use = 0;
1879        t->sftids_in_use = 0;
1880        t->afree = NULL;
1881        t->atids_in_use = 0;
1882        atomic_set(&t->tids_in_use, 0);
1883        atomic_set(&t->conns_in_use, 0);
1884        atomic_set(&t->hash_tids_in_use, 0);
1885        atomic_set(&t->eotids_in_use, 0);
1886
1887        /* Setup the free list for atid_tab and clear the stid bitmap. */
1888        if (natids) {
1889                while (--natids)
1890                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1891                t->afree = t->atid_tab;
1892        }
1893
1894        if (is_offload(adap)) {
1895                bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1896                /* Reserve stid 0 for T4/T5 adapters */
1897                if (!t->stid_base &&
1898                    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1899                        __set_bit(0, t->stid_bmap);
1900
1901                if (t->neotids)
1902                        bitmap_zero(t->eotid_bmap, t->neotids);
1903        }
1904
1905        if (t->nhpftids)
1906                bitmap_zero(t->hpftid_bmap, t->nhpftids);
1907        bitmap_zero(t->ftid_bmap, t->nftids);
1908        return 0;
1909}
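
/* Illustrative sketch, not part of the driver: tid_init() carves every
 * table out of one kvzalloc() region, laid out as
 *
 *	tid_tab | atid_tab | stid_tab | stid_bmap | hpftid_tab |
 *	hpftid_bmap | ftid_tab | ftid_bmap | eotid_tab | eotid_bmap
 *
 * so a single kvfree(t->tid_tab) releases all of them at once.
 */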
1910
1911/**
1912 *      cxgb4_create_server - create an IP server
1913 *      @dev: the device
1914 *      @stid: the server TID
1915 *      @sip: local IP address to bind server to
1916 *      @sport: the server's TCP port
1917 *      @vlan: the VLAN header information
1918 *      @queue: queue to direct messages from this server to
1919 *
1920 *      Create an IP server for the given port and address.
1921 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1922 */
1923int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1924                        __be32 sip, __be16 sport, __be16 vlan,
1925                        unsigned int queue)
1926{
1927        unsigned int chan;
1928        struct sk_buff *skb;
1929        struct adapter *adap;
1930        struct cpl_pass_open_req *req;
1931        int ret;
1932
1933        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1934        if (!skb)
1935                return -ENOMEM;
1936
1937        adap = netdev2adap(dev);
1938        req = __skb_put(skb, sizeof(*req));
1939        INIT_TP_WR(req, 0);
1940        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1941        req->local_port = sport;
1942        req->peer_port = htons(0);
1943        req->local_ip = sip;
1944        req->peer_ip = htonl(0);
1945        chan = rxq_to_chan(&adap->sge, queue);
1946        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1947        req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1948                                SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1949        ret = t4_mgmt_tx(adap, skb);
1950        return net_xmit_eval(ret);
1951}
1952EXPORT_SYMBOL(cxgb4_create_server);
1953
1954/*      cxgb4_create_server6 - create an IPv6 server
1955 *      @dev: the device
1956 *      @stid: the server TID
1957 *      @sip: local IPv6 address to bind server to
1958 *      @sport: the server's TCP port
1959 *      @queue: queue to direct messages from this server to
1960 *
1961 *      Create an IPv6 server for the given port and address.
1962 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1963 */
1964int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1965                         const struct in6_addr *sip, __be16 sport,
1966                         unsigned int queue)
1967{
1968        unsigned int chan;
1969        struct sk_buff *skb;
1970        struct adapter *adap;
1971        struct cpl_pass_open_req6 *req;
1972        int ret;
1973
1974        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1975        if (!skb)
1976                return -ENOMEM;
1977
1978        adap = netdev2adap(dev);
1979        req = __skb_put(skb, sizeof(*req));
1980        INIT_TP_WR(req, 0);
1981        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1982        req->local_port = sport;
1983        req->peer_port = htons(0);
1984        req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1985        req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1986        req->peer_ip_hi = cpu_to_be64(0);
1987        req->peer_ip_lo = cpu_to_be64(0);
1988        chan = rxq_to_chan(&adap->sge, queue);
1989        req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1990        req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1991                                SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1992        ret = t4_mgmt_tx(adap, skb);
1993        return net_xmit_eval(ret);
1994}
1995EXPORT_SYMBOL(cxgb4_create_server6);
1996
1997int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1998                        unsigned int queue, bool ipv6)
1999{
2000        struct sk_buff *skb;
2001        struct adapter *adap;
2002        struct cpl_close_listsvr_req *req;
2003        int ret;
2004
2005        adap = netdev2adap(dev);
2006
2007        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2008        if (!skb)
2009                return -ENOMEM;
2010
2011        req = __skb_put(skb, sizeof(*req));
2012        INIT_TP_WR(req, 0);
2013        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
2014        req->reply_ctrl = htons(NO_REPLY_V(0) | LISTSVR_IPV6_V(ipv6 ? 1 : 0) |
2015                                QUEUENO_V(queue));
2016        ret = t4_mgmt_tx(adap, skb);
2017        return net_xmit_eval(ret);
2018}
2019EXPORT_SYMBOL(cxgb4_remove_server);
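
/* Illustrative sketch, not part of the driver: a hypothetical ULD
 * listener would bracket these calls with stid management, e.g.:
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, my_ctx);
 *	err = cxgb4_create_server(dev, stid, sip, sport, 0, rxq_id);
 *	...
 *	cxgb4_remove_server(dev, stid, rxq_id, false);
 *	cxgb4_free_stid(&adap->tids, stid, PF_INET);
 *
 * where my_ctx, sip, sport and rxq_id are hypothetical caller values,
 * and the actual open/close status arrives asynchronously via the
 * CPL_PASS_OPEN_RPL / CPL_CLOSE_LISTSRV_RPL messages.
 */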
2020
2021/**
2022 *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2023 *      @mtus: the HW MTU table
2024 *      @mtu: the target MTU
2025 *      @idx: index of selected entry in the MTU table
2026 *
2027 *      Returns the index and the value in the HW MTU table that is closest to
2028 *      but does not exceed @mtu, unless @mtu is smaller than any value in the
2029 *      table, in which case that smallest available value is selected.
2030 */
2031unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2032                            unsigned int *idx)
2033{
2034        unsigned int i = 0;
2035
2036        while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2037                ++i;
2038        if (idx)
2039                *idx = i;
2040        return mtus[i];
2041}
2042EXPORT_SYMBOL(cxgb4_best_mtu);
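
/* Illustrative example, hypothetical MTU table: if mtus[] starts with
 * { 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, ... }, then
 *
 *	cxgb4_best_mtu(mtus, 1400, &idx)  ->  1280, idx = 6
 *	cxgb4_best_mtu(mtus, 64, &idx)    ->    88, idx = 0
 *
 * i.e. the largest entry not exceeding the target, or the smallest
 * entry in the table when the target is below all of them.
 */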
2043
2044/**
2045 *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2046 *     @mtus: the HW MTU table
2047 *     @header_size: Header Size
2048 *     @data_size_max: maximum Data Segment Size
2049 *     @data_size_align: desired Data Segment Size Alignment (2^N)
2050 *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
2051 *
2052 *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
2053 *     MTU Table based solely on a Maximum MTU parameter, we break that
2054 *     parameter up into a Header Size and Maximum Data Segment Size, and
2055 *     provide a desired Data Segment Size Alignment.  If we find an MTU in
2056 *     the Hardware MTU Table which will result in a Data Segment Size with
2057 *     the requested alignment _and_ that MTU isn't "too far" from the
2058 *     closest MTU, then we'll return that rather than the closest MTU.
2059 */
2060unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
2061                                    unsigned short header_size,
2062                                    unsigned short data_size_max,
2063                                    unsigned short data_size_align,
2064                                    unsigned int *mtu_idxp)
2065{
2066        unsigned short max_mtu = header_size + data_size_max;
2067        unsigned short data_size_align_mask = data_size_align - 1;
2068        int mtu_idx, aligned_mtu_idx;
2069
2070        /* Scan the MTU Table till we find an MTU which is larger than our
2071         * Maximum MTU or we reach the end of the table.  Along the way,
2072         * record the last MTU found, if any, which will result in a Data
2073         * Segment Length matching the requested alignment.
2074         */
2075        for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
2076                unsigned short data_size = mtus[mtu_idx] - header_size;
2077
2078                /* If this MTU minus the Header Size would result in a
2079                 * Data Segment Size of the desired alignment, remember it.
2080                 */
2081                if ((data_size & data_size_align_mask) == 0)
2082                        aligned_mtu_idx = mtu_idx;
2083
2084                /* If we're not at the end of the Hardware MTU Table and the
2085                 * next element is larger than our Maximum MTU, drop out of
2086                 * the loop.
2087                 */
2088                if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
2089                        break;
2090        }
2091
2092        /* If we fell out of the loop because we ran to the end of the table,
2093         * then we just have to use the last [largest] entry.
2094         */
2095        if (mtu_idx == NMTUS)
2096                mtu_idx--;
2097
2098        /* If we found an MTU which resulted in the requested Data Segment
2099         * Length alignment and that's "not far" from the largest MTU which is
2100         * less than or equal to the maximum MTU, then use that.
2101         */
2102        if (aligned_mtu_idx >= 0 &&
2103            mtu_idx - aligned_mtu_idx <= 1)
2104                mtu_idx = aligned_mtu_idx;
2105
2106        /* If the caller has passed in an MTU Index pointer, pass the
2107         * MTU Index back.  Return the MTU value.
2108         */
2109        if (mtu_idxp)
2110                *mtu_idxp = mtu_idx;
2111        return mtus[mtu_idx];
2112}
2113EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
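
/* Illustrative example, hypothetical values: with header_size = 40 and
 * data_size_align = 512, an MTU of 1064 (40 + 2 * 512) gives a data
 * segment with the requested alignment, so it is preferred over a larger
 * "closest" MTU as long as it sits within one table index of that
 * closest entry; otherwise the closest entry wins.
 */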
2114
2115/**
2116 *      cxgb4_port_chan - get the HW channel of a port
2117 *      @dev: the net device for the port
2118 *
2119 *      Return the HW Tx channel of the given port.
2120 */
2121unsigned int cxgb4_port_chan(const struct net_device *dev)
2122{
2123        return netdev2pinfo(dev)->tx_chan;
2124}
2125EXPORT_SYMBOL(cxgb4_port_chan);
2126
2127/**
2128 *      cxgb4_port_e2cchan - get the HW c-channel of a port
2129 *      @dev: the net device for the port
2130 *
2131 *      Return the HW RX c-channel of the given port.
2132 */
2133unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
2134{
2135        return netdev2pinfo(dev)->rx_cchan;
2136}
2137EXPORT_SYMBOL(cxgb4_port_e2cchan);
2138
2139unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2140{
2141        struct adapter *adap = netdev2adap(dev);
2142        u32 v1, v2, lp_count, hp_count;
2143
2144        v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2145        v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2146        if (is_t4(adap->params.chip)) {
2147                lp_count = LP_COUNT_G(v1);
2148                hp_count = HP_COUNT_G(v1);
2149        } else {
2150                lp_count = LP_COUNT_T5_G(v1);
2151                hp_count = HP_COUNT_T5_G(v2);
2152        }
2153        return lpfifo ? lp_count : hp_count;
2154}
2155EXPORT_SYMBOL(cxgb4_dbfifo_count);
2156
2157/**
2158 *      cxgb4_port_viid - get the VI id of a port
2159 *      @dev: the net device for the port
2160 *
2161 *      Return the VI id of the given port.
2162 */
2163unsigned int cxgb4_port_viid(const struct net_device *dev)
2164{
2165        return netdev2pinfo(dev)->viid;
2166}
2167EXPORT_SYMBOL(cxgb4_port_viid);
2168
2169/**
2170 *      cxgb4_port_idx - get the index of a port
2171 *      @dev: the net device for the port
2172 *
2173 *      Return the index of the given port.
2174 */
2175unsigned int cxgb4_port_idx(const struct net_device *dev)
2176{
2177        return netdev2pinfo(dev)->port_id;
2178}
2179EXPORT_SYMBOL(cxgb4_port_idx);
2180
2181void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2182                         struct tp_tcp_stats *v6)
2183{
2184        struct adapter *adap = pci_get_drvdata(pdev);
2185
2186        spin_lock(&adap->stats_lock);
2187        t4_tp_get_tcp_stats(adap, v4, v6, false);
2188        spin_unlock(&adap->stats_lock);
2189}
2190EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2191
2192void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2193                      const unsigned int *pgsz_order)
2194{
2195        struct adapter *adap = netdev2adap(dev);
2196
2197        t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2198        t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2199                     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2200                     HPZ3_V(pgsz_order[3]));
2201}
2202EXPORT_SYMBOL(cxgb4_iscsi_init);
2203
2204int cxgb4_flush_eq_cache(struct net_device *dev)
2205{
2206        struct adapter *adap = netdev2adap(dev);
2207
2208        return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
2209}
2210EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2211
2212static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2213{
2214        u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
2215        __be64 indices;
2216        int ret;
2217
2218        spin_lock(&adap->win0_lock);
2219        ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2220                           sizeof(indices), (__be32 *)&indices,
2221                           T4_MEMORY_READ);
2222        spin_unlock(&adap->win0_lock);
2223        if (!ret) {
2224                *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2225                *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2226        }
2227        return ret;
2228}
2229
2230int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2231                        u16 size)
2232{
2233        struct adapter *adap = netdev2adap(dev);
2234        u16 hw_pidx, hw_cidx;
2235        int ret;
2236
2237        ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2238        if (ret)
2239                goto out;
2240
2241        if (pidx != hw_pidx) {
2242                u16 delta;
2243                u32 val;
2244
2245                if (pidx >= hw_pidx)
2246                        delta = pidx - hw_pidx;
2247                else
2248                        delta = size - hw_pidx + pidx;
2249
2250                if (is_t4(adap->params.chip))
2251                        val = PIDX_V(delta);
2252                else
2253                        val = PIDX_T5_V(delta);
2254                wmb();
2255                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2256                             QID_V(qid) | val);
2257        }
2258out:
2259        return ret;
2260}
2261EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
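
/* Illustrative example: the producer-index delta accounts for ring
 * wrap-around.  With size = 1024, pidx = 3 and hw_pidx = 1020,
 *
 *	delta = size - hw_pidx + pidx = 1024 - 1020 + 3 = 7
 *
 * i.e. seven descriptors the hardware has not yet been told about.
 */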
2262
2263int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2264{
2265        u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
2266        u32 edc0_end, edc1_end, mc0_end, mc1_end;
2267        u32 offset, memtype, memaddr;
2268        struct adapter *adap;
2269        u32 hma_size = 0;
2270        int ret;
2271
2272        adap = netdev2adap(dev);
2273
2274        offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2275
2276        /* Figure out where the offset lands in the Memory Type/Address scheme.
2277         * This code assumes that the memory is laid out starting at offset 0
2278         * with no breaks as: EDC0, EDC1, HMA or MC0, MC1. All cards have both
2279         * EDC0 and EDC1.  Some cards will have neither MC0 nor MC1, most cards
2280         * have MC0, and some have both MC0 and MC1.
2281         */
2282        size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2283        edc0_size = EDRAM0_SIZE_G(size) << 20;
2284        size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2285        edc1_size = EDRAM1_SIZE_G(size) << 20;
2286        size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2287        mc0_size = EXT_MEM0_SIZE_G(size) << 20;
2288
2289        if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
2290                size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2291                hma_size = EXT_MEM1_SIZE_G(size) << 20;
2292        }
2293        edc0_end = edc0_size;
2294        edc1_end = edc0_end + edc1_size;
2295        mc0_end = edc1_end + mc0_size;
2296
2297        if (offset < edc0_end) {
2298                memtype = MEM_EDC0;
2299                memaddr = offset;
2300        } else if (offset < edc1_end) {
2301                memtype = MEM_EDC1;
2302                memaddr = offset - edc0_end;
2303        } else {
2304                if (hma_size && (offset < (edc1_end + hma_size))) {
2305                        memtype = MEM_HMA;
2306                        memaddr = offset - edc1_end;
2307                } else if (offset < mc0_end) {
2308                        memtype = MEM_MC0;
2309                        memaddr = offset - edc1_end;
2310                } else if (is_t5(adap->params.chip)) {
2311                        size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2312                        mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2313                        mc1_end = mc0_end + mc1_size;
2314                        if (offset < mc1_end) {
2315                                memtype = MEM_MC1;
2316                                memaddr = offset - mc0_end;
2317                        } else {
2318                                /* offset beyond the end of any memory */
2319                                goto err;
2320                        }
2321                } else {
2322                        /* T4/T6 only has a single memory channel */
2323                        goto err;
2324                }
2325        }
2326
2327        spin_lock(&adap->win0_lock);
2328        ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2329        spin_unlock(&adap->win0_lock);
2330        return ret;
2331
2332err:
2333        dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2334                stag, offset);
2335        return -EINVAL;
2336}
2337EXPORT_SYMBOL(cxgb4_read_tpte);
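
/* Illustrative example, hypothetical sizes: with 512 MB in each of EDC0
 * and EDC1, edc0_end = 0x20000000 and edc1_end = 0x40000000, so a stag
 * offset of 0x30000000 resolves to
 *
 *	memtype = MEM_EDC1, memaddr = offset - edc0_end = 0x10000000
 *
 * Larger offsets fall through to HMA or MC0 (and MC1 on T5) in the same
 * way.
 */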
2338
2339u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2340{
2341        u32 hi, lo;
2342        struct adapter *adap;
2343
2344        adap = netdev2adap(dev);
2345        lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2346        hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2347
2348        return ((u64)hi << 32) | (u64)lo;
2349}
2350EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2351
2352int cxgb4_bar2_sge_qregs(struct net_device *dev,
2353                         unsigned int qid,
2354                         enum cxgb4_bar2_qtype qtype,
2355                         int user,
2356                         u64 *pbar2_qoffset,
2357                         unsigned int *pbar2_qid)
2358{
2359        return t4_bar2_sge_qregs(netdev2adap(dev),
2360                                 qid,
2361                                 (qtype == CXGB4_BAR2_QTYPE_EGRESS
2362                                  ? T4_BAR2_QTYPE_EGRESS
2363                                  : T4_BAR2_QTYPE_INGRESS),
2364                                 user,
2365                                 pbar2_qoffset,
2366                                 pbar2_qid);
2367}
2368EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2369
2370static struct pci_driver cxgb4_driver;
2371
2372static void check_neigh_update(struct neighbour *neigh)
2373{
2374        const struct device *parent;
2375        const struct net_device *netdev = neigh->dev;
2376
2377        if (is_vlan_dev(netdev))
2378                netdev = vlan_dev_real_dev(netdev);
2379        parent = netdev->dev.parent;
2380        if (parent && parent->driver == &cxgb4_driver.driver)
2381                t4_l2t_update(dev_get_drvdata(parent), neigh);
2382}
2383
2384static int netevent_cb(struct notifier_block *nb, unsigned long event,
2385                       void *data)
2386{
2387        switch (event) {
2388        case NETEVENT_NEIGH_UPDATE:
2389                check_neigh_update(data);
2390                break;
2391        case NETEVENT_REDIRECT:
2392        default:
2393                break;
2394        }
2395        return 0;
2396}
2397
2398static bool netevent_registered;
2399static struct notifier_block cxgb4_netevent_nb = {
2400        .notifier_call = netevent_cb
2401};
2402
2403static void drain_db_fifo(struct adapter *adap, int usecs)
2404{
2405        u32 v1, v2, lp_count, hp_count;
2406
2407        do {
2408                v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2409                v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2410                if (is_t4(adap->params.chip)) {
2411                        lp_count = LP_COUNT_G(v1);
2412                        hp_count = HP_COUNT_G(v1);
2413                } else {
2414                        lp_count = LP_COUNT_T5_G(v1);
2415                        hp_count = HP_COUNT_T5_G(v2);
2416                }
2417
2418                if (lp_count == 0 && hp_count == 0)
2419                        break;
2420                set_current_state(TASK_UNINTERRUPTIBLE);
2421                schedule_timeout(usecs_to_jiffies(usecs));
2422        } while (1);
2423}
2424
2425static void disable_txq_db(struct sge_txq *q)
2426{
2427        unsigned long flags;
2428
2429        spin_lock_irqsave(&q->db_lock, flags);
2430        q->db_disabled = 1;
2431        spin_unlock_irqrestore(&q->db_lock, flags);
2432}
2433
2434static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2435{
2436        spin_lock_irq(&q->db_lock);
2437        if (q->db_pidx_inc) {
2438                /* Make sure that all writes to the TX descriptors
2439                 * are committed before we tell HW about them.
2440                 */
2441                wmb();
2442                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2443                             QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2444                q->db_pidx_inc = 0;
2445        }
2446        q->db_disabled = 0;
2447        spin_unlock_irq(&q->db_lock);
2448}
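
/* Illustrative sketch, not part of the driver: while a queue's doorbell
 * is disabled, the Tx path is expected to accumulate pending producer
 * index updates in q->db_pidx_inc rather than ringing the doorbell, so
 * recovery follows the pattern
 *
 *	disable_txq_db(q);
 *	... Tx traffic bumps q->db_pidx_inc under db_lock ...
 *	enable_txq_db(adap, q);
 *
 * with enable_txq_db() replaying the whole increment in one write.
 */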
2449
2450static void disable_dbs(struct adapter *adap)
2451{
2452        int i;
2453
2454        for_each_ethrxq(&adap->sge, i)
2455                disable_txq_db(&adap->sge.ethtxq[i].q);
2456        if (is_offload(adap)) {
2457                struct sge_uld_txq_info *txq_info =
2458                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2459
2460                if (txq_info) {
2461                        for_each_ofldtxq(&adap->sge, i) {
2462                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2463
2464                                disable_txq_db(&txq->q);
2465                        }
2466                }
2467        }
2468        for_each_port(adap, i)
2469                disable_txq_db(&adap->sge.ctrlq[i].q);
2470}
2471
2472static void enable_dbs(struct adapter *adap)
2473{
2474        int i;
2475
2476        for_each_ethrxq(&adap->sge, i)
2477                enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2478        if (is_offload(adap)) {
2479                struct sge_uld_txq_info *txq_info =
2480                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2481
2482                if (txq_info) {
2483                        for_each_ofldtxq(&adap->sge, i) {
2484                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2485
2486                                enable_txq_db(adap, &txq->q);
2487                        }
2488                }
2489        }
2490        for_each_port(adap, i)
2491                enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2492}
2493
2494static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2495{
2496        enum cxgb4_uld type = CXGB4_ULD_RDMA;
2497
2498        if (adap->uld && adap->uld[type].handle)
2499                adap->uld[type].control(adap->uld[type].handle, cmd);
2500}
2501
2502static void process_db_full(struct work_struct *work)
2503{
2504        struct adapter *adap;
2505
2506        adap = container_of(work, struct adapter, db_full_task);
2507
2508        drain_db_fifo(adap, dbfifo_drain_delay);
2509        enable_dbs(adap);
2510        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2511        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2512                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2513                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2514                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2515        else
2516                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2517                                 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2518}
2519
2520static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2521{
2522        u16 hw_pidx, hw_cidx;
2523        int ret;
2524
2525        spin_lock_irq(&q->db_lock);
2526        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2527        if (ret)
2528                goto out;
2529        if (q->db_pidx != hw_pidx) {
2530                u16 delta;
2531                u32 val;
2532
2533                if (q->db_pidx >= hw_pidx)
2534                        delta = q->db_pidx - hw_pidx;
2535                else
2536                        delta = q->size - hw_pidx + q->db_pidx;
2537
2538                if (is_t4(adap->params.chip))
2539                        val = PIDX_V(delta);
2540                else
2541                        val = PIDX_T5_V(delta);
2542                wmb();
2543                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2544                             QID_V(q->cntxt_id) | val);
2545        }
2546out:
2547        q->db_disabled = 0;
2548        q->db_pidx_inc = 0;
2549        spin_unlock_irq(&q->db_lock);
2550        if (ret)
2551                CH_WARN(adap, "DB drop recovery failed.\n");
2552}
2553
2554static void recover_all_queues(struct adapter *adap)
2555{
2556        int i;
2557
2558        for_each_ethrxq(&adap->sge, i)
2559                sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2560        if (is_offload(adap)) {
2561                struct sge_uld_txq_info *txq_info =
2562                        adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2563                if (txq_info) {
2564                        for_each_ofldtxq(&adap->sge, i) {
2565                                struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2566
2567                                sync_txq_pidx(adap, &txq->q);
2568                        }
2569                }
2570        }
2571        for_each_port(adap, i)
2572                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2573}
2574
2575static void process_db_drop(struct work_struct *work)
2576{
2577        struct adapter *adap;
2578
2579        adap = container_of(work, struct adapter, db_drop_task);
2580
2581        if (is_t4(adap->params.chip)) {
2582                drain_db_fifo(adap, dbfifo_drain_delay);
2583                notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2584                drain_db_fifo(adap, dbfifo_drain_delay);
2585                recover_all_queues(adap);
2586                drain_db_fifo(adap, dbfifo_drain_delay);
2587                enable_dbs(adap);
2588                notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2589        } else if (is_t5(adap->params.chip)) {
2590                u32 dropped_db = t4_read_reg(adap, 0x010ac);
2591                u16 qid = (dropped_db >> 15) & 0x1ffff;
2592                u16 pidx_inc = dropped_db & 0x1fff;
2593                u64 bar2_qoffset;
2594                unsigned int bar2_qid;
2595                int ret;
2596
2597                ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2598                                        0, &bar2_qoffset, &bar2_qid);
2599                if (ret)
2600                        dev_err(adap->pdev_dev, "doorbell drop recovery: qid=%d, pidx_inc=%d\n",
2601                                qid, pidx_inc);
2602                else
2603                        writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2604                               adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2605
2606                /* Re-enable BAR2 WC */
2607                t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
2608        }
2609
2610        if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2611                t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2612}
2613
2614void t4_db_full(struct adapter *adap)
2615{
2616        if (is_t4(adap->params.chip)) {
2617                disable_dbs(adap);
2618                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2619                t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2620                                 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2621                queue_work(adap->workq, &adap->db_full_task);
2622        }
2623}
2624
2625void t4_db_dropped(struct adapter *adap)
2626{
2627        if (is_t4(adap->params.chip)) {
2628                disable_dbs(adap);
2629                notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2630        }
2631        queue_work(adap->workq, &adap->db_drop_task);
2632}
2633
2634void t4_register_netevent_notifier(void)
2635{
2636        if (!netevent_registered) {
2637                register_netevent_notifier(&cxgb4_netevent_nb);
2638                netevent_registered = true;
2639        }
2640}
2641
2642static void detach_ulds(struct adapter *adap)
2643{
2644        unsigned int i;
2645
2646        if (!is_uld(adap))
2647                return;
2648
2649        mutex_lock(&uld_mutex);
2650        list_del(&adap->list_node);
2651
2652        for (i = 0; i < CXGB4_ULD_MAX; i++)
2653                if (adap->uld && adap->uld[i].handle)
2654                        adap->uld[i].state_change(adap->uld[i].handle,
2655                                             CXGB4_STATE_DETACH);
2656
2657        if (netevent_registered && list_empty(&adapter_list)) {
2658                unregister_netevent_notifier(&cxgb4_netevent_nb);
2659                netevent_registered = false;
2660        }
2661        mutex_unlock(&uld_mutex);
2662}
2663
2664static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2665{
2666        unsigned int i;
2667
2668        mutex_lock(&uld_mutex);
2669        for (i = 0; i < CXGB4_ULD_MAX; i++)
2670                if (adap->uld && adap->uld[i].handle)
2671                        adap->uld[i].state_change(adap->uld[i].handle,
2672                                                  new_state);
2673        mutex_unlock(&uld_mutex);
2674}
2675
2676#if IS_ENABLED(CONFIG_IPV6)
2677static int cxgb4_inet6addr_handler(struct notifier_block *this,
2678                                   unsigned long event, void *data)
2679{
2680        struct inet6_ifaddr *ifa = data;
2681        struct net_device *event_dev = ifa->idev->dev;
2682        const struct device *parent = NULL;
2683#if IS_ENABLED(CONFIG_BONDING)
2684        struct adapter *adap;
2685#endif
2686        if (is_vlan_dev(event_dev))
2687                event_dev = vlan_dev_real_dev(event_dev);
2688#if IS_ENABLED(CONFIG_BONDING)
2689        if (event_dev->flags & IFF_MASTER) {
2690                list_for_each_entry(adap, &adapter_list, list_node) {
2691                        switch (event) {
2692                        case NETDEV_UP:
2693                                cxgb4_clip_get(adap->port[0],
2694                                               (const u32 *)ifa, 1);
2695                                break;
2696                        case NETDEV_DOWN:
2697                                cxgb4_clip_release(adap->port[0],
2698                                                   (const u32 *)ifa, 1);
2699                                break;
2700                        default:
2701                                break;
2702                        }
2703                }
2704                return NOTIFY_OK;
2705        }
2706#endif
2707
2708        if (event_dev)
2709                parent = event_dev->dev.parent;
2710
2711        if (parent && parent->driver == &cxgb4_driver.driver) {
2712                switch (event) {
2713                case NETDEV_UP:
2714                        cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2715                        break;
2716                case NETDEV_DOWN:
2717                        cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2718                        break;
2719                default:
2720                        break;
2721                }
2722        }
2723        return NOTIFY_OK;
2724}
2725
2726static bool inet6addr_registered;
2727static struct notifier_block cxgb4_inet6addr_notifier = {
2728        .notifier_call = cxgb4_inet6addr_handler
2729};
2730
2731static void update_clip(const struct adapter *adap)
2732{
2733        int i;
2734        struct net_device *dev;
2735        int ret;
2736
2737        rcu_read_lock();
2738
2739        for (i = 0; i < MAX_NPORTS; i++) {
2740                dev = adap->port[i];
2741                ret = 0;
2742
2743                if (dev)
2744                        ret = cxgb4_update_root_dev_clip(dev);
2745
2746                if (ret < 0)
2747                        break;
2748        }
2749        rcu_read_unlock();
2750}
2751#endif /* IS_ENABLED(CONFIG_IPV6) */
2752
2753/**
2754 *      cxgb_up - enable the adapter
2755 *      @adap: adapter being enabled
2756 *
2757 *      Called when the first port is enabled, this function performs the
2758 *      actions necessary to make an adapter operational, such as completing
2759 *      the initialization of HW modules, and enabling interrupts.
2760 *
2761 *      Must be called with the rtnl lock held.
2762 */
2763static int cxgb_up(struct adapter *adap)
2764{
2765        struct sge *s = &adap->sge;
2766        int err;
2767
2768        mutex_lock(&uld_mutex);
2769        err = setup_sge_queues(adap);
2770        if (err)
2771                goto rel_lock;
2772        err = setup_rss(adap);
2773        if (err)
2774                goto freeq;
2775
2776        if (adap->flags & CXGB4_USING_MSIX) {
2777                if (s->nd_msix_idx < 0) {
2778                        err = -ENOMEM;
2779                        goto irq_err;
2780                }
2781
2782                err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2783                                  t4_nondata_intr, 0,
2784                                  adap->msix_info[s->nd_msix_idx].desc, adap);
2785                if (err)
2786                        goto irq_err;
2787
2788                err = request_msix_queue_irqs(adap);
2789                if (err)
2790                        goto irq_err_free_nd_msix;
2791        } else {
2792                err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2793                                  (adap->flags & CXGB4_USING_MSI) ? 0
2794                                                                  : IRQF_SHARED,
2795                                  adap->port[0]->name, adap);
2796                if (err)
2797                        goto irq_err;
2798        }
2799
2800        enable_rx(adap);
2801        t4_sge_start(adap);
2802        t4_intr_enable(adap);
2803        adap->flags |= CXGB4_FULL_INIT_DONE;
2804        mutex_unlock(&uld_mutex);
2805
2806        notify_ulds(adap, CXGB4_STATE_UP);
2807#if IS_ENABLED(CONFIG_IPV6)
2808        update_clip(adap);
2809#endif
2810        return err;
2811
2812irq_err_free_nd_msix:
2813        free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2814irq_err:
2815        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2816freeq:
2817        t4_free_sge_resources(adap);
2818rel_lock:
2819        mutex_unlock(&uld_mutex);
2820        return err;
2821}
2822
2823static void cxgb_down(struct adapter *adapter)
2824{
2825        cancel_work_sync(&adapter->tid_release_task);
2826        cancel_work_sync(&adapter->db_full_task);
2827        cancel_work_sync(&adapter->db_drop_task);
2828        adapter->tid_release_task_busy = false;
2829        adapter->tid_release_head = NULL;
2830
2831        t4_sge_stop(adapter);
2832        t4_free_sge_resources(adapter);
2833
2834        adapter->flags &= ~CXGB4_FULL_INIT_DONE;
2835}
2836
2837/*
2838 * net_device operations
2839 */
2840static int cxgb_open(struct net_device *dev)
2841{
2842        struct port_info *pi = netdev_priv(dev);
2843        struct adapter *adapter = pi->adapter;
2844        int err;
2845
2846        netif_carrier_off(dev);
2847
2848        if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
2849                err = cxgb_up(adapter);
2850                if (err < 0)
2851                        return err;
2852        }
2853
2854        /* It's possible that the basic port information could have
2855         * changed since we first read it.
2856         */
2857        err = t4_update_port_info(pi);
2858        if (err < 0)
2859                return err;
2860
2861        err = link_start(dev);
2862        if (err)
2863                return err;
2864
2865        if (pi->nmirrorqsets) {
2866                mutex_lock(&pi->vi_mirror_mutex);
2867                err = cxgb4_port_mirror_alloc_queues(dev);
2868                if (err)
2869                        goto out_unlock;
2870
2871                err = cxgb4_port_mirror_start(dev);
2872                if (err)
2873                        goto out_free_queues;
2874                mutex_unlock(&pi->vi_mirror_mutex);
2875        }
2876
2877        netif_tx_start_all_queues(dev);
2878        return 0;
2879
2880out_free_queues:
2881        cxgb4_port_mirror_free_queues(dev);
2882
2883out_unlock:
2884        mutex_unlock(&pi->vi_mirror_mutex);
2885        return err;
2886}
2887
2888static int cxgb_close(struct net_device *dev)
2889{
2890        struct port_info *pi = netdev_priv(dev);
2891        struct adapter *adapter = pi->adapter;
2892        int ret;
2893
2894        netif_tx_stop_all_queues(dev);
2895        netif_carrier_off(dev);
2896        ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2897                                  false, false, false);
2898#ifdef CONFIG_CHELSIO_T4_DCB
2899        cxgb4_dcb_reset(dev);
2900        dcb_tx_queue_prio_enable(dev, false);
2901#endif
2902        if (ret)
2903                return ret;
2904
2905        if (pi->nmirrorqsets) {
2906                mutex_lock(&pi->vi_mirror_mutex);
2907                cxgb4_port_mirror_stop(dev);
2908                cxgb4_port_mirror_free_queues(dev);
2909                mutex_unlock(&pi->vi_mirror_mutex);
2910        }
2911
2912        return 0;
2913}
2914
2915int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2916                __be32 sip, __be16 sport, __be16 vlan,
2917                unsigned int queue, unsigned char port, unsigned char mask)
2918{
2919        int ret;
2920        struct filter_entry *f;
2921        struct adapter *adap;
2922        int i;
2923        u8 *val;
2924
2925        adap = netdev2adap(dev);
2926
2927        /* Adjust stid to correct filter index */
2928        stid -= adap->tids.sftid_base;
2929        stid += adap->tids.nftids;
2930
2931        /* Check to make sure the filter requested is writable ...
2932         */
2933        f = &adap->tids.ftid_tab[stid];
2934        ret = writable_filter(f);
2935        if (ret)
2936                return ret;
2937
2938        /* Clear out any old resources being used by the filter before
2939         * we start constructing the new filter.
2940         */
2941        if (f->valid)
2942                clear_filter(adap, f);
2943
2944        /* Clear out filter specifications */
2945        memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2946        f->fs.val.lport = be16_to_cpu(sport);
2947        f->fs.mask.lport  = ~0;
2948        val = (u8 *)&sip;
2949        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2950                for (i = 0; i < 4; i++) {
2951                        f->fs.val.lip[i] = val[i];
2952                        f->fs.mask.lip[i] = ~0;
2953                }
2954                if (adap->params.tp.vlan_pri_map & PORT_F) {
2955                        f->fs.val.iport = port;
2956                        f->fs.mask.iport = mask;
2957                }
2958        }
2959
2960        if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2961                f->fs.val.proto = IPPROTO_TCP;
2962                f->fs.mask.proto = ~0;
2963        }
2964
2965        f->fs.dirsteer = 1;
2966        f->fs.iq = queue;
2967        /* Mark filter as locked */
2968        f->locked = 1;
2969        f->fs.rpttid = 1;
2970
2971        /* Save the actual tid. We need this to get the corresponding
2972         * filter entry structure in filter_rpl.
2973         */
2974        f->tid = stid + adap->tids.ftid_base;
2975        ret = set_filter_wr(adap, stid);
2976        if (ret) {
2977                clear_filter(adap, f);
2978                return ret;
2979        }
2980
2981        return 0;
2982}
2983EXPORT_SYMBOL(cxgb4_create_server_filter);
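
/* Illustrative example, hypothetical bases: the stid is converted to an
 * index into ftid_tab[], which holds the nftids normal filters followed
 * by the server filters.  With sftid_base = 10240 and nftids = 496, an
 * incoming stid of 10242 selects
 *
 *	stid - sftid_base + nftids = 2 + 496 = 498
 *
 * and cxgb4_remove_server_filter() below applies the same adjustment.
 */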
2984
2985int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2986                unsigned int queue, bool ipv6)
2987{
2988        struct filter_entry *f;
2989        struct adapter *adap;
2990
2991        adap = netdev2adap(dev);
2992
2993        /* Adjust stid to correct filter index */
2994        stid -= adap->tids.sftid_base;
2995        stid += adap->tids.nftids;
2996
2997        f = &adap->tids.ftid_tab[stid];
2998        /* Unlock the filter */
2999        f->locked = 0;
3000
3001        return delete_filter(adap, stid);
3002}
3003EXPORT_SYMBOL(cxgb4_remove_server_filter);
3004
3005static void cxgb_get_stats(struct net_device *dev,
3006                           struct rtnl_link_stats64 *ns)
3007{
3008        struct port_stats stats;
3009        struct port_info *p = netdev_priv(dev);
3010        struct adapter *adapter = p->adapter;
3011
3012        /* Block retrieving statistics during EEH error
3013         * recovery. Otherwise, the recovery might fail
3014         * and the PCI device will be removed permanently.
3015         */
3016        spin_lock(&adapter->stats_lock);
3017        if (!netif_device_present(dev)) {
3018                spin_unlock(&adapter->stats_lock);
3019                return;
3020        }
3021        t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
3022                                 &p->stats_base);
3023        spin_unlock(&adapter->stats_lock);
3024
3025        ns->tx_bytes   = stats.tx_octets;
3026        ns->tx_packets = stats.tx_frames;
3027        ns->rx_bytes   = stats.rx_octets;
3028        ns->rx_packets = stats.rx_frames;
3029        ns->multicast  = stats.rx_mcast_frames;
3030
3031        /* detailed rx_errors */
3032        ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3033                               stats.rx_runt;
3034        ns->rx_over_errors   = 0;
3035        ns->rx_crc_errors    = stats.rx_fcs_err;
3036        ns->rx_frame_errors  = stats.rx_symbol_err;
3037        ns->rx_dropped       = stats.rx_ovflow0 + stats.rx_ovflow1 +
3038                               stats.rx_ovflow2 + stats.rx_ovflow3 +
3039                               stats.rx_trunc0 + stats.rx_trunc1 +
3040                               stats.rx_trunc2 + stats.rx_trunc3;
3041        ns->rx_missed_errors = 0;
3042
3043        /* detailed tx_errors */
3044        ns->tx_aborted_errors   = 0;
3045        ns->tx_carrier_errors   = 0;
3046        ns->tx_fifo_errors      = 0;
3047        ns->tx_heartbeat_errors = 0;
3048        ns->tx_window_errors    = 0;
3049
3050        ns->tx_errors = stats.tx_error_frames;
3051        ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3052                ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3053}
3054
3055static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3056{
3057        unsigned int mbox;
3058        int ret = 0, prtad, devad;
3059        struct port_info *pi = netdev_priv(dev);
3060        struct adapter *adapter = pi->adapter;
3061        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3062
3063        switch (cmd) {
3064        case SIOCGMIIPHY:
3065                if (pi->mdio_addr < 0)
3066                        return -EOPNOTSUPP;
3067                data->phy_id = pi->mdio_addr;
3068                break;
3069        case SIOCGMIIREG:
3070        case SIOCSMIIREG:
3071                if (mdio_phy_id_is_c45(data->phy_id)) {
3072                        prtad = mdio_phy_id_prtad(data->phy_id);
3073                        devad = mdio_phy_id_devad(data->phy_id);
3074                } else if (data->phy_id < 32) {
3075                        prtad = data->phy_id;
3076                        devad = 0;
3077                        data->reg_num &= 0x1f;
3078                } else
3079                        return -EINVAL;
3080
3081                mbox = pi->adapter->pf;
3082                if (cmd == SIOCGMIIREG)
3083                        ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3084                                         data->reg_num, &data->val_out);
3085                else
3086                        ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3087                                         data->reg_num, data->val_in);
3088                break;
3089        case SIOCGHWTSTAMP:
3090                return copy_to_user(req->ifr_data, &pi->tstamp_config,
3091                                    sizeof(pi->tstamp_config)) ?
3092                        -EFAULT : 0;
3093        case SIOCSHWTSTAMP:
3094                if (copy_from_user(&pi->tstamp_config, req->ifr_data,
3095                                   sizeof(pi->tstamp_config)))
3096                        return -EFAULT;
3097
3098                if (!is_t4(adapter->params.chip)) {
3099                        switch (pi->tstamp_config.tx_type) {
3100                        case HWTSTAMP_TX_OFF:
3101                        case HWTSTAMP_TX_ON:
3102                                break;
3103                        default:
3104                                return -ERANGE;
3105                        }
3106
3107                        switch (pi->tstamp_config.rx_filter) {
3108                        case HWTSTAMP_FILTER_NONE:
3109                                pi->rxtstamp = false;
3110                                break;
3111                        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3112                        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3113                                cxgb4_ptprx_timestamping(pi, pi->port_id,
3114                                                         PTP_TS_L4);
3115                                break;
3116                        case HWTSTAMP_FILTER_PTP_V2_EVENT:
3117                                cxgb4_ptprx_timestamping(pi, pi->port_id,
3118                                                         PTP_TS_L2_L4);
3119                                break;
3120                        case HWTSTAMP_FILTER_ALL:
3121                        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3122                        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3123                        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3124                        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3125                                pi->rxtstamp = true;
3126                                break;
3127                        default:
3128                                pi->tstamp_config.rx_filter =
3129                                        HWTSTAMP_FILTER_NONE;
3130                                return -ERANGE;
3131                        }
3132
3133                        if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
3134                            (pi->tstamp_config.rx_filter ==
3135                                HWTSTAMP_FILTER_NONE)) {
3136                                if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
3137                                        pi->ptp_enable = false;
3138                        }
3139
3140                        if (pi->tstamp_config.rx_filter !=
3141                                HWTSTAMP_FILTER_NONE) {
3142                                if (cxgb4_ptp_redirect_rx_packet(adapter,
3143                                                                 pi) >= 0)
3144                                        pi->ptp_enable = true;
3145                        }
3146                } else {
3147                        /* For T4 Adapters */
3148                        switch (pi->tstamp_config.rx_filter) {
3149                        case HWTSTAMP_FILTER_NONE:
3150                                pi->rxtstamp = false;
3151                                break;
3152                        case HWTSTAMP_FILTER_ALL:
3153                                pi->rxtstamp = true;
3154                                break;
3155                        default:
3156                                pi->tstamp_config.rx_filter =
3157                                        HWTSTAMP_FILTER_NONE;
3158                                return -ERANGE;
3159                        }
3160                }
3161                return copy_to_user(req->ifr_data, &pi->tstamp_config,
3162                                    sizeof(pi->tstamp_config)) ?
3163                        -EFAULT : 0;
3164        default:
3165                return -EOPNOTSUPP;
3166        }
3167        return ret;
3168}
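
/* Usage sketch (illustrative; assumes the linuxptp userspace tools): the
 * SIOCSHWTSTAMP case above runs when an application requests hardware
 * timestamping, e.g.:
 *
 *   hwstamp_ctl -i eth0 -t 1 -r 1   # HWTSTAMP_TX_ON, HWTSTAMP_FILTER_ALL
 *
 * On T5/T6 this takes the PTP paths above; on T4 only the NONE/ALL RX
 * filters are honoured, as handled in the else-branch.
 */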
3169
3170static void cxgb_set_rxmode(struct net_device *dev)
3171{
3172        /* unfortunately we can't return errors to the stack */
3173        set_rxmode(dev, -1, false);
3174}
3175
3176static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3177{
3178        struct port_info *pi = netdev_priv(dev);
3179        int ret;
3180
3181        ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
3182                            pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
3183        if (!ret)
3184                dev->mtu = new_mtu;
3185        return ret;
3186}
3187
3188#ifdef CONFIG_PCI_IOV
3189static int cxgb4_mgmt_open(struct net_device *dev)
3190{
3191        /* Turn carrier off since we don't have to transmit anything on this
3192         * interface.
3193         */
3194        netif_carrier_off(dev);
3195        return 0;
3196}
3197
3198/* Fill MAC addresses that will be assigned to the VFs by the FW */
3199static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
3200{
3201        u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
3202        unsigned int i, vf, nvfs;
3203        u16 a, b;
3204        int err;
3205        u8 *na;
3206
3207        err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
3208        if (err)
3209                return;
3210
3211        na = adap->params.vpd.na;
3212        for (i = 0; i < ETH_ALEN; i++)
3213                hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
3214                              hex2val(na[2 * i + 1]));
3215
3216        a = (hw_addr[0] << 8) | hw_addr[1];
3217        b = (hw_addr[3] << 8) | hw_addr[4];
3218        a ^= b;
3219        a |= 0x0200;    /* locally assigned Ethernet MAC address */
3220        a &= ~0x0100;   /* not a multicast Ethernet MAC address */
3221        macaddr[0] = a >> 8;
3222        macaddr[1] = a & 0xff;
3223
3224        for (i = 2; i < 5; i++)
3225                macaddr[i] = hw_addr[i + 1];
3226
3227        for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
3228                vf < nvfs; vf++) {
3229                macaddr[5] = adap->pf * nvfs + vf;
3230                ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
3231        }
3232}
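
/* Worked example (hypothetical VPD contents): if the VPD "na" field holds
 * the ASCII string "000743123456", hw_addr decodes to 00:07:43:12:34:56.
 * Then a = 0x0007 and b = 0x1234, so a ^= b yields 0x1233; setting the
 * locally-administered bit (0x0200) and clearing the multicast bit (0x0100)
 * leaves 0x1233, giving MAC bytes 12:33. Bytes 2-4 copy hw_addr[3..5]
 * (12:34:56) and byte 5 encodes the VF, so with 16 VFs per PF, PF 0 / VF 3
 * would get 12:33:12:34:56:03.
 */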
3233
3234static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3235{
3236        struct port_info *pi = netdev_priv(dev);
3237        struct adapter *adap = pi->adapter;
3238        int ret;
3239
3240        /* verify MAC addr is valid */
3241        if (!is_valid_ether_addr(mac)) {
3242                dev_err(pi->adapter->pdev_dev,
3243                        "Invalid Ethernet address %pM for VF %d\n",
3244                        mac, vf);
3245                return -EINVAL;
3246        }
3247
3248        dev_info(pi->adapter->pdev_dev,
3249                 "Setting MAC %pM on VF %d\n", mac, vf);
3250        ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
3251        if (!ret)
3252                ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
3253        return ret;
3254}
3255
3256static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
3257                                    int vf, struct ifla_vf_info *ivi)
3258{
3259        struct port_info *pi = netdev_priv(dev);
3260        struct adapter *adap = pi->adapter;
3261        struct vf_info *vfinfo;
3262
3263        if (vf >= adap->num_vfs)
3264                return -EINVAL;
3265        vfinfo = &adap->vfinfo[vf];
3266
3267        ivi->vf = vf;
3268        ivi->max_tx_rate = vfinfo->tx_rate;
3269        ivi->min_tx_rate = 0;
3270        ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
3271        ivi->vlan = vfinfo->vlan;
3272        ivi->linkstate = vfinfo->link_state;
3273        return 0;
3274}
3275
3276static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
3277                                       struct netdev_phys_item_id *ppid)
3278{
3279        struct port_info *pi = netdev_priv(dev);
3280        unsigned int phy_port_id;
3281
3282        phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
3283        ppid->id_len = sizeof(phy_port_id);
3284        memcpy(ppid->id, &phy_port_id, ppid->id_len);
3285        return 0;
3286}
3287
3288static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
3289                                  int min_tx_rate, int max_tx_rate)
3290{
3291        struct port_info *pi = netdev_priv(dev);
3292        struct adapter *adap = pi->adapter;
3293        unsigned int link_ok, speed, mtu;
3294        u32 fw_pfvf, fw_class;
3295        int class_id = vf;
3296        int ret;
3297        u16 pktsize;
3298
3299        if (vf >= adap->num_vfs)
3300                return -EINVAL;
3301
3302        if (min_tx_rate) {
3303                dev_err(adap->pdev_dev,
3304                        "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
3305                        min_tx_rate, vf);
3306                return -EINVAL;
3307        }
3308
3309        if (max_tx_rate == 0) {
3310                /* unbind VF from any Traffic Class */
3311                fw_pfvf =
3312                    (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3313                     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3314                fw_class = 0xffffffff;
3315                ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3316                                    &fw_pfvf, &fw_class);
3317                if (ret) {
3318                        dev_err(adap->pdev_dev,
3319                                "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
3320                                ret, adap->pf, vf);
3321                        return -EINVAL;
3322                }
3323                dev_info(adap->pdev_dev,
3324                         "PF %d VF %d is unbound from TX Rate Limiting\n",
3325                         adap->pf, vf);
3326                adap->vfinfo[vf].tx_rate = 0;
3327                return 0;
3328        }
3329
3330        ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
3331        if (ret != FW_SUCCESS) {
3332                dev_err(adap->pdev_dev,
3333                        "Failed to get link information for VF %d\n", vf);
3334                return -EINVAL;
3335        }
3336
3337        if (!link_ok) {
3338                dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3339                return -EINVAL;
3340        }
3341
3342        if (max_tx_rate > speed) {
3343                dev_err(adap->pdev_dev,
3344                        "Max tx rate %d for VF %d can't be > link-speed %u",
3345                        max_tx_rate, vf, speed);
3346                return -EINVAL;
3347        }
3348
3349        pktsize = mtu;
3350        /* subtract ethhdr size and 4 byte CRC, since f/w appends them */
3351        pktsize = pktsize - sizeof(struct ethhdr) - 4;
3352        /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
3353        pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
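            /* For example, with an MTU of 1500 this works out to
             * 1500 - 14 (ethhdr) - 4 (CRC) - 20 (iphdr) - 20 (tcphdr) = 1442
             * bytes, i.e. a typical IPv4 TCP MSS.
             */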
3354        /* configure Traffic Class for rate-limiting */
3355        ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
3356                              SCHED_CLASS_LEVEL_CL_RL,
3357                              SCHED_CLASS_MODE_CLASS,
3358                              SCHED_CLASS_RATEUNIT_BITS,
3359                              SCHED_CLASS_RATEMODE_ABS,
3360                              pi->tx_chan, class_id, 0,
3361                              max_tx_rate * 1000, 0, pktsize, 0);
3362        if (ret) {
3363                dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3364                        ret);
3365                return -EINVAL;
3366        }
3367        dev_info(adap->pdev_dev,
3368                 "Class %d with MSS %u configured with rate %u\n",
3369                 class_id, pktsize, max_tx_rate);
3370
3371        /* bind VF to configured Traffic Class */
3372        fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3373                   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3374        fw_class = class_id;
3375        ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3376                            &fw_class);
3377        if (ret) {
3378                dev_err(adap->pdev_dev,
3379                        "Err %d in binding PF %d VF %d to Traffic Class %d\n",
3380                        ret, adap->pf, vf, class_id);
3381                return -EINVAL;
3382        }
3383        dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3384                 adap->pf, vf, class_id);
3385        adap->vfinfo[vf].tx_rate = max_tx_rate;
3386        return 0;
3387}
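
/* Usage sketch (illustrative; "mgmtpf0" is a placeholder interface name):
 * this handler services the iproute2 VF rate knob, e.g.:
 *
 *   ip link set dev mgmtpf0 vf 0 max_tx_rate 1000   # cap VF 0 at 1 Gb/s
 *   ip link set dev mgmtpf0 vf 0 max_tx_rate 0      # remove the cap
 *
 * Non-zero min_tx_rate is rejected above since the hardware class only
 * implements a maximum-rate limit here.
 */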
3388
3389static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
3390                                  u16 vlan, u8 qos, __be16 vlan_proto)
3391{
3392        struct port_info *pi = netdev_priv(dev);
3393        struct adapter *adap = pi->adapter;
3394        int ret;
3395
3396        if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
3397                return -EINVAL;
3398
3399        if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
3400                return -EPROTONOSUPPORT;
3401
3402        ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
3403        if (!ret) {
3404                adap->vfinfo[vf].vlan = vlan;
3405                return 0;
3406        }
3407
3408        dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
3409                ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
3410        return ret;
3411}
3412
3413static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3414                                        int link)
3415{
3416        struct port_info *pi = netdev_priv(dev);
3417        struct adapter *adap = pi->adapter;
3418        u32 param, val;
3419        int ret = 0;
3420
3421        if (vf >= adap->num_vfs)
3422                return -EINVAL;
3423
3424        switch (link) {
3425        case IFLA_VF_LINK_STATE_AUTO:
3426                val = FW_VF_LINK_STATE_AUTO;
3427                break;
3428
3429        case IFLA_VF_LINK_STATE_ENABLE:
3430                val = FW_VF_LINK_STATE_ENABLE;
3431                break;
3432
3433        case IFLA_VF_LINK_STATE_DISABLE:
3434                val = FW_VF_LINK_STATE_DISABLE;
3435                break;
3436
3437        default:
3438                return -EINVAL;
3439        }
3440
3441        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3442                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3443        ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3444                            &param, &val);
3445        if (ret) {
3446                dev_err(adap->pdev_dev,
3447                        "Error %d in setting PF %d VF %d link state\n",
3448                        ret, adap->pf, vf);
3449                return -EINVAL;
3450        }
3451
3452        adap->vfinfo[vf].link_state = link;
3453        return ret;
3454}
3455#endif /* CONFIG_PCI_IOV */
3456
3457static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3458{
3459        int ret;
3460        struct sockaddr *addr = p;
3461        struct port_info *pi = netdev_priv(dev);
3462
3463        if (!is_valid_ether_addr(addr->sa_data))
3464                return -EADDRNOTAVAIL;
3465
3466        ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3467                                    addr->sa_data, true, &pi->smt_idx);
3468        if (ret < 0)
3469                return ret;
3470
3471        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3472        return 0;
3473}
3474
3475#ifdef CONFIG_NET_POLL_CONTROLLER
3476static void cxgb_netpoll(struct net_device *dev)
3477{
3478        struct port_info *pi = netdev_priv(dev);
3479        struct adapter *adap = pi->adapter;
3480
3481        if (adap->flags & CXGB4_USING_MSIX) {
3482                int i;
3483                struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3484
3485                for (i = pi->nqsets; i; i--, rx++)
3486                        t4_sge_intr_msix(0, &rx->rspq);
3487        } else
3488                t4_intr_handler(adap)(0, adap);
3489}
3490#endif
3491
3492static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
3493{
3494        struct port_info *pi = netdev_priv(dev);
3495        struct adapter *adap = pi->adapter;
3496        struct ch_sched_queue qe = { 0 };
3497        struct ch_sched_params p = { 0 };
3498        struct sched_class *e;
3499        u32 req_rate;
3500        int err = 0;
3501
3502        if (!can_sched(dev))
3503                return -ENOTSUPP;
3504
3505        if (index < 0 || index > pi->nqsets - 1)
3506                return -EINVAL;
3507
3508        if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3509                dev_err(adap->pdev_dev,
3510                        "Failed to rate limit on queue %d. Link Down?\n",
3511                        index);
3512                return -EINVAL;
3513        }
3514
3515        qe.queue = index;
3516        e = cxgb4_sched_queue_lookup(dev, &qe);
3517        if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3518                dev_err(adap->pdev_dev,
3519                        "Queue %u already bound to class %u of type: %u\n",
3520                        index, e->idx, e->info.u.params.level);
3521                return -EBUSY;
3522        }
3523
3524        /* Convert from Mbps to Kbps */
3525        req_rate = rate * 1000;
3526
3527        /* Max rate is 100 Gbps */
3528        if (req_rate > SCHED_MAX_RATE_KBPS) {
3529                dev_err(adap->pdev_dev,
3530                        "Invalid rate %u Mbps, Max rate is %u Mbps\n",
3531                        rate, SCHED_MAX_RATE_KBPS / 1000);
3532                return -ERANGE;
3533        }
3534
3535        /* First unbind the queue from any existing class */
3536        memset(&qe, 0, sizeof(qe));
3537        qe.queue = index;
3538        qe.class = SCHED_CLS_NONE;
3539
3540        err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
3541        if (err) {
3542                dev_err(adap->pdev_dev,
3543                        "Unbinding Queue %d on port %d fail. Err: %d\n",
3544                        index, pi->port_id, err);
3545                return err;
3546        }
3547
3548        /* Queue already unbound */
3549        if (!req_rate)
3550                return 0;
3551
3552        /* Fetch any available unused or matching scheduling class */
3553        p.type = SCHED_CLASS_TYPE_PACKET;
3554        p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
3555        p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
3556        p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
3557        p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
3558        p.u.params.channel  = pi->tx_chan;
3559        p.u.params.class    = SCHED_CLS_NONE;
3560        p.u.params.minrate  = 0;
3561        p.u.params.maxrate  = req_rate;
3562        p.u.params.weight   = 0;
3563        p.u.params.pktsize  = dev->mtu;
3564
3565        e = cxgb4_sched_class_alloc(dev, &p);
3566        if (!e)
3567                return -ENOMEM;
3568
3569        /* Bind the queue to a scheduling class */
3570        memset(&qe, 0, sizeof(qe));
3571        qe.queue = index;
3572        qe.class = e->idx;
3573
3574        err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
3575        if (err)
3576                dev_err(adap->pdev_dev,
3577                        "Queue rate limiting failed. Err: %d\n", err);
3578        return err;
3579}
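
/* Usage sketch (illustrative): .ndo_set_tx_maxrate is driven by the
 * per-queue tx_maxrate sysfs attribute, whose unit is Mb/s, e.g.:
 *
 *   echo 500 > /sys/class/net/eth0/queues/tx-0/tx_maxrate   # cap queue 0
 *   echo 0   > /sys/class/net/eth0/queues/tx-0/tx_maxrate   # uncap it
 */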
3580
3581static int cxgb_setup_tc_flower(struct net_device *dev,
3582                                struct flow_cls_offload *cls_flower)
3583{
3584        switch (cls_flower->command) {
3585        case FLOW_CLS_REPLACE:
3586                return cxgb4_tc_flower_replace(dev, cls_flower);
3587        case FLOW_CLS_DESTROY:
3588                return cxgb4_tc_flower_destroy(dev, cls_flower);
3589        case FLOW_CLS_STATS:
3590                return cxgb4_tc_flower_stats(dev, cls_flower);
3591        default:
3592                return -EOPNOTSUPP;
3593        }
3594}
3595
3596static int cxgb_setup_tc_cls_u32(struct net_device *dev,
3597                                 struct tc_cls_u32_offload *cls_u32)
3598{
3599        switch (cls_u32->command) {
3600        case TC_CLSU32_NEW_KNODE:
3601        case TC_CLSU32_REPLACE_KNODE:
3602                return cxgb4_config_knode(dev, cls_u32);
3603        case TC_CLSU32_DELETE_KNODE:
3604                return cxgb4_delete_knode(dev, cls_u32);
3605        default:
3606                return -EOPNOTSUPP;
3607        }
3608}
3609
3610static int cxgb_setup_tc_matchall(struct net_device *dev,
3611                                  struct tc_cls_matchall_offload *cls_matchall,
3612                                  bool ingress)
3613{
3614        struct adapter *adap = netdev2adap(dev);
3615
3616        if (!adap->tc_matchall)
3617                return -ENOMEM;
3618
3619        switch (cls_matchall->command) {
3620        case TC_CLSMATCHALL_REPLACE:
3621                return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
3622        case TC_CLSMATCHALL_DESTROY:
3623                return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
3624        case TC_CLSMATCHALL_STATS:
3625                if (ingress)
3626                        return cxgb4_tc_matchall_stats(dev, cls_matchall);
3627                break;
3628        default:
3629                break;
3630        }
3631
3632        return -EOPNOTSUPP;
3633}
3634
3635static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
3636                                          void *type_data, void *cb_priv)
3637{
3638        struct net_device *dev = cb_priv;
3639        struct port_info *pi = netdev2pinfo(dev);
3640        struct adapter *adap = netdev2adap(dev);
3641
3642        if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3643                dev_err(adap->pdev_dev,
3644                        "Failed to setup tc on port %d. Link Down?\n",
3645                        pi->port_id);
3646                return -EINVAL;
3647        }
3648
3649        if (!tc_cls_can_offload_and_chain0(dev, type_data))
3650                return -EOPNOTSUPP;
3651
3652        switch (type) {
3653        case TC_SETUP_CLSU32:
3654                return cxgb_setup_tc_cls_u32(dev, type_data);
3655        case TC_SETUP_CLSFLOWER:
3656                return cxgb_setup_tc_flower(dev, type_data);
3657        case TC_SETUP_CLSMATCHALL:
3658                return cxgb_setup_tc_matchall(dev, type_data, true);
3659        default:
3660                return -EOPNOTSUPP;
3661        }
3662}
3663
3664static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
3665                                         void *type_data, void *cb_priv)
3666{
3667        struct net_device *dev = cb_priv;
3668        struct port_info *pi = netdev2pinfo(dev);
3669        struct adapter *adap = netdev2adap(dev);
3670
3671        if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3672                dev_err(adap->pdev_dev,
3673                        "Failed to setup tc on port %d. Link Down?\n",
3674                        pi->port_id);
3675                return -EINVAL;
3676        }
3677
3678        if (!tc_cls_can_offload_and_chain0(dev, type_data))
3679                return -EOPNOTSUPP;
3680
3681        switch (type) {
3682        case TC_SETUP_CLSMATCHALL:
3683                return cxgb_setup_tc_matchall(dev, type_data, false);
3684        default:
3685                break;
3686        }
3687
3688        return -EOPNOTSUPP;
3689}
3690
3691static int cxgb_setup_tc_mqprio(struct net_device *dev,
3692                                struct tc_mqprio_qopt_offload *mqprio)
3693{
3694        struct adapter *adap = netdev2adap(dev);
3695
3696        if (!is_ethofld(adap) || !adap->tc_mqprio)
3697                return -ENOMEM;
3698
3699        return cxgb4_setup_tc_mqprio(dev, mqprio);
3700}
3701
3702static LIST_HEAD(cxgb_block_cb_list);
3703
3704static int cxgb_setup_tc_block(struct net_device *dev,
3705                               struct flow_block_offload *f)
3706{
3707        struct port_info *pi = netdev_priv(dev);
3708        flow_setup_cb_t *cb;
3709        bool ingress_only;
3710
3711        pi->tc_block_shared = f->block_shared;
3712        if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3713                cb = cxgb_setup_tc_block_egress_cb;
3714                ingress_only = false;
3715        } else {
3716                cb = cxgb_setup_tc_block_ingress_cb;
3717                ingress_only = true;
3718        }
3719
3720        return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
3721                                          cb, pi, dev, ingress_only);
3722}
3723
3724static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3725                         void *type_data)
3726{
3727        switch (type) {
3728        case TC_SETUP_QDISC_MQPRIO:
3729                return cxgb_setup_tc_mqprio(dev, type_data);
3730        case TC_SETUP_BLOCK:
3731                return cxgb_setup_tc_block(dev, type_data);
3732        default:
3733                return -EOPNOTSUPP;
3734        }
3735}
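
/* Usage sketch (illustrative tc commands): the TC_SETUP_BLOCK path above
 * is exercised by hardware-offloaded classifiers, e.g.:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *           dst_ip 192.0.2.1 action drop
 *   tc filter add dev eth0 egress matchall skip_sw \
 *           action police rate 1gbit burst 64k
 *
 * The flower filter lands in cxgb_setup_tc_block_ingress_cb(), the
 * matchall filter in cxgb_setup_tc_block_egress_cb().
 */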
3736
3737static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
3738                                      unsigned int table, unsigned int entry,
3739                                      struct udp_tunnel_info *ti)
3740{
3741        struct port_info *pi = netdev_priv(netdev);
3742        struct adapter *adapter = pi->adapter;
3743        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3744        int ret = 0, i;
3745
3746        switch (ti->type) {
3747        case UDP_TUNNEL_TYPE_VXLAN:
3748                adapter->vxlan_port = 0;
3749                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3750                break;
3751        case UDP_TUNNEL_TYPE_GENEVE:
3752                adapter->geneve_port = 0;
3753                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3754                break;
3755        default:
3756                return -EINVAL;
3757        }
3758
3759        /* Matchall mac entries can be deleted only after all tunnel ports
3760         * are brought down or removed.
3761         */
3762        if (!adapter->rawf_cnt)
3763                return 0;
3764        for_each_port(adapter, i) {
3765                pi = adap2pinfo(adapter, i);
3766                ret = t4_free_raw_mac_filt(adapter, pi->viid,
3767                                           match_all_mac, match_all_mac,
3768                                           adapter->rawf_start + pi->port_id,
3769                                           1, pi->port_id, false);
3770                if (ret < 0) {
3771                        netdev_info(netdev, "Failed to free mac filter entry for port %d\n",
3772                                    i);
3773                        return ret;
3774                }
3775        }
3776
3777        return 0;
3778}
3779
3780static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
3781                                    unsigned int table, unsigned int entry,
3782                                    struct udp_tunnel_info *ti)
3783{
3784        struct port_info *pi = netdev_priv(netdev);
3785        struct adapter *adapter = pi->adapter;
3786        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3787        int i, ret;
3788
3789        switch (ti->type) {
3790        case UDP_TUNNEL_TYPE_VXLAN:
3791                adapter->vxlan_port = ti->port;
3792                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3793                             VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3794                break;
3795        case UDP_TUNNEL_TYPE_GENEVE:
3796                adapter->geneve_port = ti->port;
3797                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3798                             GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3799                break;
3800        default:
3801                return -EINVAL;
3802        }
3803
3804        /* Create a 'match all' mac filter entry for inner mac,
3805         * if raw mac interface is supported. Once the Linux kernel provides
3806         * driver entry points for adding/deleting the inner mac addresses,
3807         * we will remove this 'match all' entry and fall back to adding
3808         * exact match filters.
3809         */
3810        for_each_port(adapter, i) {
3811                pi = adap2pinfo(adapter, i);
3812
3813                ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3814                                            match_all_mac,
3815                                            match_all_mac,
3816                                            adapter->rawf_start + pi->port_id,
3817                                            1, pi->port_id, false);
3818                if (ret < 0) {
3819                        netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3820                                    be16_to_cpu(ti->port));
3821                        return ret;
3822                }
3823        }
3824
3825        return 0;
3826}
3827
3828static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
3829        .set_port       = cxgb_udp_tunnel_set_port,
3830        .unset_port     = cxgb_udp_tunnel_unset_port,
3831        .tables         = {
3832                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
3833                { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
3834        },
3835};
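
/* Illustrative example: creating a VXLAN device on top of a port, e.g.
 *
 *   ip link add vxlan0 type vxlan id 42 local 198.51.100.1 \
 *           dev eth0 dstport 4789
 *
 * makes the udp_tunnel core call cxgb_udp_tunnel_set_port() for the VXLAN
 * table entry, programming MPS_RX_VXLAN_TYPE_A above; deleting the device
 * triggers cxgb_udp_tunnel_unset_port().
 */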
3836
3837static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3838                                             struct net_device *dev,
3839                                             netdev_features_t features)
3840{
3841        struct port_info *pi = netdev_priv(dev);
3842        struct adapter *adapter = pi->adapter;
3843
3844        if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3845                return features;
3846
3847        /* Check if hw supports offload for this packet */
3848        if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3849                return features;
3850
3851        /* Offload is not supported for this encapsulated packet */
3852        return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3853}
3854
3855static netdev_features_t cxgb_fix_features(struct net_device *dev,
3856                                           netdev_features_t features)
3857{
3858        /* Disable GRO, if RX_CSUM is disabled */
3859        if (!(features & NETIF_F_RXCSUM))
3860                features &= ~NETIF_F_GRO;
3861
3862        return features;
3863}
3864
3865static const struct net_device_ops cxgb4_netdev_ops = {
3866        .ndo_open             = cxgb_open,
3867        .ndo_stop             = cxgb_close,
3868        .ndo_start_xmit       = t4_start_xmit,
3869        .ndo_select_queue     = cxgb_select_queue,
3870        .ndo_get_stats64      = cxgb_get_stats,
3871        .ndo_set_rx_mode      = cxgb_set_rxmode,
3872        .ndo_set_mac_address  = cxgb_set_mac_addr,
3873        .ndo_set_features     = cxgb_set_features,
3874        .ndo_validate_addr    = eth_validate_addr,
3875        .ndo_do_ioctl         = cxgb_ioctl,
3876        .ndo_change_mtu       = cxgb_change_mtu,
3877#ifdef CONFIG_NET_POLL_CONTROLLER
3878        .ndo_poll_controller  = cxgb_netpoll,
3879#endif
3880#ifdef CONFIG_CHELSIO_T4_FCOE
3881        .ndo_fcoe_enable      = cxgb_fcoe_enable,
3882        .ndo_fcoe_disable     = cxgb_fcoe_disable,
3883#endif /* CONFIG_CHELSIO_T4_FCOE */
3884        .ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
3885        .ndo_setup_tc         = cxgb_setup_tc,
3886        .ndo_features_check   = cxgb_features_check,
3887        .ndo_fix_features     = cxgb_fix_features,
3888};
3889
3890#ifdef CONFIG_PCI_IOV
3891static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3892        .ndo_open               = cxgb4_mgmt_open,
3893        .ndo_set_vf_mac         = cxgb4_mgmt_set_vf_mac,
3894        .ndo_get_vf_config      = cxgb4_mgmt_get_vf_config,
3895        .ndo_set_vf_rate        = cxgb4_mgmt_set_vf_rate,
3896        .ndo_get_phys_port_id   = cxgb4_mgmt_get_phys_port_id,
3897        .ndo_set_vf_vlan        = cxgb4_mgmt_set_vf_vlan,
3898        .ndo_set_vf_link_state  = cxgb4_mgmt_set_vf_link_state,
3899};
3900
3901static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3902                                   struct ethtool_drvinfo *info)
3903{
3904        struct adapter *adapter = netdev2adap(dev);
3905
3906        strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3907        strlcpy(info->bus_info, pci_name(adapter->pdev),
3908                sizeof(info->bus_info));
3909}
3910
3911static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3912        .get_drvinfo       = cxgb4_mgmt_get_drvinfo,
3913};
3914#endif
3915
3916static void notify_fatal_err(struct work_struct *work)
3917{
3918        struct adapter *adap;
3919
3920        adap = container_of(work, struct adapter, fatal_err_notify_task);
3921        notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3922}
3923
3924void t4_fatal_err(struct adapter *adap)
3925{
3926        int port;
3927
3928        if (pci_channel_offline(adap->pdev))
3929                return;
3930
3931        /* Disable the SGE since ULDs are going to free resources that
3932         * could be exposed to the adapter.  RDMA MWs for example...
3933         */
3934        t4_shutdown_adapter(adap);
3935        for_each_port(adap, port) {
3936                struct net_device *dev = adap->port[port];
3937
3938                /* If we get here in very early initialization the network
3939                 * devices may not have been set up yet.
3940                 */
3941                if (!dev)
3942                        continue;
3943
3944                netif_tx_stop_all_queues(dev);
3945                netif_carrier_off(dev);
3946        }
3947        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3948        queue_work(adap->workq, &adap->fatal_err_notify_task);
3949}
3950
3951static void setup_memwin(struct adapter *adap)
3952{
3953        u32 nic_win_base = t4_get_util_window(adap);
3954
3955        t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3956}
3957
3958static void setup_memwin_rdma(struct adapter *adap)
3959{
3960        if (adap->vres.ocq.size) {
3961                u32 start;
3962                unsigned int sz_kb;
3963
3964                start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3965                start &= PCI_BASE_ADDRESS_MEM_MASK;
3966                start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3967                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3968                t4_write_reg(adap,
3969                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3970                             start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3971                t4_write_reg(adap,
3972                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3973                             adap->vres.ocq.start);
3974                t4_read_reg(adap,
3975                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3976        }
3977}
3978
3979/* HMA Definitions */
3980
3981/* The maximum number of addresses that can be sent in a single FW cmd */
3982#define HMA_MAX_ADDR_IN_CMD     5
3983
3984#define HMA_PAGE_SIZE           PAGE_SIZE
3985
3986#define HMA_MAX_NO_FW_ADDRESS   (16 << 10)  /* FW supports 16K addresses */
3987
3988#define HMA_PAGE_ORDER                                  \
3989        ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?      \
3990        ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3991
3992/* The minimum and maximum possible HMA sizes that can be specified in the FW
3993 * configuration (in units of MB).
3994 */
3995#define HMA_MIN_TOTAL_SIZE      1
3996#define HMA_MAX_TOTAL_SIZE                              \
3997        (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *           \
3998          HMA_MAX_NO_FW_ADDRESS) >> 20)
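
/* Worked example, assuming a 4KB PAGE_SIZE: HMA_PAGE_ORDER is
 * ilog2(16384 / 4096) = 2, so each allocation spans 16KB, and
 * HMA_MAX_TOTAL_SIZE works out to ((4096 << 2) * 16384) >> 20 = 256MB.
 */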
3999
4000static void adap_free_hma_mem(struct adapter *adapter)
4001{
4002        struct scatterlist *iter;
4003        struct page *page;
4004        int i;
4005
4006        if (!adapter->hma.sgt)
4007                return;
4008
4009        if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
4010                dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
4011                             adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
4012                adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
4013        }
4014
4015        for_each_sg(adapter->hma.sgt->sgl, iter,
4016                    adapter->hma.sgt->orig_nents, i) {
4017                page = sg_page(iter);
4018                if (page)
4019                        __free_pages(page, HMA_PAGE_ORDER);
4020        }
4021
4022        kfree(adapter->hma.phy_addr);
4023        sg_free_table(adapter->hma.sgt);
4024        kfree(adapter->hma.sgt);
4025        adapter->hma.sgt = NULL;
4026}
4027
4028static int adap_config_hma(struct adapter *adapter)
4029{
4030        struct scatterlist *sgl, *iter;
4031        struct sg_table *sgt;
4032        struct page *newpage;
4033        unsigned int i, j, k;
4034        u32 param, hma_size;
4035        unsigned int ncmds;
4036        size_t page_size;
4037        u32 page_order;
4038        int node, ret;
4039
4040        /* HMA is supported only for T6+ cards.
4041         * Avoid initializing HMA in kdump kernels.
4042         */
4043        if (is_kdump_kernel() ||
4044            CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
4045                return 0;
4046
4047        /* Get the HMA region size required by fw */
4048        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4049                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
4050        ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4051                              1, &param, &hma_size);
4052        /* An error means the card has its own memory or HMA is not supported by
4053         * the firmware. Return without any errors.
4054         */
4055        if (ret || !hma_size)
4056                return 0;
4057
4058        if (hma_size < HMA_MIN_TOTAL_SIZE ||
4059            hma_size > HMA_MAX_TOTAL_SIZE) {
4060                dev_err(adapter->pdev_dev,
4061                        "HMA size %uMB beyond bounds(%u-%lu)MB\n",
4062                        hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
4063                return -EINVAL;
4064        }
4065
4066        page_size = HMA_PAGE_SIZE;
4067        page_order = HMA_PAGE_ORDER;
4068        adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
4069        if (unlikely(!adapter->hma.sgt)) {
4070                dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
4071                return -ENOMEM;
4072        }
4073        sgt = adapter->hma.sgt;
4074        /* FW returned value will be in MB
4075         */
4076        sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
4077        if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
4078                dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
4079                kfree(adapter->hma.sgt);
4080                adapter->hma.sgt = NULL;
4081                return -ENOMEM;
4082        }
4083
4084        sgl = adapter->hma.sgt->sgl;
4085        node = dev_to_node(adapter->pdev_dev);
4086        for_each_sg(sgl, iter, sgt->orig_nents, i) {
4087                newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
4088                                           __GFP_ZERO, page_order);
4089                if (!newpage) {
4090                        dev_err(adapter->pdev_dev,
4091                                "Not enough memory for HMA page allocation\n");
4092                        ret = -ENOMEM;
4093                        goto free_hma;
4094                }
4095                sg_set_page(iter, newpage, page_size << page_order, 0);
4096        }
4097
4098        sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
4099                                DMA_BIDIRECTIONAL);
4100        if (!sgt->nents) {
4101                dev_err(adapter->pdev_dev,
4102                        "Not enough memory for HMA DMA mapping");
4103                ret = -ENOMEM;
4104                goto free_hma;
4105        }
4106        adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
4107
4108        adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
4109                                        GFP_KERNEL);
4110        if (unlikely(!adapter->hma.phy_addr)) {
4111                ret = -ENOMEM;
                    goto free_hma;
            }
4112
4113        for_each_sg(sgl, iter, sgt->nents, i) {
4114                newpage = sg_page(iter);
4115                adapter->hma.phy_addr[i] = sg_dma_address(iter);
4116        }
4117
4118        ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
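            /* e.g. (hypothetical) 12 mapped entries need
             * DIV_ROUND_UP(12, 5) = 3 commands: two carrying 5 addresses
             * each and a final one carrying the remaining 2.
             */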
4119        /* Pass on the addresses to firmware */
4120        for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
4121                struct fw_hma_cmd hma_cmd;
4122                u8 naddr = HMA_MAX_ADDR_IN_CMD;
4123                u8 soc = 0, eoc = 0;
4124                u8 hma_mode = 1; /* Presently we support only Page table mode */
4125
4126                soc = (i == 0) ? 1 : 0;
4127                eoc = (i == ncmds - 1) ? 1 : 0;
4128
4129                /* For last cmd, set naddr corresponding to remaining
4130                 * addresses
4131                 */
4132                if (i == ncmds - 1) {
4133                        naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
4134                        naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
4135                }
4136                memset(&hma_cmd, 0, sizeof(hma_cmd));
4137                hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
4138                                       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4139                hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
4140
4141                hma_cmd.mode_to_pcie_params =
4142                        htonl(FW_HMA_CMD_MODE_V(hma_mode) |
4143                              FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
4144
4145                /* HMA cmd size specified in MB */
4146                hma_cmd.naddr_size =
4147                        htonl(FW_HMA_CMD_SIZE_V(hma_size) |
4148                              FW_HMA_CMD_NADDR_V(naddr));
4149
4150                /* Total Page size specified in units of 4K */
4151                hma_cmd.addr_size_pkd =
4152                        htonl(FW_HMA_CMD_ADDR_SIZE_V
4153                                ((page_size << page_order) >> 12));
4154
4155                /* Fill the 5 addresses */
4156                for (j = 0; j < naddr; j++) {
4157                        hma_cmd.phy_address[j] =
4158                                cpu_to_be64(adapter->hma.phy_addr[j + k]);
4159                }
4160                ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
4161                                 sizeof(hma_cmd), &hma_cmd);
4162                if (ret) {
4163                        dev_err(adapter->pdev_dev,
4164                                "HMA FW command failed with err %d\n", ret);
4165                        goto free_hma;
4166                }
4167        }
4168
4169        if (!ret)
4170                dev_info(adapter->pdev_dev,
4171                         "Reserved %uMB host memory for HMA\n", hma_size);
4172        return ret;
4173
4174free_hma:
4175        adap_free_hma_mem(adapter);
4176        return ret;
4177}
4178
4179static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4180{
4181        u32 v;
4182        int ret;
4183
4184        /* Now that we've successfully configured and initialized the adapter,
4185         * we can ask the Firmware what resources it has provisioned for us.
4186         */
4187        ret = t4_get_pfres(adap);
4188        if (ret) {
4189                dev_err(adap->pdev_dev,
4190                        "Unable to retrieve resource provisioning information\n");
4191                return ret;
4192        }
4193
4194        /* get device capabilities */
4195        memset(c, 0, sizeof(*c));
4196        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4197                               FW_CMD_REQUEST_F | FW_CMD_READ_F);
4198        c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4199        ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
4200        if (ret < 0)
4201                return ret;
4202
4203        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4204                               FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4205        ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
4206        if (ret < 0)
4207                return ret;
4208
4209        ret = t4_config_glbl_rss(adap, adap->pf,
4210                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4211                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4212                                 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
4213        if (ret < 0)
4214                return ret;
4215
4216        ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
4217                          MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
4218                          FW_CMD_CAP_PF);
4219        if (ret < 0)
4220                return ret;
4221
4222        t4_sge_init(adap);
4223
4224        /* tweak some settings */
4225        t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
4226        t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4227        t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4228        v = t4_read_reg(adap, TP_PIO_DATA_A);
4229        t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
4230
4231        /* first 4 Tx modulation queues point to consecutive Tx channels */
4232        adap->params.tp.tx_modq_map = 0xE4;
4233        t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4234                     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4235
4236        /* associate each Tx modulation queue with consecutive Tx channels */
4237        v = 0x84218421;
4238        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4239                          &v, 1, TP_TX_SCHED_HDR_A);
4240        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4241                          &v, 1, TP_TX_SCHED_FIFO_A);
4242        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4243                          &v, 1, TP_TX_SCHED_PCMD_A);
4244
4245#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4246        if (is_offload(adap)) {
4247                t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4248                             TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4249                             TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4250                             TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4251                             TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4252                t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4253                             TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4254                             TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4255                             TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4256                             TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4257        }
4258
4259        /* get basic stuff going */
4260        return t4_early_init(adap, adap->pf);
4261}
4262
4263/*
4264 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
4265 */
4266#define MAX_ATIDS 8192U
4267
4268/*
4269 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4270 *
4271 * If the firmware we're dealing with has Configuration File support, then
4272 * we use that to perform all configuration.
4273 */
4274
4275/*
4276 * Tweak configuration based on module parameters, etc.  Most of these have
4277 * defaults assigned to them by Firmware Configuration Files (if we're using
4278 * them) but need to be explicitly set if we're using hard-coded
4279 * initialization.  But even in the case of using Firmware Configuration
4280 * Files, we'd like to expose the ability to change these via module
4281 * parameters so these are essentially common tweaks/settings for
4282 * Configuration Files and hard-coded initialization ...
4283 */
4284static int adap_init0_tweaks(struct adapter *adapter)
4285{
4286        /*
4287         * Fix up various Host-Dependent Parameters like Page Size, Cache
4288         * Line Size, etc.  The firmware default is for a 4KB Page Size and
4289         * 64B Cache Line Size ...
4290         */
4291        t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4292
4293        /*
4294         * Process module parameters which affect early initialization.
4295         */
4296        if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4297                dev_err(&adapter->pdev->dev,
4298                        "Ignoring illegal rx_dma_offset=%d, using 2\n",
4299                        rx_dma_offset);
4300                rx_dma_offset = 2;
4301        }
4302        t4_set_reg_field(adapter, SGE_CONTROL_A,
4303                         PKTSHIFT_V(PKTSHIFT_M),
4304                         PKTSHIFT_V(rx_dma_offset));
4305
4306        /*
4307         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4308         * adds the pseudo header itself.
4309         */
4310        t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
4311                               CSUM_HAS_PSEUDO_HDR_F, 0);
4312
4313        return 0;
4314}
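
/* Illustrative usage (assumes rx_dma_offset is exposed as a module
 * parameter in this build):
 *
 *   modprobe cxgb4 rx_dma_offset=0
 *
 * would disable the default 2-byte RX payload shift configured above.
 */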
4315
4316/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
4317 * unto themselves and they contain their own firmware to perform their
4318 * tasks ...
4319 */
4320static int phy_aq1202_version(const u8 *phy_fw_data,
4321                              size_t phy_fw_size)
4322{
4323        int offset;
4324
4325        /* At offset 0x8 you're looking for the primary image's
4326         * starting offset which is 3 Bytes wide
4327         *
4328         * At offset 0xa of the primary image, you look for the offset
4329         * of the DRAM segment which is 3 Bytes wide.
4330         *
4331         * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
4332         * wide
4333         */
4334        #define be16(__p) (((__p)[0] << 8) | (__p)[1])
4335        #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
4336        #define le24(__p) (le16(__p) | ((__p)[2] << 16))
4337
4338        offset = le24(phy_fw_data + 0x8) << 12;
4339        offset = le24(phy_fw_data + offset + 0xa);
4340        return be16(phy_fw_data + offset + 0x27e);
4341
4342        #undef be16
4343        #undef le16
4344        #undef le24
4345}
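
/* Illustrative parse (hypothetical image contents): if the 3 bytes at
 * offset 0x8 hold little-endian 0x000002, the primary image starts at
 * 0x2000 (2 << 12); if the 3 bytes at 0x2000 + 0xa hold 0x004000, the
 * firmware version is the big-endian 16-bit word at 0x4000 + 0x27e.
 */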
4346
4347static struct info_10gbt_phy_fw {
4348        unsigned int phy_fw_id;         /* PCI Device ID */
4349        char *phy_fw_file;              /* /lib/firmware/ PHY Firmware file */
4350        int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
4351        int phy_flash;                  /* Has FLASH for PHY Firmware */
4352} phy_info_array[] = {
4353        {
4354                PHY_AQ1202_DEVICEID,
4355                PHY_AQ1202_FIRMWARE,
4356                phy_aq1202_version,
4357                1,
4358        },
4359        {
4360                PHY_BCM84834_DEVICEID,
4361                PHY_BCM84834_FIRMWARE,
4362                NULL,
4363                0,
4364        },
4365        { 0, NULL, NULL },
4366};
4367
4368static struct info_10gbt_phy_fw *find_phy_info(int devid)
4369{
4370        int i;
4371
4372        for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
4373                if (phy_info_array[i].phy_fw_id == devid)
4374                        return &phy_info_array[i];
4375        }
4376        return NULL;
4377}
4378
4379/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
4380 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
4381 * we return a negative error number.  If we transfer new firmware we return 1
4382 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
4383 */
4384static int adap_init0_phy(struct adapter *adap)
4385{
4386        const struct firmware *phyf;
4387        int ret;
4388        struct info_10gbt_phy_fw *phy_info;
4389
4390        /* Use the device ID to determine which PHY firmware file to load.
4391         */
4392        phy_info = find_phy_info(adap->pdev->device);
4393        if (!phy_info) {
4394                dev_warn(adap->pdev_dev,
4395                         "No PHY Firmware file found for this PHY\n");
4396                return -EOPNOTSUPP;
4397        }
4398
4399        /* If we have a PHY firmware file under /lib/firmware/cxgb4/, then
4400         * use that.  The adapter firmware provides us with a memory buffer
4401         * where we can load a PHY firmware file from the host if we want to
4402         * override the PHY firmware file in flash.
4403         */
4404        ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
4405                                      adap->pdev_dev);
4406        if (ret < 0) {
4407                /* For adapters without FLASH attached to PHY for their
4408                 * firmware, it's obviously a fatal error if we can't get the
4409                 * firmware to the adapter.  For adapters with PHY firmware
4410                 * FLASH storage, it's worth a warning if we can't find the
4411                 * PHY Firmware, but we'll suppress the error ...
4412                 */
4413                dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
4414                        "/lib/firmware/%s, error %d\n",
4415                        phy_info->phy_fw_file, -ret);
4416                if (phy_info->phy_flash) {
4417                        int cur_phy_fw_ver = 0;
4418
4419                        t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4420                        dev_warn(adap->pdev_dev, "continuing with on-adapter "
4421                                 "FLASH copy, version %#x\n", cur_phy_fw_ver);
4422                        ret = 0;
4423                }
4424
4425                return ret;
4426        }
4427
4428        /* Load PHY Firmware onto adapter.
4429         */
4430        ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
4431                             (u8 *)phyf->data, phyf->size);
4432        if (ret < 0)
4433                dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
4434                        -ret);
4435        else if (ret > 0) {
4436                int new_phy_fw_ver = 0;
4437
4438                if (phy_info->phy_fw_version)
4439                        new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
4440                                                                  phyf->size);
4441                dev_info(adap->pdev_dev, "Successfully transferred PHY "
4442                         "Firmware /lib/firmware/%s, version %#x\n",
4443                         phy_info->phy_fw_file, new_phy_fw_ver);
4444        }
4445
4446        release_firmware(phyf);
4447
4448        return ret;
4449}
4450
4451/*
4452 * Attempt to initialize the adapter via a Firmware Configuration File.
4453 */
4454static int adap_init0_config(struct adapter *adapter, int reset)
4455{
4456        char *fw_config_file, fw_config_file_path[256];
4457        u32 finiver, finicsum, cfcsum, param, val;
4458        struct fw_caps_config_cmd caps_cmd;
4459        unsigned long mtype = 0, maddr = 0;
4460        const struct firmware *cf;
4461        char *config_name = NULL;
4462        int config_issued = 0;
4463        int ret;
4464
4465        /*
4466         * Reset device if necessary.
4467         */
4468        if (reset) {
4469                ret = t4_fw_reset(adapter, adapter->mbox,
4470                                  PIORSTMODE_F | PIORST_F);
4471                if (ret < 0)
4472                        goto bye;
4473        }
4474
4475        /* If this is a 10Gb/s-BT adapter make sure the chip-external
4476         * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
4477         * to be performed after any global adapter RESET above since some
4478         * PHYs only have local RAM copies of the PHY firmware.
4479         */
4480        if (is_10gbt_device(adapter->pdev->device)) {
4481                ret = adap_init0_phy(adapter);
4482                if (ret < 0)
4483                        goto bye;
4484        }
4485        /*
4486         * If we have a chip-specific configuration file under
4487         * /lib/firmware/cxgb4/, then use that.  Otherwise, use the
4488         * configuration file stored in the adapter flash ...
4489         */
4490        switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4491        case CHELSIO_T4:
4492                fw_config_file = FW4_CFNAME;
4493                break;
4494        case CHELSIO_T5:
4495                fw_config_file = FW5_CFNAME;
4496                break;
4497        case CHELSIO_T6:
4498                fw_config_file = FW6_CFNAME;
4499                break;
4500        default:
4501                dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4502                        adapter->pdev->device);
4503                ret = -EINVAL;
4504                goto bye;
4505        }
4506
4507        ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4508        if (ret < 0) {
4509                config_name = "On FLASH";
4510                mtype = FW_MEMTYPE_CF_FLASH;
4511                maddr = t4_flash_cfg_addr(adapter);
4512        } else {
4513                u32 params[7], val[7];
4514
4515                sprintf(fw_config_file_path,
4516                        "/lib/firmware/%s", fw_config_file);
4517                config_name = fw_config_file_path;
4518
4519                if (cf->size >= FLASH_CFG_MAX_SIZE) {
4520                        ret = -ENOMEM;
4521                } else {
4522                        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4523                             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4524                        ret = t4_query_params(adapter, adapter->mbox,
4525                                              adapter->pf, 0, 1, params, val);
4526                        if (ret == 0) {
4527                                /*
4528                                 * For t4_memory_rw() below addresses and
4529                                 * sizes have to be in terms of multiples of 4
4530                                 * bytes.  So, if the Configuration File isn't
4531                                 * a multiple of 4 bytes in length we'll have
4532                                 * to write that out separately since we can't
4533                                 * guarantee that the bytes following the
4534                                 * residual byte in the buffer returned by
4535                                 * request_firmware() are zeroed out ...
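                                     *
                                     * For example, a 103-byte file is
                                     * written as one 100-byte transfer plus
                                     * a final 4-byte word holding the last
                                     * 3 bytes, with the trailing byte zeroed.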
4536                                 */
4537                                size_t resid = cf->size & 0x3;
4538                                size_t size = cf->size & ~0x3;
4539                                __be32 *data = (__be32 *)cf->data;
4540
4541                                mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4542                                maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
4543
4544                                spin_lock(&adapter->win0_lock);
4545                                ret = t4_memory_rw(adapter, 0, mtype, maddr,
4546                                                   size, data, T4_MEMORY_WRITE);
4547                                if (ret == 0 && resid != 0) {
4548                                        union {
4549                                                __be32 word;
4550                                                char buf[4];
4551                                        } last;
4552                                        int i;
4553
4554                                        last.word = data[size >> 2];
4555                                        for (i = resid; i < 4; i++)
4556                                                last.buf[i] = 0;
4557                                        ret = t4_memory_rw(adapter, 0, mtype,
4558                                                           maddr + size,
4559                                                           4, &last.word,
4560                                                           T4_MEMORY_WRITE);
4561                                }
4562                                spin_unlock(&adapter->win0_lock);
4563                        }
4564                }
4565
4566                release_firmware(cf);
4567                if (ret)
4568                        goto bye;
4569        }
4570
4571        val = 0;
4572
4573        /* Offload + hash filter is supported.  Older firmware will fail this
4574         * request, and that is fine.
4575         */
4576        param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4577                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
4578        ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
4579                            1, &param, &val);
4580
4581        /* The FW doesn't know about hash filter + offload support;
4582         * that's not a problem, so don't return an error.
4583         */
4584        if (ret < 0) {
4585                dev_warn(adapter->pdev_dev,
4586                         "Hash filter with ofld is not supported by FW\n");
4587        }
4588
4589        /*
4590         * Issue a Capability Configuration command to the firmware to get it
4591         * to parse the Configuration File.  We don't use t4_fw_config_file()
4592         * because we want the ability to modify various features after we've
4593         * processed the configuration file ...
4594         */
4595        memset(&caps_cmd, 0, sizeof(caps_cmd));
4596        caps_cmd.op_to_write =
4597                htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4598                      FW_CMD_REQUEST_F |
4599                      FW_CMD_READ_F);
4600        caps_cmd.cfvalid_to_len16 =
4601                htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4602                      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4603                      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
4604                      FW_LEN16(caps_cmd));
4605        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4606                         &caps_cmd);
4607
4608        /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4609         * Configuration File in FLASH), our last-gasp effort is to use the
4610         * Firmware Configuration File which is embedded in the firmware.  A
4611         * very few early versions of the firmware didn't have one embedded
4612         * but we can ignore those.
4613         */
4614        if (ret == -ENOENT) {
4615                memset(&caps_cmd, 0, sizeof(caps_cmd));
4616                caps_cmd.op_to_write =
4617                        htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4618                                        FW_CMD_REQUEST_F |
4619                                        FW_CMD_READ_F);
4620                caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4621                ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4622                                sizeof(caps_cmd), &caps_cmd);
4623                config_name = "Firmware Default";
4624        }
4625
4626        config_issued = 1;
4627        if (ret < 0)
4628                goto bye;
4629
4630        finiver = ntohl(caps_cmd.finiver);
4631        finicsum = ntohl(caps_cmd.finicsum);
4632        cfcsum = ntohl(caps_cmd.cfcsum);
4633        if (finicsum != cfcsum)
4634                dev_warn(adapter->pdev_dev, "Configuration File checksum "
4635                         "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4636                         finicsum, cfcsum);
4637
4638        /*
4639         * And now tell the firmware to use the configuration we just loaded.
4640         */
4641        caps_cmd.op_to_write =
4642                htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4643                      FW_CMD_REQUEST_F |
4644                      FW_CMD_WRITE_F);
4645        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4646        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4647                         NULL);
4648        if (ret < 0)
4649                goto bye;
4650
4651        /*
4652         * Tweak configuration based on system architecture, module
4653         * parameters, etc.
4654         */
4655        ret = adap_init0_tweaks(adapter);
4656        if (ret < 0)
4657                goto bye;
4658
4659        /* We will proceed even if HMA init fails. */
4660        ret = adap_config_hma(adapter);
4661        if (ret)
4662                dev_err(adapter->pdev_dev,
4663                        "HMA configuration failed with error %d\n", ret);
4664
4665        if (is_t6(adapter->params.chip)) {
4666                adap_config_hpfilter(adapter);
4667                ret = setup_ppod_edram(adapter);
4668                if (!ret)
4669                        dev_info(adapter->pdev_dev, "Successfully enabled "
4670                                 "ppod edram feature\n");
4671        }
4672
4673        /*
4674         * And finally tell the firmware to initialize itself using the
4675         * parameters from the Configuration File.
4676         */
4677        ret = t4_fw_initialize(adapter, adapter->mbox);
4678        if (ret < 0)
4679                goto bye;
4680
4681        /* Emit Firmware Configuration File information and return
4682         * successfully.
4683         */
4684        dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
4685                 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4686                 config_name, finiver, cfcsum);
4687        return 0;
4688
4689        /*
4690         * Something bad happened.  Return the error ...  (If the "error"
4691         * is that there's no Configuration File on the adapter we don't
4692         * want to issue a warning since this is fairly common.)
4693         */
4694bye:
4695        if (config_issued && ret != -ENOENT)
4696                dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4697                         config_name, -ret);
4698        return ret;
4699}
4700
4701static struct fw_info fw_info_array[] = {
4702        {
4703                .chip = CHELSIO_T4,
4704                .fs_name = FW4_CFNAME,
4705                .fw_mod_name = FW4_FNAME,
4706                .fw_hdr = {
4707                        .chip = FW_HDR_CHIP_T4,
4708                        .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4709                        .intfver_nic = FW_INTFVER(T4, NIC),
4710                        .intfver_vnic = FW_INTFVER(T4, VNIC),
4711                        .intfver_ri = FW_INTFVER(T4, RI),
4712                        .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4713                        .intfver_fcoe = FW_INTFVER(T4, FCOE),
4714                },
4715        }, {
4716                .chip = CHELSIO_T5,
4717                .fs_name = FW5_CFNAME,
4718                .fw_mod_name = FW5_FNAME,
4719                .fw_hdr = {
4720                        .chip = FW_HDR_CHIP_T5,
4721                        .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4722                        .intfver_nic = FW_INTFVER(T5, NIC),
4723                        .intfver_vnic = FW_INTFVER(T5, VNIC),
4724                        .intfver_ri = FW_INTFVER(T5, RI),
4725                        .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4726                        .intfver_fcoe = FW_INTFVER(T5, FCOE),
4727                },
4728        }, {
4729                .chip = CHELSIO_T6,
4730                .fs_name = FW6_CFNAME,
4731                .fw_mod_name = FW6_FNAME,
4732                .fw_hdr = {
4733                        .chip = FW_HDR_CHIP_T6,
4734                        .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4735                        .intfver_nic = FW_INTFVER(T6, NIC),
4736                        .intfver_vnic = FW_INTFVER(T6, VNIC),
4737                        .intfver_ofld = FW_INTFVER(T6, OFLD),
4738                        .intfver_ri = FW_INTFVER(T6, RI),
4739                        .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4740                        .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4741                        .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4742                        .intfver_fcoe = FW_INTFVER(T6, FCOE),
4743                },
4744        }
4746};
4747
4748static struct fw_info *find_fw_info(int chip)
4749{
4750        int i;
4751
4752        for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4753                if (fw_info_array[i].chip == chip)
4754                        return &fw_info_array[i];
4755        }
4756        return NULL;
4757}
4758
4759/*
4760 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4761 */
4762static int adap_init0(struct adapter *adap, int vpd_skip)
4763{
4764        struct fw_caps_config_cmd caps_cmd;
4765        u32 params[7], val[7];
4766        enum dev_state state;
4767        u32 v, port_vec;
4768        int reset = 1;
4769        int ret;
4770
4771        /* Grab Firmware Device Log parameters as early as possible so we have
4772         * access to it for debugging, etc.
4773         */
4774        ret = t4_init_devlog_params(adap);
4775        if (ret < 0)
4776                return ret;
4777
4778        /* Contact FW, advertising Master capability */
4779        ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4780                          is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4781        if (ret < 0) {
4782                dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4783                        ret);
4784                return ret;
4785        }
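            /* On success, t4_fw_hello() returns the mailbox of the PF which
             * owns the device (the Master PF); if that is our own mailbox,
             * we are the Master.
             */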
4786        if (ret == adap->mbox)
4787                adap->flags |= CXGB4_MASTER_PF;
4788
4789        /*
4790         * If we're the Master PF Driver and the device is uninitialized,
4791         * then let's consider upgrading the firmware ...  (We always want
4792         * to check the firmware version number in order to A. get it for
4793         * later reporting and B. warn if the currently loaded firmware
4794         * is excessively mismatched relative to the driver.)
4795         */
4796
4797        t4_get_version_info(adap);
4798        ret = t4_check_fw_version(adap);
4799        /* If firmware is too old (not supported by driver) force an update. */
4800        if (ret)
4801                state = DEV_STATE_UNINIT;
4802        if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
4803                struct fw_info *fw_info;
4804                struct fw_hdr *card_fw;
4805                const struct firmware *fw;
4806                const u8 *fw_data = NULL;
4807                unsigned int fw_size = 0;
4808
4809                /* This is the firmware whose headers the driver was compiled
4810                 * against
4811                 */
4812                fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4813                if (fw_info == NULL) {
4814                        dev_err(adap->pdev_dev,
4815                                "unable to get firmware info for chip %d.\n",
4816                                CHELSIO_CHIP_VERSION(adap->params.chip));
4817                        return -EINVAL;
4818                }
4819
4820                /* allocate memory to read the header of the firmware on the
4821                 * card
4822                 */
4823                card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4824                if (!card_fw) {
4825                        ret = -ENOMEM;
4826                        goto bye;
4827                }
4828
4829        /* Get FW from /lib/firmware/ */
4830                ret = request_firmware(&fw, fw_info->fw_mod_name,
4831                                       adap->pdev_dev);
4832                if (ret < 0) {
4833                        dev_err(adap->pdev_dev,
4834                                "unable to load firmware image %s, error %d\n",
4835                                fw_info->fw_mod_name, ret);
4836                } else {
4837                        fw_data = fw->data;
4838                        fw_size = fw->size;
4839                }
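                    /* Note that even if no host firmware image was found
                     * (fw_data stays NULL), t4_prep_fw() below can still
                     * decide from the header of the firmware already on the
                     * card whether that copy is usable.
                     */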
4840
4841                /* upgrade FW logic */
4842                ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4843                                 state, &reset);
4844
4845                /* Cleaning up */
4846                release_firmware(fw);
4847                kvfree(card_fw);
4848
4849                if (ret < 0)
4850                        goto bye;
4851        }
4852
4853        /* If the firmware is initialized already, emit a simple note to that
4854         * effect. Otherwise, it's time to try initializing the adapter.
4855         */
4856        if (state == DEV_STATE_INIT) {
4857                ret = adap_config_hma(adap);
4858                if (ret)
4859                        dev_err(adap->pdev_dev,
4860                                "HMA configuration failed with error %d\n",
4861                                ret);
4862                dev_info(adap->pdev_dev, "Coming up as %s: "
4863                         "Adapter already initialized\n",
4864                         adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
4865        } else {
4866                dev_info(adap->pdev_dev, "Coming up as MASTER: "
4867                         "Initializing adapter\n");
4868
4869                /* Find out whether we're dealing with a version of the
4870                 * firmware which has configuration file support.
4871                 */
4872                params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4873                             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4874                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4875                                      params, val);
4876
4877                /* If the firmware doesn't support Configuration Files,
4878                 * return an error.
4879                 */
4880                if (ret < 0) {
4881                        dev_err(adap->pdev_dev, "firmware doesn't support "
4882                                "Firmware Configuration Files\n");
4883                        goto bye;
4884                }
4885
4886                /* The firmware provides us with a memory buffer where we can
4887                 * load a Configuration File from the host if we want to
4888                 * override the Configuration File in flash.
4889                 */
4890                ret = adap_init0_config(adap, reset);
4891                if (ret == -ENOENT) {
4892                        dev_err(adap->pdev_dev, "no Configuration File "
4893                                "present on adapter.\n");
4894                        goto bye;
4895                }
4896                if (ret < 0) {
4897                        dev_err(adap->pdev_dev, "could not initialize "
4898                                "adapter, error %d\n", -ret);
4899                        goto bye;
4900                }
4901        }
4902
4903        /* Now that we've successfully configured and initialized the adapter
4904         * (or found it already initialized), we can ask the Firmware what
4905         * resources it has provisioned for us.
4906         */
4907        ret = t4_get_pfres(adap);
4908        if (ret) {
4909                dev_err(adap->pdev_dev,
4910                        "Unable to retrieve resource provisioning information\n");
4911                goto bye;
4912        }
4913
4914        /* Grab VPD parameters.  This should be done after we establish a
4915         * connection to the firmware since some of the VPD parameters
4916         * (notably the Core Clock frequency) are retrieved via requests to
4917         * the firmware.  On the other hand, we need these fairly early on
4918         * so we do this right after getting ahold of the firmware.
4919         *
4920         * We need to do this after initializing the adapter because someone
4921         * could have FLASHed a new VPD which won't be read by the firmware
4922         * until we do the RESET ...
4923         */
4924        if (!vpd_skip) {
4925                ret = t4_get_vpd_params(adap, &adap->params.vpd);
4926                if (ret < 0)
4927                        goto bye;
4928        }
4929
4930        /* Find out what ports are available to us.  Note that we need to do
4931         * this early since subsequent configuration steps need nports
4932         * and portvec ...
4933         */
4934        v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4935            FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4937        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4938        if (ret < 0)
4939                goto bye;
4940
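            /* port_vec is a bitmap of the physical ports available to us;
             * e.g., port_vec == 0x5 means ports 0 and 2 are present, so
             * nports = hweight32(0x5) = 2.
             */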
4941        adap->params.nports = hweight32(port_vec);
4942        adap->params.portvec = port_vec;
4943
4944        /* Give the SGE code a chance to pull in anything that it needs ...
4945         * Note that this must be called after we retrieve our VPD parameters
4946         * in order to know how to convert core ticks to seconds, etc.
4947         */
4948        ret = t4_sge_init(adap);
4949        if (ret < 0)
4950                goto bye;
4951
4952        /* Grab the SGE Doorbell Queue Timer values.  If successful, that
4953         * indicates that the Firmware and Hardware support this.
4954         */
4955        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4956                    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
4957        ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4958                              1, params, val);
4959
4960        if (!ret) {
4961                adap->sge.dbqtimer_tick = val[0];
4962                ret = t4_read_sge_dbqtimers(adap,
4963                                            ARRAY_SIZE(adap->sge.dbqtimer_val),
4964                                            adap->sge.dbqtimer_val);
4965        }
4966
4967        if (!ret)
4968                adap->flags |= CXGB4_SGE_DBQ_TIMER;
4969
4970        if (is_bypass_device(adap->pdev->device))
4971                adap->params.bypass = 1;
4972
4973        /*
4974         * Grab some of our basic fundamental operating parameters.
4975         */
4976        params[0] = FW_PARAM_PFVF(EQ_START);
4977        params[1] = FW_PARAM_PFVF(L2T_START);
4978        params[2] = FW_PARAM_PFVF(L2T_END);
4979        params[3] = FW_PARAM_PFVF(FILTER_START);
4980        params[4] = FW_PARAM_PFVF(FILTER_END);
4981        params[5] = FW_PARAM_PFVF(IQFLINT_START);
4982        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4983        if (ret < 0)
4984                goto bye;
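            /* The *_START/*_END parameters describe inclusive ranges, hence
             * the "+ 1" below; e.g., hypothetical FILTER_START = 1168 and
             * FILTER_END = 1679 would give nftids = 1679 - 1168 + 1 = 512.
             */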
4985        adap->sge.egr_start = val[0];
4986        adap->l2t_start = val[1];
4987        adap->l2t_end = val[2];
4988        adap->tids.ftid_base = val[3];
4989        adap->tids.nftids = val[4] - val[3] + 1;
4990        adap->sge.ingr_start = val[5];
4991
4992        if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4993                params[0] = FW_PARAM_PFVF(HPFILTER_START);
4994                params[1] = FW_PARAM_PFVF(HPFILTER_END);
4995                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4996                                      params, val);
4997                if (ret < 0)
4998                        goto bye;
4999
5000                adap->tids.hpftid_base = val[0];
5001                adap->tids.nhpftids = val[1] - val[0] + 1;
5002
5003                /* Read the raw MPS entries.  In T6, the last 2 TCAM entries
5004                 * are reserved for raw MAC addresses (rawf = 2, one per port).
5005                 */
5006                params[0] = FW_PARAM_PFVF(RAWF_START);
5007                params[1] = FW_PARAM_PFVF(RAWF_END);
5008                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5009                                      params, val);
5010                if (ret == 0) {
5011                        adap->rawf_start = val[0];
5012                        adap->rawf_cnt = val[1] - val[0] + 1;
5013                }
5014
5015                adap->tids.tid_base =
5016                        t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
5017        }
5018
5019        /* QIDs (ingress/egress) returned from firmware can be anywhere
5020         * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
5021         * Hence the driver needs to allocate memory for this range to
5022         * store the queue info.  Get the highest IQFLINT/EQ index returned
5023         * in the FW_EQ_*_CMD.alloc command.
5024         */
5025        params[0] = FW_PARAM_PFVF(EQ_END);
5026        params[1] = FW_PARAM_PFVF(IQFLINT_END);
5027        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5028        if (ret < 0)
5029                goto bye;
5030        adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
5031        adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
5032
5033        adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
5034                                    sizeof(*adap->sge.egr_map), GFP_KERNEL);
5035        if (!adap->sge.egr_map) {
5036                ret = -ENOMEM;
5037                goto bye;
5038        }
5039
5040        adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
5041                                     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
5042        if (!adap->sge.ingr_map) {
5043                ret = -ENOMEM;
5044                goto bye;
5045        }
5046
5047        /* Allocate the memory for the various egress queue bitmaps,
5048         * i.e. starving_fl, txq_maperr and blocked_fl.
5049         */
5050        adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5051                                        sizeof(long), GFP_KERNEL);
5052        if (!adap->sge.starving_fl) {
5053                ret = -ENOMEM;
5054                goto bye;
5055        }
5056
5057        adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5058                                       sizeof(long), GFP_KERNEL);
5059        if (!adap->sge.txq_maperr) {
5060                ret = -ENOMEM;
5061                goto bye;
5062        }
5063
5064#ifdef CONFIG_DEBUG_FS
5065        adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5066                                       sizeof(long), GFP_KERNEL);
5067        if (!adap->sge.blocked_fl) {
5068                ret = -ENOMEM;
5069                goto bye;
5070        }
5071        bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz);
5072#endif
5073
5074        params[0] = FW_PARAM_PFVF(CLIP_START);
5075        params[1] = FW_PARAM_PFVF(CLIP_END);
5076        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5077        if (ret < 0)
5078                goto bye;
5079        adap->clipt_start = val[0];
5080        adap->clipt_end = val[1];
5081
5082        /* Get the supported number of traffic classes */
5083        params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5084        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5085        if (ret < 0) {
5086                /* We couldn't retrieve the number of Traffic Classes
5087                 * supported by the hardware/firmware. So we hard
5088                 * code it here.
5089                 */
5090                adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5091        } else {
5092                adap->params.nsched_cls = val[0];
5093        }
5094
5095        /* query params related to active filter region */
5096        params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5097        params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5098        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5099        /* If the active filter region is non-empty, enable establishing
5100         * offload connections through firmware work requests.
5101         */
5102        if (ret >= 0 && val[0] != val[1]) {
5103                adap->flags |= CXGB4_FW_OFLD_CONN;
5104                adap->tids.aftid_base = val[0];
5105                adap->tids.aftid_end = val[1];
5106        }
5107
5108        /* If we're running on newer firmware, let it know that we're
5109         * prepared to deal with encapsulated CPL messages.  Older
5110         * firmware won't understand this and we'll just get
5111         * unencapsulated messages ...
5112         */
5113        params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5114        val[0] = 1;
5115        (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5116
5117        /*
5118         * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5119         * capability.  Earlier versions of the firmware didn't have the
5120         * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5121         * permission to use ULPTX MEMWRITE DSGL.
5122         */
5123        if (is_t4(adap->params.chip)) {
5124                adap->params.ulptx_memwrite_dsgl = false;
5125        } else {
5126                params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5127                ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5128                                      1, params, val);
5129                adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5130        }
5131
5132        /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
5133        params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
5134        ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5135                              1, params, val);
5136        adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
5137
5138        /* See if FW supports FW_FILTER2 work request */
5139        if (is_t4(adap->params.chip)) {
5140                adap->params.filter2_wr_support = false;
5141        } else {
5142                params[0] = FW_PARAM_DEV(FILTER2_WR);
5143                ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5144                                      1, params, val);
5145                adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
5146        }
5147
5148        /* Check if FW supports returning vin and smt index.
5149         * If this is not supported, driver will interpret
5150         * these values from viid.
5151         */
5152        params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
5153        ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5154                              1, params, val);
5155        adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
5156
5157        /*
5158         * Get device capabilities so we can determine what resources we need
5159         * to manage.
5160         */
5161        memset(&caps_cmd, 0, sizeof(caps_cmd));
5162        caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5163                                     FW_CMD_REQUEST_F | FW_CMD_READ_F);
5164        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5165        ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5166                         &caps_cmd);
5167        if (ret < 0)
5168                goto bye;
5169
5170        /* Hash filter support requires testing some mandatory register
5171         * settings, and that test depends on whether offload is enabled,
5172         * hence we check and record the offload capability here.
5173         */
5174        if (caps_cmd.ofldcaps)
5175                adap->params.offload = 1;
5176
5177        if (caps_cmd.ofldcaps ||
5178            (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
5179            (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
5180                /* query offload-related parameters */
5181                params[0] = FW_PARAM_DEV(NTID);
5182                params[1] = FW_PARAM_PFVF(SERVER_START);
5183                params[2] = FW_PARAM_PFVF(SERVER_END);
5184                params[3] = FW_PARAM_PFVF(TDDP_START);
5185                params[4] = FW_PARAM_PFVF(TDDP_END);
5186                params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5187                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5188                                      params, val);
5189                if (ret < 0)
5190                        goto bye;
5191                adap->tids.ntids = val[0];
5192                adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5193                adap->tids.stid_base = val[1];
5194                adap->tids.nstids = val[2] - val[1] + 1;
5195                /*
5196                 * Set up the server filter region.  Divide the available
5197                 * filter region into two parts: regular filters get 1/3rd
5198                 * and server filters get the remaining 2/3rds.  This split
5199                 * is only made when the workaround path is enabled.
5200                 * 1. Regular filters.
5201                 * 2. Server filters: these are special filters used to
5202                 *    redirect SYN packets to the offload queue.
5203                 */
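                    /* For example, nftids = 96 would yield sftid_base =
                     * ftid_base + 32, nsftids = 64, and nftids reduced
                     * to 32.
                     */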
5204                if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
5205                        adap->tids.sftid_base = adap->tids.ftid_base +
5206                                        DIV_ROUND_UP(adap->tids.nftids, 3);
5207                        adap->tids.nsftids = adap->tids.nftids -
5208                                         DIV_ROUND_UP(adap->tids.nftids, 3);
5209                        adap->tids.nftids = adap->tids.sftid_base -
5210                                                adap->tids.ftid_base;
5211                }
5212                adap->vres.ddp.start = val[3];
5213                adap->vres.ddp.size = val[4] - val[3] + 1;
5214                adap->params.ofldq_wr_cred = val[5];
5215
5216                if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
5217                        init_hash_filter(adap);
5218                } else {
5219                        adap->num_ofld_uld += 1;
5220                }
5221
5222                if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
5223                        params[0] = FW_PARAM_PFVF(ETHOFLD_START);
5224                        params[1] = FW_PARAM_PFVF(ETHOFLD_END);
5225                        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5226                                              params, val);
5227                        if (!ret) {
5228                                adap->tids.eotid_base = val[0];
5229                                adap->tids.neotids = min_t(u32, MAX_ATIDS,
5230                                                           val[1] - val[0] + 1);
5231                                adap->params.ethofld = 1;
5232                        }
5233                }
5234        }
5235        if (caps_cmd.rdmacaps) {
5236                params[0] = FW_PARAM_PFVF(STAG_START);
5237                params[1] = FW_PARAM_PFVF(STAG_END);
5238                params[2] = FW_PARAM_PFVF(RQ_START);
5239                params[3] = FW_PARAM_PFVF(RQ_END);
5240                params[4] = FW_PARAM_PFVF(PBL_START);
5241                params[5] = FW_PARAM_PFVF(PBL_END);
5242                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5243                                      params, val);
5244                if (ret < 0)
5245                        goto bye;
5246                adap->vres.stag.start = val[0];
5247                adap->vres.stag.size = val[1] - val[0] + 1;
5248                adap->vres.rq.start = val[2];
5249                adap->vres.rq.size = val[3] - val[2] + 1;
5250                adap->vres.pbl.start = val[4];
5251                adap->vres.pbl.size = val[5] - val[4] + 1;
5252
5253                params[0] = FW_PARAM_PFVF(SRQ_START);
5254                params[1] = FW_PARAM_PFVF(SRQ_END);
5255                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5256                                      params, val);
5257                if (!ret) {
5258                        adap->vres.srq.start = val[0];
5259                        adap->vres.srq.size = val[1] - val[0] + 1;
5260                }
5261                if (adap->vres.srq.size) {
5262                        adap->srq = t4_init_srq(adap->vres.srq.size);
5263                        if (!adap->srq)
5264                                dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
5265                }
5266
5267                params[0] = FW_PARAM_PFVF(SQRQ_START);
5268                params[1] = FW_PARAM_PFVF(SQRQ_END);
5269                params[2] = FW_PARAM_PFVF(CQ_START);
5270                params[3] = FW_PARAM_PFVF(CQ_END);
5271                params[4] = FW_PARAM_PFVF(OCQ_START);
5272                params[5] = FW_PARAM_PFVF(OCQ_END);
5273                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5274                                      val);
5275                if (ret < 0)
5276                        goto bye;
5277                adap->vres.qp.start = val[0];
5278                adap->vres.qp.size = val[1] - val[0] + 1;
5279                adap->vres.cq.start = val[2];
5280                adap->vres.cq.size = val[3] - val[2] + 1;
5281                adap->vres.ocq.start = val[4];
5282                adap->vres.ocq.size = val[5] - val[4] + 1;
5283
5284                params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5285                params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5286                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5287                                      val);
5288                if (ret < 0) {
5289                        adap->params.max_ordird_qp = 8;
5290                        adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5291                        ret = 0;
5292                } else {
5293                        adap->params.max_ordird_qp = val[0];
5294                        adap->params.max_ird_adapter = val[1];
5295                }
5296                dev_info(adap->pdev_dev,
5297                         "max_ordird_qp %d max_ird_adapter %d\n",
5298                         adap->params.max_ordird_qp,
5299                         adap->params.max_ird_adapter);
5300
5301                /* Enable write_with_immediate if FW supports it */
5302                params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
5303                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5304                                      val);
5305                adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
5306
5307                /* Enable write_cmpl if FW supports it */
5308                params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
5309                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5310                                      val);
5311                adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
5312                adap->num_ofld_uld += 2;
5313        }
5314        if (caps_cmd.iscsicaps) {
5315                params[0] = FW_PARAM_PFVF(ISCSI_START);
5316                params[1] = FW_PARAM_PFVF(ISCSI_END);
5317                ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5318                                      params, val);
5319                if (ret < 0)
5320                        goto bye;
5321                adap->vres.iscsi.start = val[0];
5322                adap->vres.iscsi.size = val[1] - val[0] + 1;
5323                if (is_t6(adap->params.chip)) {
5324                        params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
5325                        params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
5326                        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5327                                              params, val);
5328                        if (!ret) {
5329                                adap->vres.ppod_edram.start = val[0];
5330                                adap->vres.ppod_edram.size =
5331                                        val[1] - val[0] + 1;
5332
5333                                dev_info(adap->pdev_dev,
5334                                         "ppod edram start 0x%x end 0x%x size 0x%x\n",
5335                                         val[0], val[1],
5336                                         adap->vres.ppod_edram.size);
5337                        }
5338                }
5339                /* LIO target and cxgb4i initiator */
5340                adap->num_ofld_uld += 2;
5341        }
5342        if (caps_cmd.cryptocaps) {
5343                if (ntohs(caps_cmd.cryptocaps) &
5344                    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
5345                        params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
5346                        ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5347                                              1, params, val);
5348                        if (ret < 0) {
5349                                if (ret != -EINVAL)
5350                                        goto bye;
5351                        } else {
5352                                adap->vres.ncrypto_fc = val[0];
5353                        }
5354                        adap->num_ofld_uld += 1;
5355                }
5356                if (ntohs(caps_cmd.cryptocaps) &
5357                    FW_CAPS_CONFIG_TLS_INLINE) {
5358                        params[0] = FW_PARAM_PFVF(TLS_START);
5359                        params[1] = FW_PARAM_PFVF(TLS_END);
5360                        ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5361                                              2, params, val);
5362                        if (ret < 0)
5363                                goto bye;
5364                        adap->vres.key.start = val[0];
5365                        adap->vres.key.size = val[1] - val[0] + 1;
5366                        adap->num_uld += 1;
5367                }
5368                adap->params.crypto = ntohs(caps_cmd.cryptocaps);
5369        }
5370
5371        /* The MTU/MSS Table is initialized by now, so load their values.  If
5372         * we're initializing the adapter, then we'll make any modifications
5373         * we want to the MTU/MSS Table and also initialize the congestion
5374         * parameters.
5375         */
5376        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5377        if (state != DEV_STATE_INIT) {
5378                int i;
5379
5380                /* The default MTU Table contains values 1492 and 1500.
5381                 * However, for TCP, it's better to have two values which are
5382                 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5383                 * This allows us to have a TCP Data Payload which is a
5384                 * multiple of 8 regardless of what combination of TCP Options
5385                 * are in use (always a multiple of 4 bytes) which is
5386                 * important for performance reasons.  For instance, if no
5387                 * options are in use, then we have a 20-byte IP header and a
5388                 * 20-byte TCP header.  In this case, a 1500-byte MSS would
5389                 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5390                 * which is not a multiple of 8.  So using an MSS of 1488 in
5391                 * this case results in a TCP Data Payload of 1448 bytes which
5392                 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
5393                 * Stamps have been negotiated, then an MTU of 1500 bytes
5394                 * results in a TCP Data Payload of 1448 bytes which, as
5395                 * above, is a multiple of 8 bytes ...
5396                 */
5397                for (i = 0; i < NMTUS; i++)
5398                        if (adap->params.mtus[i] == 1492) {
5399                                adap->params.mtus[i] = 1488;
5400                                break;
5401                        }
5402
5403                t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5404                             adap->params.b_wnd);
5405        }
5406        t4_init_sge_params(adap);
5407        adap->flags |= CXGB4_FW_OK;
5408        t4_init_tp_params(adap, true);
5409        return 0;
5410
5411        /*
5412         * Something bad happened.  If a command timed out or failed with
5413         * EIO, the FW is not operating within its spec or something
5414         * catastrophic happened to the HW/FW, so stop issuing commands.
5415         */
5416bye:
5417        adap_free_hma_mem(adap);
5418        kfree(adap->sge.egr_map);
5419        kfree(adap->sge.ingr_map);
5420        kfree(adap->sge.starving_fl);
5421        kfree(adap->sge.txq_maperr);
5422#ifdef CONFIG_DEBUG_FS
5423        kfree(adap->sge.blocked_fl);
5424#endif
5425        if (ret != -ETIMEDOUT && ret != -EIO)
5426                t4_fw_bye(adap, adap->mbox);
5427        return ret;
5428}
5429
5430/* EEH callbacks */
5431
5432static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5433                                         pci_channel_state_t state)
5434{
5435        int i;
5436        struct adapter *adap = pci_get_drvdata(pdev);
5437
5438        if (!adap)
5439                goto out;
5440
5441        rtnl_lock();
5442        adap->flags &= ~CXGB4_FW_OK;
5443        notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5444        spin_lock(&adap->stats_lock);
5445        for_each_port(adap, i) {
5446                struct net_device *dev = adap->port[i];
5447                if (dev) {
5448                        netif_device_detach(dev);
5449                        netif_carrier_off(dev);
5450                }
5451        }
5452        spin_unlock(&adap->stats_lock);
5453        disable_interrupts(adap);
5454        if (adap->flags & CXGB4_FULL_INIT_DONE)
5455                cxgb_down(adap);
5456        rtnl_unlock();
5457        if ((adap->flags & CXGB4_DEV_ENABLED)) {
5458                pci_disable_device(pdev);
5459                adap->flags &= ~CXGB4_DEV_ENABLED;
5460        }
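            /* A permanent channel failure cannot be recovered from; anything
             * else gets a chance via a slot reset.
             */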
5461out:    return state == pci_channel_io_perm_failure ?
5462                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5463}
5464
5465static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5466{
5467        int i, ret;
5468        struct fw_caps_config_cmd c;
5469        struct adapter *adap = pci_get_drvdata(pdev);
5470
5471        if (!adap) {
5472                pci_restore_state(pdev);
5473                pci_save_state(pdev);
5474                return PCI_ERS_RESULT_RECOVERED;
5475        }
5476
5477        if (!(adap->flags & CXGB4_DEV_ENABLED)) {
5478                if (pci_enable_device(pdev)) {
5479                        dev_err(&pdev->dev, "Cannot reenable PCI "
5480                                            "device after reset\n");
5481                        return PCI_ERS_RESULT_DISCONNECT;
5482                }
5483                adap->flags |= CXGB4_DEV_ENABLED;
5484        }
5485
5486        pci_set_master(pdev);
5487        pci_restore_state(pdev);
5488        pci_save_state(pdev);
5489
5490        if (t4_wait_dev_ready(adap->regs) < 0)
5491                return PCI_ERS_RESULT_DISCONNECT;
5492        if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
5493                return PCI_ERS_RESULT_DISCONNECT;
5494        adap->flags |= CXGB4_FW_OK;
5495        if (adap_init1(adap, &c))
5496                return PCI_ERS_RESULT_DISCONNECT;
5497
5498        for_each_port(adap, i) {
5499                struct port_info *pi = adap2pinfo(adap, i);
5500                u8 vivld = 0, vin = 0;
5501
5502                ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5503                                  NULL, NULL, &vivld, &vin);
5504                if (ret < 0)
5505                        return PCI_ERS_RESULT_DISCONNECT;
5506                pi->viid = ret;
5507                pi->xact_addr_filt = -1;
5508                /* If fw supports returning the VIN as part of FW_VI_CMD,
5509                 * save the returned values.
5510                 */
5511                if (adap->params.viid_smt_extn_support) {
5512                        pi->vivld = vivld;
5513                        pi->vin = vin;
5514                } else {
5515                        /* Retrieve the values from VIID */
5516                        pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5517                        pi->vin = FW_VIID_VIN_G(pi->viid);
5518                }
5519        }
5520
5521        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5522                     adap->params.b_wnd);
5523        setup_memwin(adap);
5524        if (cxgb_up(adap))
5525                return PCI_ERS_RESULT_DISCONNECT;
5526        return PCI_ERS_RESULT_RECOVERED;
5527}
5528
5529static void eeh_resume(struct pci_dev *pdev)
5530{
5531        int i;
5532        struct adapter *adap = pci_get_drvdata(pdev);
5533
5534        if (!adap)
5535                return;
5536
5537        rtnl_lock();
5538        for_each_port(adap, i) {
5539                struct net_device *dev = adap->port[i];
5540                if (dev) {
5541                        if (netif_running(dev)) {
5542                                link_start(dev);
5543                                cxgb_set_rxmode(dev);
5544                        }
5545                        netif_device_attach(dev);
5546                }
5547        }
5548        rtnl_unlock();
5549}
5550
5551static void eeh_reset_prepare(struct pci_dev *pdev)
5552{
5553        struct adapter *adapter = pci_get_drvdata(pdev);
5554        int i;
5555
5556        if (adapter->pf != 4)
5557                return;
5558
5559        adapter->flags &= ~CXGB4_FW_OK;
5560
5561        notify_ulds(adapter, CXGB4_STATE_DOWN);
5562
5563        for_each_port(adapter, i)
5564                if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5565                        cxgb_close(adapter->port[i]);
5566
5567        disable_interrupts(adapter);
5568        cxgb4_free_mps_ref_entries(adapter);
5569
5570        adap_free_hma_mem(adapter);
5571
5572        if (adapter->flags & CXGB4_FULL_INIT_DONE)
5573                cxgb_down(adapter);
5574}
5575
5576static void eeh_reset_done(struct pci_dev *pdev)
5577{
5578        struct adapter *adapter = pci_get_drvdata(pdev);
5579        int err, i;
5580
5581        if (adapter->pf != 4)
5582                return;
5583
5584        err = t4_wait_dev_ready(adapter->regs);
5585        if (err < 0) {
5586                dev_err(adapter->pdev_dev,
5587                        "Device not ready, err %d\n", err);
5588                return;
5589        }
5590
5591        setup_memwin(adapter);
5592
5593        err = adap_init0(adapter, 1);
5594        if (err) {
5595                dev_err(adapter->pdev_dev,
5596                        "Adapter init failed, err %d\n", err);
5597                return;
5598        }
5599
5600        setup_memwin_rdma(adapter);
5601
5602        if (adapter->flags & CXGB4_FW_OK) {
5603                err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5604                if (err) {
5605                        dev_err(adapter->pdev_dev,
5606                                "Port init failed, err %d\n", err);
5607                        return;
5608                }
5609        }
5610
5611        err = cfg_queues(adapter);
5612        if (err) {
5613                dev_err(adapter->pdev_dev,
5614                        "Config queues failed, err %d\n", err);
5615                return;
5616        }
5617
5618        cxgb4_init_mps_ref_entries(adapter);
5619
5620        err = setup_fw_sge_queues(adapter);
5621        if (err) {
5622                dev_err(adapter->pdev_dev,
5623                        "FW sge queue allocation failed, err %d\n", err);
5624                return;
5625        }
5626
5627        for_each_port(adapter, i)
5628                if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5629                        cxgb_open(adapter->port[i]);
5630}
5631
5632static const struct pci_error_handlers cxgb4_eeh = {
5633        .error_detected = eeh_err_detected,
5634        .slot_reset     = eeh_slot_reset,
5635        .resume         = eeh_resume,
5636        .reset_prepare  = eeh_reset_prepare,
5637        .reset_done     = eeh_reset_done,
5638};
5639
5640/* Return true if the Link Configuration supports "High Speeds" (those greater
5641 * than 1Gb/s).
5642 */
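    /* For example, a port whose pcaps advertise 100M, 1G and 25G is left
     * with only the 25G bit after the masking below, so it counts as a
     * "high speed" port.
     */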
5643static inline bool is_x_10g_port(const struct link_config *lc)
5644{
5645        unsigned int speeds, high_speeds;
5646
5647        speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
5648        high_speeds = speeds &
5649                        ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
5650
5651        return high_speeds != 0;
5652}
5653
5654/* Perform default configuration of DMA queues depending on the number and type
5655 * of ports we found and the number of available CPUs.  Most settings can be
5656 * modified by the admin prior to actual use.
5657 */
5658static int cfg_queues(struct adapter *adap)
5659{
5660        u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
5661        u32 ncpus = num_online_cpus();
5662        u32 niqflint, neq, num_ulds;
5663        struct sge *s = &adap->sge;
5664        u32 i, n10g = 0, qidx = 0;
5665        u32 q10g = 0, q1g;
5666
5667        /* Reduce memory usage in kdump environments by disabling all offload. */
5668        if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
5669                adap->params.offload = 0;
5670                adap->params.crypto = 0;
5671                adap->params.ethofld = 0;
5672        }
5673
5674        /* Calculate the number of Ethernet Queue Sets available based on
5675         * resources provisioned for us.  We always have an Asynchronous
5676         * Firmware Event Ingress Queue.  If we're operating in MSI or Legacy
5677         * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
5678         * Ingress Queue.  Meanwhile, we need two Egress Queues for each
5679         * Queue Set: one for the Free List and one for the Ethernet TX Queue.
5680         *
5681         * Note that we should also take into account all of the various
5682         * Offload Queues.  But, in any situation where we're operating in
5683         * a Resource Constrained Provisioning environment, doing any Offload
5684         * at all is problematic ...
5685         */
5686        niqflint = adap->params.pfres.niqflint - 1;
5687        if (!(adap->flags & CXGB4_USING_MSIX))
5688                niqflint--;
5689        neq = adap->params.pfres.neq / 2;
5690        avail_qsets = min(niqflint, neq);
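            /* Worked example with hypothetical provisioning: niqflint = 66 and
             * neq = 128 under MSI-X leave niqflint = 65 after reserving the FW
             * event queue, and neq / 2 = 64 Queue Sets on the Egress side, so
             * avail_qsets = min(65, 64) = 64.
             */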
5691
5692        if (avail_qsets < adap->params.nports) {
5693                dev_err(adap->pdev_dev, "avail_qsets=%d < nports=%d\n",
5694                        avail_qsets, adap->params.nports);
5695                return -ENOMEM;
5696        }
5697
5698        /* Count the number of 10Gb/s or better ports */
5699        for_each_port(adap, i)
5700                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5701
5702        avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
5703
5704        /* We default to one Queue Set per non-10G port and to up to the
5705         * number of online CPUs' worth of Queue Sets per 10G port.
5706         */
5707        if (n10g)
5708                q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
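            /* E.g. (hypothetical): 4 ports of which n10g = 2, with
             * avail_eth_qsets = 32, give q10g = (32 - 2) / 2 = 15.
             */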
5709
5710#ifdef CONFIG_CHELSIO_T4_DCB
5711        /* For Data Center Bridging support we need to be able to support up
5712         * to 8 Traffic Priorities; each of which will be assigned to its
5713         * own TX Queue in order to prevent Head-Of-Line Blocking.
5714         */
5715        q1g = 8;
5716        if (adap->params.nports * 8 > avail_eth_qsets) {
5717                dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
5718                        avail_eth_qsets, adap->params.nports * 8);
5719                return -ENOMEM;
5720        }
5721
5722        if (adap->params.nports * ncpus < avail_eth_qsets)
5723                q10g = max(8U, ncpus);
5724        else
5725                q10g = max(8U, q10g);
5726
5727        while ((q10g * n10g) >
5728               (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
5729                q10g--;
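            /* E.g. (hypothetical): nports = 4, n10g = 2, avail_eth_qsets = 64
             * and ncpus = 32: nports * ncpus >= avail_eth_qsets, so q10g
             * starts at max(8, 31) = 31; the loop then trims it until
             * 2 * q10g <= 64 - 2 * 8 = 48, leaving q10g = 24.
             */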
5730
5731#else /* !CONFIG_CHELSIO_T4_DCB */
5732        q1g = 1;
5733        q10g = min(q10g, ncpus);
5734#endif /* !CONFIG_CHELSIO_T4_DCB */
5735        if (is_kdump_kernel()) {
5736                q10g = 1;
5737                q1g = 1;
5738        }
5739
5740        for_each_port(adap, i) {
5741                struct port_info *pi = adap2pinfo(adap, i);
5742
5743                pi->first_qset = qidx;
5744                pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5745                qidx += pi->nqsets;
5746        }
5747
5748        s->ethqsets = qidx;
5749        s->max_ethqsets = qidx;   /* MSI-X may lower it later */
5750        avail_qsets -= qidx;
5751
5752        if (is_uld(adap)) {
5753                /* For offload we use one queue per channel if all ports are
5754                 * at most 1G; otherwise we divide all available Queue Sets
5755                 * amongst the channels, capped by the number of online CPUs.
5756                 */
5757                num_ulds = adap->num_uld + adap->num_ofld_uld;
5758                i = min_t(u32, MAX_OFLD_QSETS, ncpus);
5759                avail_uld_qsets = roundup(i, adap->params.nports);
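                    /* E.g. (hypothetical): ncpus = 6 and nports = 4 give
                     * i = min(MAX_OFLD_QSETS, 6) = 6 (assuming
                     * MAX_OFLD_QSETS >= 6) and
                     * avail_uld_qsets = roundup(6, 4) = 8.
                     */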
5760                if (avail_qsets < num_ulds * adap->params.nports) {
5761                        adap->params.offload = 0;
5762                        adap->params.crypto = 0;
5763                        s->ofldqsets = 0;
5764                } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
5765                        s->ofldqsets = adap->params.nports;
5766                } else {
5767                        s->ofldqsets = avail_uld_qsets;
5768                }
5769
5770                avail_qsets -= num_ulds * s->ofldqsets;
5771        }
5772
5773        /* ETHOFLD Queues used for QoS offload should follow same
5774         * allocation scheme as normal Ethernet Queues.
5775         */
5776        if (is_ethofld(adap)) {
5777                if (avail_qsets < s->max_ethqsets) {
5778                        adap->params.ethofld = 0;
5779                        s->eoqsets = 0;
5780                } else {
5781                        s->eoqsets = s->max_ethqsets;
5782                }
5783                avail_qsets -= s->eoqsets;
5784        }
5785
5786        /* Mirror queues must follow same scheme as normal Ethernet
5787         * Queues, when there are enough queues available. Otherwise,
5788         * allocate at least 1 queue per port. If even 1 queue is not
5789         * available, then disable mirror queues support.
5790         */
5791        if (avail_qsets >= s->max_ethqsets)
5792                s->mirrorqsets = s->max_ethqsets;
5793        else if (avail_qsets >= adap->params.nports)
5794                s->mirrorqsets = adap->params.nports;
5795        else
5796                s->mirrorqsets = 0;
5797        avail_qsets -= s->mirrorqsets;
5798
5799        for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5800                struct sge_eth_rxq *r = &s->ethrxq[i];
5801
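                    /* Assuming init_rspq's (us, cnt, size, iqe_size) parameter
                     * order: roughly a 5us interrupt holdoff, a 10-packet
                     * count threshold, and 1024 response-queue entries of
                     * 64 bytes each; the Free List defaults to 72 entries.
                     */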
5802                init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5803                r->fl.size = 72;
5804        }
5805
5806        for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5807                s->ethtxq[i].q.size = 1024;
5808
5809        for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5810                s->ctrlq[i].q.size = 512;
5811
5812        if (!is_t4(adap->params.chip))
5813                s->ptptxq.q.size = 8;
5814
5815        init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5816        init_rspq(adap, &s->intrq, 0, 1, 512, 64);
5817
5818        return 0;
5819}
5820
5821/*
5822 * Reduce the number of Ethernet queues across all ports to at most n.
5823 * The caller guarantees n >= nports, so every port keeps at least one queue.
5824 */
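    /* Worked example (hypothetical): with per-port nqsets = {8, 8, 1, 1} and
     * n = 12, the round-robin pass below peels one Queue Set at a time from
     * ports that still have more than one, giving {5, 5, 1, 1}; first_qset is
     * then recomputed as the running sums {0, 5, 10, 11}.
     */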
5825static void reduce_ethqs(struct adapter *adap, int n)
5826{
5827        int i;
5828        struct port_info *pi;
5829
5830        while (n < adap->sge.ethqsets)
5831                for_each_port(adap, i) {
5832                        pi = adap2pinfo(adap, i);
5833                        if (pi->nqsets > 1) {
5834                                pi->nqsets--;
5835                                adap->sge.ethqsets--;
5836                                if (adap->sge.ethqsets <= n)
5837                                        break;
5838                        }
5839                }
5840
5841        n = 0;
5842        for_each_port(adap, i) {
5843                pi = adap2pinfo(adap, i);
5844                pi->first_qset = n;
5845                n += pi->nqsets;
5846        }
5847}
5848
5849static int alloc_msix_info(struct adapter *adap, u32 num_vec)
5850{
5851        struct msix_info *msix_info;
5852
5853        msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
5854        if (!msix_info)
5855                return -ENOMEM;
5856
5857        adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
5858                                            sizeof(long), GFP_KERNEL);
5859        if (!adap->msix_bmap.msix_bmap) {
5860                kfree(msix_info);
5861                return -ENOMEM;
5862        }
5863
5864        spin_lock_init(&adap->msix_bmap.lock);
5865        adap->msix_bmap.mapsize = num_vec;
5866
5867        adap->msix_info = msix_info;
5868        return 0;
5869}
5870
5871static void free_msix_info(struct adapter *adap)
5872{
5873        kfree(adap->msix_bmap.msix_bmap);
5874        kfree(adap->msix_info);
5875}
5876
5877int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
5878{
5879        struct msix_bmap *bmap = &adap->msix_bmap;
5880        unsigned int msix_idx;
5881        unsigned long flags;
5882
5883        spin_lock_irqsave(&bmap->lock, flags);
5884        msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5885        if (msix_idx < bmap->mapsize) {
5886                __set_bit(msix_idx, bmap->msix_bmap);
5887        } else {
5888                spin_unlock_irqrestore(&bmap->lock, flags);
5889                return -ENOSPC;
5890        }
5891
5892        spin_unlock_irqrestore(&bmap->lock, flags);
5893        return msix_idx;
5894}
5895
5896void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
5897                                 unsigned int msix_idx)
5898{
5899        struct msix_bmap *bmap = &adap->msix_bmap;
5900        unsigned long flags;
5901
5902        spin_lock_irqsave(&bmap->lock, flags);
5903        __clear_bit(msix_idx, bmap->msix_bmap);
5904        spin_unlock_irqrestore(&bmap->lock, flags);
5905}
5906
5907/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5908#define EXTRA_VECS 2
5909
5910static int enable_msix(struct adapter *adap)
5911{
5912        u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
5913        u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
5914        u8 num_uld = 0, nchan = adap->params.nports;
5915        u32 i, want, need, num_vec;
5916        struct sge *s = &adap->sge;
5917        struct msix_entry *entries;
5918        struct port_info *pi;
5919        int allocated, ret;
5920
5921        want = s->max_ethqsets;
5922#ifdef CONFIG_CHELSIO_T4_DCB
5923        /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
5924         * each port.
5925         */
5926        need = 8 * nchan;
5927#else
5928        need = nchan;
5929#endif
5930        eth_need = need;
5931        if (is_uld(adap)) {
5932                num_uld = adap->num_ofld_uld + adap->num_uld;
5933                want += num_uld * s->ofldqsets;
5934                uld_need = num_uld * nchan;
5935                need += uld_need;
5936        }
5937
5938        if (is_ethofld(adap)) {
5939                want += s->eoqsets;
5940                ethofld_need = eth_need;
5941                need += ethofld_need;
5942        }
5943
5944        if (s->mirrorqsets) {
5945                want += s->mirrorqsets;
5946                mirror_need = nchan;
5947                need += mirror_need;
5948        }
5949
5950        want += EXTRA_VECS;
5951        need += EXTRA_VECS;
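            /* E.g. (hypothetical): a 2-port adapter on a non-DCB build with
             * max_ethqsets = 16 and no ULD/ETHOFLD/mirror queues asks for
             * want = 16 + 2 = 18 vectors but can make do with
             * need = 2 + 2 = 4.
             */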
5952
5953        entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5954        if (!entries)
5955                return -ENOMEM;
5956
5957        for (i = 0; i < want; i++)
5958                entries[i].entry = i;
5959
5960        allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5961        if (allocated < 0) {
5962                /* Disable offload and attempt to get vectors for NIC
5963                 * only mode.
5964                 */
5965                want = s->max_ethqsets + EXTRA_VECS;
5966                need = eth_need + EXTRA_VECS;
5967                allocated = pci_enable_msix_range(adap->pdev, entries,
5968                                                  need, want);
5969                if (allocated < 0) {
5970                        dev_info(adap->pdev_dev,
5971                                 "Disabling MSI-X due to insufficient MSI-X vectors\n");
5972                        ret = allocated;
5973                        goto out_free;
5974                }
5975
5976                dev_info(adap->pdev_dev,
5977                         "Disabling offload due to insufficient MSI-X vectors\n");
5978                adap->params.offload = 0;
5979                adap->params.crypto = 0;
5980                adap->params.ethofld = 0;
5981                s->ofldqsets = 0;
5982                s->eoqsets = 0;
5983                s->mirrorqsets = 0;
5984                uld_need = 0;
5985                ethofld_need = 0;
5986                mirror_need = 0;
5987        }
5988
5989        num_vec = allocated;
5990        if (num_vec < want) {
5991                /* Distribute available vectors to the various queue groups.
5992                 * Every group gets its minimum requirement and NIC gets top
5993                 * priority for leftovers.
5994                 */
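                    /* E.g. (hypothetical): if only 12 of the 18 vectors above
                     * were granted, each group first gets its minimum and the
                     * loops below hand out the 12 - need leftovers, one
                     * Ethernet queue (plus an ETHOFLD queue, when enabled) per
                     * eligible port per pass, until the leftovers run out.
                     */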
5995                ethqsets = eth_need;
5996                if (is_uld(adap))
5997                        ofldqsets = nchan;
5998                if (is_ethofld(adap))
5999                        eoqsets = ethofld_need;
6000                if (s->mirrorqsets)
6001                        mirrorqsets = mirror_need;
6002
6003                num_vec -= need;
6004                while (num_vec) {
6005                        if (num_vec < eth_need + ethofld_need ||
6006                            ethqsets > s->max_ethqsets)
6007                                break;
6008
6009                        for_each_port(adap, i) {
6010                                pi = adap2pinfo(adap, i);
6011                                if (pi->nqsets < 2)
6012                                        continue;
6013
6014                                ethqsets++;
6015                                num_vec--;
6016                                if (ethofld_need) {
6017                                        eoqsets++;
6018                                        num_vec--;
6019                                }
6020                        }
6021                }
6022
6023                if (is_uld(adap)) {
6024                        while (num_vec) {
6025                                if (num_vec < uld_need ||
6026                                    ofldqsets > s->ofldqsets)
6027                                        break;
6028
6029                                ofldqsets++;
6030                                num_vec -= uld_need;
6031                        }
6032                }
6033
6034                if (s->mirrorqsets) {
6035                        while (num_vec) {
6036                                if (num_vec < mirror_need ||
6037                                    mirrorqsets > s->mirrorqsets)
6038                                        break;
6039
6040                                mirrorqsets++;
6041                                num_vec -= mirror_need;
6042                        }
6043                }
6044        } else {
6045                ethqsets = s->max_ethqsets;
6046                if (is_uld(adap))
6047                        ofldqsets = s->ofldqsets;
6048                if (is_ethofld(adap))
6049                        eoqsets = s->eoqsets;
6050                if (s->mirrorqsets)
6051                        mirrorqsets = s->mirrorqsets;
6052        }
6053
6054        if (ethqsets < s->max_ethqsets) {
6055                s->max_ethqsets = ethqsets;
6056                reduce_ethqs(adap, ethqsets);
6057        }
6058
6059        if (is_uld(adap)) {
6060                s->ofldqsets = ofldqsets;
6061                s->nqs_per_uld = s->ofldqsets;
6062        }
6063
6064        if (is_ethofld(adap))
6065                s->eoqsets = eoqsets;
6066
6067        if (s->mirrorqsets) {
6068                s->mirrorqsets = mirrorqsets;
6069                for_each_port(adap, i) {
6070                        pi = adap2pinfo(adap, i);
6071                        pi->nmirrorqsets = s->mirrorqsets / nchan;
6072                        mutex_init(&pi->vi_mirror_mutex);
6073                }
6074        }
6075
6076        /* Build the MSI-X index map for the vectors we were actually granted. */
6077        ret = alloc_msix_info(adap, allocated);
6078        if (ret)
6079                goto out_disable_msix;
6080
6081        for (i = 0; i < allocated; i++) {
6082                adap->msix_info[i].vec = entries[i].vector;
6083                adap->msix_info[i].idx = i;
6084        }
6085
6086        dev_info(adap->pdev_dev,
6087                 "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
6088                 allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
6089                 s->mirrorqsets);
6090
6091        kfree(entries);
6092        return 0;
6093
6094out_disable_msix:
6095        pci_disable_msix(adap->pdev);
6096
6097out_free:
6098        kfree(entries);
6099        return ret;
6100}
6101
6102#undef EXTRA_VECS
6103
6104static int init_rss(struct adapter *adap)
6105{
6106        unsigned int i;
6107        int err;
6108
6109        err = t4_init_rss_mode(adap, adap->mbox);
6110        if (err)
6111                return err;
6112
6113        for_each_port(adap, i) {
6114                struct port_info *pi = adap2pinfo(adap, i);
6115
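                    /* Allocate this port's RSS indirection table; the entries
                     * are filled in later with the port's Rx queue ids.
                     */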
6116                pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6117                if (!pi->rss)
6118                        return -ENOMEM;
6119        }
6120        return 0;
6121}
6122
6123/* Dump basic information about the adapter */
6124static void print_adapter_info(struct adapter *adapter)
6125{
6126        /* Hardware/Firmware/etc. Version/Revision IDs */
6127        t4_dump_version_info(adapter);
6128
6129        /* Software/Hardware configuration */
6130        dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
6131                 is_offload(adapter) ? "R" : "",
6132                 ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
6133                  (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
6134                 is_offload(adapter) ? "Offload" : "non-Offload");
6135}
6136
6137static void print_port_info(const struct net_device *dev)
6138{
6139        char buf[80];
6140        char *bufp = buf;
6141        const struct port_info *pi = netdev_priv(dev);
6142        const struct adapter *adap = pi->adapter;
6143
6144        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
6145                bufp += sprintf(bufp, "100M/");
6146        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
6147                bufp += sprintf(bufp, "1G/");
6148        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
6149                bufp += sprintf(bufp, "10G/");
6150        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
6151                bufp += sprintf(bufp, "25G/");
6152        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
6153                bufp += sprintf(bufp, "40G/");
6154        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
6155                bufp += sprintf(bufp, "50G/");
6156        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
6157                bufp += sprintf(bufp, "100G/");
6158        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
6159                bufp += sprintf(bufp, "200G/");
6160        if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
6161                bufp += sprintf(bufp, "400G/");
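            /* Back up over the trailing '/' so that, e.g., "100M/1G/10G/"
             * becomes "100M/1G/10GBASE-T" (illustrative speeds/port type).
             */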
6162        if (bufp != buf)
6163                --bufp;
6164        sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6165
6166        netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
6167                    dev->name, adap->params.vpd.id, adap->name, buf);
6168}
6169
6170/*
6171 * Free the following resources:
6172 * - memory used for tables
6173 * - MSI/MSI-X
6174 * - net devices
6175 * - resources FW is holding for us
6176 */
6177static void free_some_resources(struct adapter *adapter)
6178{
6179        unsigned int i;
6180
6181        kvfree(adapter->smt);
6182        kvfree(adapter->l2t);
6183        kvfree(adapter->srq);
6184        t4_cleanup_sched(adapter);
6185        kvfree(adapter->tids.tid_tab);
6186        cxgb4_cleanup_tc_matchall(adapter);
6187        cxgb4_cleanup_tc_mqprio(adapter);
6188        cxgb4_cleanup_tc_flower(adapter);
6189        cxgb4_cleanup_tc_u32(adapter);
6190        cxgb4_cleanup_ethtool_filters(adapter);
6191        kfree(adapter->sge.egr_map);
6192        kfree(adapter->sge.ingr_map);
6193        kfree(adapter->sge.starving_fl);
6194        kfree(adapter->sge.txq_maperr);
6195#ifdef CONFIG_DEBUG_FS
6196        kfree(adapter->sge.blocked_fl);
6197#endif
6198        disable_msi(adapter);
6199
6200        for_each_port(adapter, i)
6201                if (adapter->port[i]) {
6202                        struct port_info *pi = adap2pinfo(adapter, i);
6203
6204                        if (pi->viid != 0)
6205                                t4_free_vi(adapter, adapter->mbox, adapter->pf,
6206                                           0, pi->viid);
6207                        kfree(adap2pinfo(adapter, i)->rss);
6208                        free_netdev(adapter->port[i]);
6209                }
6210        if (adapter->flags & CXGB4_FW_OK)
6211                t4_fw_bye(adapter, adapter->pf);
6212}
6213
6214#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
6215                   NETIF_F_GSO_UDP_L4)
6216#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6217                   NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6218#define SEGMENT_SIZE 128
6219
6220static int t4_get_chip_type(struct adapter *adap, int ver)
6221{
6222        u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
6223
6224        switch (ver) {
6225        case CHELSIO_T4:
6226                return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
6227        case CHELSIO_T5:
6228                return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
6229        case CHELSIO_T6:
6230                return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
6231        default:
6232                break;
6233        }
6234        return -EINVAL;
6235}
6236
6237#ifdef CONFIG_PCI_IOV
6238static void cxgb4_mgmt_setup(struct net_device *dev)
6239{
6240        dev->type = ARPHRD_NONE;
6241        dev->mtu = 0;
6242        dev->hard_header_len = 0;
6243        dev->addr_len = 0;
6244        dev->tx_queue_len = 0;
6245        dev->flags |= IFF_NOARP;
6246        dev->priv_flags |= IFF_NO_QUEUE;
6247
6248        /* Initialize the device structure. */
6249        dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
6250        dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
6251}
6252
6253static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
6254{
6255        struct adapter *adap = pci_get_drvdata(pdev);
6256        int err = 0;
6257        int current_vfs = pci_num_vf(pdev);
6258        u32 pcie_fw;
6259
6260        pcie_fw = readl(adap->regs + PCIE_FW_A);
6261        /* Check if fw is initialized */
6262        if (!(pcie_fw & PCIE_FW_INIT_F)) {
6263                dev_warn(&pdev->dev, "Device not initialized\n");
6264                return -EOPNOTSUPP;
6265        }
6266
6267        /* If any of the VFs is already assigned to a guest OS, then
6268         * SR-IOV for this adapter cannot be modified.
6269         */
6270        if (current_vfs && pci_vfs_assigned(pdev)) {
6271                dev_err(&pdev->dev,
6272                        "Cannot modify SR-IOV while VFs are assigned\n");
6273                return current_vfs;
6274        }
6275        /* Note that the upper-level code ensures that we're never called with
6276         * a non-zero "num_vfs" when we already have VFs instantiated.  But
6277         * it never hurts to code defensively.
6278         */
6279        if (num_vfs != 0 && current_vfs != 0)
6280                return -EBUSY;
6281
6282        /* Nothing to do for no change. */
6283        if (num_vfs == current_vfs)
6284                return num_vfs;
6285
6286        /* Disable SRIOV when zero is passed. */
6287        if (!num_vfs) {
6288                pci_disable_sriov(pdev);
6289                /* free VF Management Interface */
6290                unregister_netdev(adap->port[0]);
6291                free_netdev(adap->port[0]);
6292                adap->port[0] = NULL;
6293
6294                /* free VF resources */
6295                adap->num_vfs = 0;
6296                kfree(adap->vfinfo);
6297                adap->vfinfo = NULL;
6298                return 0;
6299        }
6300
6301        if (!current_vfs) {
6302                struct fw_pfvf_cmd port_cmd, port_rpl;
6303                struct net_device *netdev;
6304                unsigned int pmask, port;
6305                struct pci_dev *pbridge;
6306                struct port_info *pi;
6307                char name[IFNAMSIZ];
6308                u32 devcap2;
6309                u16 flags;
6310
6311                /* If we want to instantiate Virtual Functions, then our
6312                 * parent bridge's PCI-E needs to support Alternative Routing
6313                 * ID (ARI) because our VFs will show up at function offset 8
6314                 * and above.
6315                 */
6316                pbridge = pdev->bus->self;
6317                pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
6318                pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
6319
6320                if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
6321                    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
6322                        /* Our parent bridge does not support ARI so issue a
6323                         * warning and skip instantiating the VFs.  They
6324                         * won't be reachable.
6325                         */
6326                        dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
6327                                 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
6328                                 PCI_FUNC(pbridge->devfn));
6329                        return -ENOTSUPP;
6330                }
6331                memset(&port_cmd, 0, sizeof(port_cmd));
6332                port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
6333                                                 FW_CMD_REQUEST_F |
6334                                                 FW_CMD_READ_F |
6335                                                 FW_PFVF_CMD_PFN_V(adap->pf) |
6336                                                 FW_PFVF_CMD_VFN_V(0));
6337                port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
6338                err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
6339                                 &port_rpl);
6340                if (err)
6341                        return err;
6342                pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
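                    /* Use the lowest port in our Port Access Rights Mask;
                     * ffs() is 1-based, hence the "- 1".
                     */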
6343                port = ffs(pmask) - 1;
6344                /* Allocate VF Management Interface. */
6345                snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
6346                         adap->pf);
6347                netdev = alloc_netdev(sizeof(struct port_info),
6348                                      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
6349                if (!netdev)
6350                        return -ENOMEM;
6351
6352                pi = netdev_priv(netdev);
6353                pi->adapter = adap;
6354                pi->lport = port;
6355                pi->tx_chan = port;
6356                SET_NETDEV_DEV(netdev, &pdev->dev);
6357
6358                adap->port[0] = netdev;
6359                pi->port_id = 0;
6360
6361                err = register_netdev(adap->port[0]);
6362                if (err) {
6363                        pr_info("Unable to register VF mgmt netdev %s\n", name);
6364                        free_netdev(adap->port[0]);
6365                        adap->port[0] = NULL;
6366                        return err;
6367                }
6368                /* Allocate and set up VF Information. */
6369                adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
6370                                       sizeof(struct vf_info), GFP_KERNEL);
6371                if (!adap->vfinfo) {
6372                        unregister_netdev(adap->port[0]);
6373                        free_netdev(adap->port[0]);
6374                        adap->port[0] = NULL;
6375                        return -ENOMEM;
6376                }
6377                cxgb4_mgmt_fill_vf_station_mac_addr(adap);
6378        }
6379        /* Instantiate the requested number of VFs. */
6380        err = pci_enable_sriov(pdev, num_vfs);
6381        if (err) {
6382                pr_info("Unable to instantiate %d VFs\n", num_vfs);
6383                if (!current_vfs) {
6384                        unregister_netdev(adap->port[0]);
6385                        free_netdev(adap->port[0]);
6386                        adap->port[0] = NULL;
6387                        kfree(adap->vfinfo);
6388                        adap->vfinfo = NULL;
6389                }
6390                return err;
6391        }
6392
6393        adap->num_vfs = num_vfs;
6394        return num_vfs;
6395}
6396#endif /* CONFIG_PCI_IOV */
6397
6398#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6399
6400static int chcr_offload_state(struct adapter *adap,
6401                              enum cxgb4_netdev_tls_ops op_val)
6402{
6403        switch (op_val) {
6404#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6405        case CXGB4_TLSDEV_OPS:
6406                if (!adap->uld[CXGB4_ULD_KTLS].handle) {
6407                        dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
6408                        return -EOPNOTSUPP;
6409                }
6410                if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
6411                        dev_dbg(adap->pdev_dev,
6412                                "ch_ktls driver has no registered tlsdev_ops\n");
6413                        return -EOPNOTSUPP;
6414                }
6415                break;
6416#endif /* CONFIG_CHELSIO_TLS_DEVICE */
6417#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6418        case CXGB4_XFRMDEV_OPS:
6419                if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
6420                        dev_dbg(adap->pdev_dev, "ch_ipsec driver is not loaded\n");
6421                        return -EOPNOTSUPP;
6422                }
6423                if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
6424                        dev_dbg(adap->pdev_dev,
6425                                "ch_ipsec driver has no registered xfrmdev_ops\n");
6426                        return -EOPNOTSUPP;
6427                }
6428                break;
6429#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6430        default:
6431                dev_dbg(adap->pdev_dev,
6432                        "driver has no support for offload %d\n", op_val);
6433                return -EOPNOTSUPP;
6434        }
6435
6436        return 0;
6437}
6438
6439#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
6440
6441#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6442
6443static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
6444                              enum tls_offload_ctx_dir direction,
6445                              struct tls_crypto_info *crypto_info,
6446                              u32 tcp_sn)
6447{
6448        struct adapter *adap = netdev2adap(netdev);
6449        int ret;
6450
6451        mutex_lock(&uld_mutex);
6452        ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
6453        if (ret)
6454                goto out_unlock;
6455
6456        ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6457        if (ret)
6458                goto out_unlock;
6459
6460        ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
6461                                                                direction,
6462                                                                crypto_info,
6463                                                                tcp_sn);
6464        /* If there is a failure, drop the kTLS feature refcount we just took. */
6465        if (ret)
6466                cxgb4_set_ktls_feature(adap,
6467                                       FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6468out_unlock:
6469        mutex_unlock(&uld_mutex);
6470        return ret;
6471}
6472
6473static void cxgb4_ktls_dev_del(struct net_device *netdev,
6474                               struct tls_context *tls_ctx,
6475                               enum tls_offload_ctx_dir direction)
6476{
6477        struct adapter *adap = netdev2adap(netdev);
6478
6479        mutex_lock(&uld_mutex);
6480        if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
6481                goto out_unlock;
6482
6483        adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6484                                                          direction);
6485
6486out_unlock:
6487        cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6488        mutex_unlock(&uld_mutex);
6489}
6490
6491static const struct tlsdev_ops cxgb4_ktls_ops = {
6492        .tls_dev_add = cxgb4_ktls_dev_add,
6493        .tls_dev_del = cxgb4_ktls_dev_del,
6494};
6495#endif /* CONFIG_CHELSIO_TLS_DEVICE */
6496
6497#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6498
6499static int cxgb4_xfrm_add_state(struct xfrm_state *x)
6500{
6501        struct adapter *adap = netdev2adap(x->xso.dev);
6502        int ret;
6503
6504        if (!mutex_trylock(&uld_mutex)) {
6505                dev_dbg(adap->pdev_dev,
6506                        "crypto ULD critical resource is in use\n");
6507                return -EBUSY;
6508        }
6509        ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
6510        if (ret)
6511                goto out_unlock;
6512
6513        ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
6514
6515out_unlock:
6516        mutex_unlock(&uld_mutex);
6517
6518        return ret;
6519}
6520
6521static void cxgb4_xfrm_del_state(struct xfrm_state *x)
6522{
6523        struct adapter *adap = netdev2adap(x->xso.dev);
6524
6525        if (!mutex_trylock(&uld_mutex)) {
6526                dev_dbg(adap->pdev_dev,
6527                        "crypto ULD critical resource is in use\n");
6528                return;
6529        }
6530        if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6531                goto out_unlock;
6532
6533        adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
6534
6535out_unlock:
6536        mutex_unlock(&uld_mutex);
6537}
6538
6539static void cxgb4_xfrm_free_state(struct xfrm_state *x)
6540{
6541        struct adapter *adap = netdev2adap(x->xso.dev);
6542
6543        if (!mutex_trylock(&uld_mutex)) {
6544                dev_dbg(adap->pdev_dev,
6545                        "crypto ULD critical resource is in use\n");
6546                return;
6547        }
6548        if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6549                goto out_unlock;
6550
6551        adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
6552
6553out_unlock:
6554        mutex_unlock(&uld_mutex);
6555}
6556
6557static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
6558{
6559        struct adapter *adap = netdev2adap(x->xso.dev);
6560        bool ret = false;
6561
6562        if (!mutex_trylock(&uld_mutex)) {
6563                dev_dbg(adap->pdev_dev,
6564                        "crypto ULD critical resource is in use\n");
6565                return ret;
6566        }
6567        if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6568                goto out_unlock;
6569
6570        ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
6571
6572out_unlock:
6573        mutex_unlock(&uld_mutex);
6574        return ret;
6575}
6576
6577static void cxgb4_advance_esn_state(struct xfrm_state *x)
6578{
6579        struct adapter *adap = netdev2adap(x->xso.dev);
6580
6581        if (!mutex_trylock(&uld_mutex)) {
6582                dev_dbg(adap->pdev_dev,
6583                        "crypto ULD critical resource is in use\n");
6584                return;
6585        }
6586        if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6587                goto out_unlock;
6588
6589        adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
6590
6591out_unlock:
6592        mutex_unlock(&uld_mutex);
6593}
6594
6595static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
6596        .xdo_dev_state_add      = cxgb4_xfrm_add_state,
6597        .xdo_dev_state_delete   = cxgb4_xfrm_del_state,
6598        .xdo_dev_state_free     = cxgb4_xfrm_free_state,
6599        .xdo_dev_offload_ok     = cxgb4_ipsec_offload_ok,
6600        .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
6601};
6602
6603#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6604
6605static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6606{
6607        struct net_device *netdev;
6608        struct adapter *adapter;
6609        static int adap_idx = 1;
6610        int s_qpp, qpp, num_seg;
6611        struct port_info *pi;
6612        bool highdma = false;
6613        enum chip_type chip;
6614        void __iomem *regs;
6615        int func, chip_ver;
6616        u16 device_id;
6617        int i, err;
6618        u32 whoami;
6619
6620        err = pci_request_regions(pdev, KBUILD_MODNAME);
6621        if (err) {
6622                /* Just info, some other driver may have claimed the device. */
6623                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6624                return err;
6625        }
6626
6627        err = pci_enable_device(pdev);
6628        if (err) {
6629                dev_err(&pdev->dev, "cannot enable PCI device\n");
6630                goto out_release_regions;
6631        }
6632
6633        regs = pci_ioremap_bar(pdev, 0);
6634        if (!regs) {
6635                dev_err(&pdev->dev, "cannot map device registers\n");
6636                err = -ENOMEM;
6637                goto out_disable_device;
6638        }
6639
6640        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6641        if (!adapter) {
6642                err = -ENOMEM;
6643                goto out_unmap_bar0;
6644        }
6645
6646        adapter->regs = regs;
6647        err = t4_wait_dev_ready(regs);
6648        if (err < 0)
6649                goto out_free_adapter;
6650
6651        /* We control everything through one PF */
6652        whoami = t4_read_reg(adapter, PL_WHOAMI_A);
6653        pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
6654        chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
6655        if ((int)chip < 0) {
6656                dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
6657                err = chip;
6658                goto out_free_adapter;
6659        }
6660        chip_ver = CHELSIO_CHIP_VERSION(chip);
6661        func = chip_ver <= CHELSIO_T5 ?
6662               SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
6663
6664        adapter->pdev = pdev;
6665        adapter->pdev_dev = &pdev->dev;
6666        adapter->name = pci_name(pdev);
6667        adapter->mbox = func;
6668        adapter->pf = func;
6669        adapter->params.chip = chip;
6670        adapter->adap_idx = adap_idx;
6671        adapter->msg_enable = DFLT_MSG_ENABLE;
6672        adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
6673                                    (sizeof(struct mbox_cmd) *
6674                                     T4_OS_LOG_MBOX_CMDS),
6675                                    GFP_KERNEL);
6676        if (!adapter->mbox_log) {
6677                err = -ENOMEM;
6678                goto out_free_adapter;
6679        }
6680        spin_lock_init(&adapter->mbox_lock);
6681        INIT_LIST_HEAD(&adapter->mlist.list);
6682        adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
6683        pci_set_drvdata(pdev, adapter);
6684
6685        if (func != ent->driver_data) {
6686                pci_disable_device(pdev);
6687                pci_save_state(pdev);        /* to restore SR-IOV later */
6688                return 0;
6689        }
6690
6691        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6692                highdma = true;
6693                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6694                if (err) {
6695                        dev_err(&pdev->dev,
6696                                "unable to obtain 64-bit DMA for coherent allocations\n");
6697                        goto out_free_adapter;
6698                }
6699        } else {
6700                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6701                if (err) {
6702                        dev_err(&pdev->dev, "no usable DMA configuration\n");
6703                        goto out_free_adapter;
6704                }
6705        }
6706
6707        pci_enable_pcie_error_reporting(pdev);
6708        pci_set_master(pdev);
6709        pci_save_state(pdev);
6710        adap_idx++;
6711        adapter->workq = create_singlethread_workqueue("cxgb4");
6712        if (!adapter->workq) {
6713                err = -ENOMEM;
6714                goto out_free_adapter;
6715        }
6716
6717        /* PCI device has been enabled */
6718        adapter->flags |= CXGB4_DEV_ENABLED;
6719        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6720
6721        /* If possible, we use the PCIe Relaxed Ordering Attribute to deliver
6722         * Ingress Packet Data to Free List Buffers in order to allow for
6723         * chipset performance optimizations between the Root Complex and
6724         * Memory Controllers.  (Messages to the associated Ingress Queue
6725         * notifying new Packet Placement in the Free List Buffers will be
6726         * sent without the Relaxed Ordering Attribute, thus guaranteeing that
6727         * all preceding PCIe Transaction Layer Packets will be processed
6728         * first.)  But some Root Complexes have various issues with Upstream
6729         * Transaction Layer Packets with the Relaxed Ordering Attribute set.
6730         * For PCIe devices below such Root Complexes, the Relaxed Ordering
6731         * bit is cleared in the configuration space, so we check our PCIe
6732         * configuration space to see if it's flagged with advice against
6733         * using Relaxed Ordering.
6734         */
6735        if (!pcie_relaxed_ordering_enabled(pdev))
6736                adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
6737
6738        spin_lock_init(&adapter->stats_lock);
6739        spin_lock_init(&adapter->tid_release_lock);
6740        spin_lock_init(&adapter->win0_lock);
6741
6742        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6743        INIT_WORK(&adapter->db_full_task, process_db_full);
6744        INIT_WORK(&adapter->db_drop_task, process_db_drop);
6745        INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
6746
6747        err = t4_prep_adapter(adapter);
6748        if (err)
6749                goto out_free_adapter;
6750
6751        if (is_kdump_kernel()) {
6752                /* Collect hardware state and append to /proc/vmcore */
6753                err = cxgb4_cudbg_vmcore_add_dump(adapter);
6754                if (err) {
6755                        dev_warn(adapter->pdev_dev,
6756                                 "Failed to collect vmcore device dump, err: %d. Continuing\n",
6757                                 err);
6758                        err = 0;
6759                }
6760        }
6761
6762        if (!is_t4(adapter->params.chip)) {
6763                s_qpp = (QUEUESPERPAGEPF0_S +
6764                        (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6765                        adapter->pf);
6766                qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6767                      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6768                num_seg = PAGE_SIZE / SEGMENT_SIZE;
6769
6770                /* Each segment is 128B in size. Write coalescing is enabled
6771                 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
6772                 * for the queue is less than the number of segments that can
6773                 * be accommodated in a page.
6774                 */
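                    /* E.g., with 4KB pages (hypothetical): num_seg =
                     * 4096 / 128 = 32, so write coalescing requires qpp <= 32.
                     */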
6775                if (qpp > num_seg) {
6776                        dev_err(&pdev->dev,
6777                                "Incorrect number of egress queues per page\n");
6778                        err = -EINVAL;
6779                        goto out_free_adapter;
6780                }
6781                adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6782                                   pci_resource_len(pdev, 2));
6783                if (!adapter->bar2) {
6784                        dev_err(&pdev->dev, "cannot map device bar2 region\n");
6785                        err = -ENOMEM;
6786                        goto out_free_adapter;
6787                }
6788        }
6789
6790        setup_memwin(adapter);
6791        err = adap_init0(adapter, 0);
6792        if (err)
6793                goto out_unmap_bar;
6794
6795        setup_memwin_rdma(adapter);
6796
6797        /* configure SGE_STAT_CFG_A to read WC stats */
6798        if (!is_t4(adapter->params.chip))
6799                t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
6800                             (is_t5(adapter->params.chip) ? STATMODE_V(0) :
6801                              T6_STATMODE_V(0)));
6802
6803        /* Initialize hash mac addr list */
6804        INIT_LIST_HEAD(&adapter->mac_hlist);
6805
6806        for_each_port(adapter, i) {
6807                /* To support MQPRIO offload, we need some extra queues
6808                 * for the ETHOFLD TIDs. Keep the count equal to MAX_ATIDS
6809                 * for now. Once we connect to the firmware and query the
6810                 * EOTID parameters, we'll learn the actual number of
6811                 * EOTIDs supported.
6812                 */
6813                netdev = alloc_etherdev_mq(sizeof(struct port_info),
6814                                           MAX_ETH_QSETS + MAX_ATIDS);
6815                if (!netdev) {
6816                        err = -ENOMEM;
6817                        goto out_free_dev;
6818                }
6819
6820                SET_NETDEV_DEV(netdev, &pdev->dev);
6821
6822                adapter->port[i] = netdev;
6823                pi = netdev_priv(netdev);
6824                pi->adapter = adapter;
6825                pi->xact_addr_filt = -1;
6826                pi->port_id = i;
6827                netdev->irq = pdev->irq;
6828
6829                netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6830                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6831                        NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
6832                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
6833                        NETIF_F_HW_TC | NETIF_F_NTUPLE;
6834
6835                if (chip_ver > CHELSIO_T5) {
6836                        netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6837                                                   NETIF_F_IPV6_CSUM |
6838                                                   NETIF_F_RXCSUM |
6839                                                   NETIF_F_GSO_UDP_TUNNEL |
6840                                                   NETIF_F_GSO_UDP_TUNNEL_CSUM |
6841                                                   NETIF_F_TSO | NETIF_F_TSO6;
6842
6843                        netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6844                                               NETIF_F_GSO_UDP_TUNNEL_CSUM |
6845                                               NETIF_F_HW_TLS_RECORD;
6846
6847                        if (adapter->rawf_cnt)
6848                                netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
6849                }
6850
6851                if (highdma)
6852                        netdev->hw_features |= NETIF_F_HIGHDMA;
6853                netdev->features |= netdev->hw_features;
6854                netdev->vlan_features = netdev->features & VLAN_FEAT;
6855#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6856                if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6857                        netdev->hw_features |= NETIF_F_HW_TLS_TX;
6858                        netdev->tlsdev_ops = &cxgb4_ktls_ops;
6859                        /* initialize the refcount */
6860                        refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6861                }
6862#endif /* CONFIG_CHELSIO_TLS_DEVICE */
6863#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6864                if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
6865                        netdev->hw_enc_features |= NETIF_F_HW_ESP;
6866                        netdev->features |= NETIF_F_HW_ESP;
6867                        netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
6868                }
6869#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6870
6871                netdev->priv_flags |= IFF_UNICAST_FLT;
6872
6873                /* MTU range: 81 - 9600 */
6874                netdev->min_mtu = 81;              /* accommodate SACK */
6875                netdev->max_mtu = MAX_MTU;
6876
6877                netdev->netdev_ops = &cxgb4_netdev_ops;
6878#ifdef CONFIG_CHELSIO_T4_DCB
6879                netdev->dcbnl_ops = &cxgb4_dcb_ops;
6880                cxgb4_dcb_state_init(netdev);
6881                cxgb4_dcb_version_init(netdev);
6882#endif
6883                cxgb4_set_ethtool_ops(netdev);
6884        }
6885
6886        cxgb4_init_ethtool_dump(adapter);
6887
6888        pci_set_drvdata(pdev, adapter);
6889
6890        if (adapter->flags & CXGB4_FW_OK) {
6891                err = t4_port_init(adapter, func, func, 0);
6892                if (err)
6893                        goto out_free_dev;
6894        } else if (adapter->params.nports == 1) {
6895                /* If we don't have a connection to the firmware -- possibly
6896                 * because of an error -- grab the raw VPD parameters so we
6897                 * can set the proper MAC Address on the debug network
6898                 * interface that we've created.
6899                 */
6900                u8 hw_addr[ETH_ALEN];
6901                u8 *na = adapter->params.vpd.na;
6902
6903                err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
6904                if (!err) {
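                            /* The VPD "na" field holds the MAC address as an
                             * ASCII hex string (illustrative value:
                             * "0007430A1B2C"); convert it one byte pair at a
                             * time.
                             */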
6905                        for (i = 0; i < ETH_ALEN; i++)
6906                                hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
6907                                              hex2val(na[2 * i + 1]));
6908                        t4_set_hw_addr(adapter, 0, hw_addr);
6909                }
6910        }
6911
6912        if (!(adapter->flags & CXGB4_FW_OK))
6913                goto fw_attach_fail;
6914
6915        /* Configure queues and allocate tables now; they can be needed as
6916         * soon as the first register_netdev completes.
6917         */
6918        err = cfg_queues(adapter);
6919        if (err)
6920                goto out_free_dev;
6921
6922        adapter->smt = t4_init_smt();
6923        if (!adapter->smt) {
6924                /* We tolerate a lack of SMT, giving up some functionality */
6925                dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
6926        }
6927
6928        adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
6929        if (!adapter->l2t) {
6930                /* We tolerate a lack of L2T, giving up some functionality */
6931                dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6932                adapter->params.offload = 0;
6933        }
6934
6935#if IS_ENABLED(CONFIG_IPV6)
6936        if (chip_ver <= CHELSIO_T5 &&
6937            (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
6938                /* CLIP functionality is not present in hardware,
6939                 * hence disable all offload features
6940                 */
6941                dev_warn(&pdev->dev,
6942                         "CLIP not enabled in hardware, continuing\n");
6943                adapter->params.offload = 0;
6944        } else {
6945                adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6946                                                  adapter->clipt_end);
6947                if (!adapter->clipt) {
6948                        /* We tolerate a lack of clip_table, giving up
6949                         * some functionality
6950                         */
6951                        dev_warn(&pdev->dev,
6952                                 "could not allocate Clip table, continuing\n");
6953                        adapter->params.offload = 0;
6954                }
6955        }
6956#endif
6957
6958        for_each_port(adapter, i) {
6959                pi = adap2pinfo(adapter, i);
6960                pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
6961                if (!pi->sched_tbl)
6962                        dev_warn(&pdev->dev,
6963                                 "could not activate scheduling on port %d\n",
6964                                 i);
6965        }
6966
6967        if (is_offload(adapter) || is_hashfilter(adapter)) {
6968                if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
6969                        u32 v;
6970
6971                        v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
6972                        if (chip_ver <= CHELSIO_T5) {
6973                                adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
6974                                v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
6975                                adapter->tids.hash_base = v / 4;
6976                        } else {
6977                                adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
6978                                v = t4_read_reg(adapter,
6979                                                T6_LE_DB_HASH_TID_BASE_A);
6980                                adapter->tids.hash_base = v;
6981                        }
6982                }
6983        }
6984
6985        if (tid_init(&adapter->tids) < 0) {
6986                dev_warn(&pdev->dev,
6987                         "could not allocate TID table, continuing\n");
6988                adapter->params.offload = 0;
6989        } else {
6990                adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
6991                if (!adapter->tc_u32)
6992                        dev_warn(&pdev->dev,
6993                                 "could not offload tc u32, continuing\n");
6994
6995                if (cxgb4_init_tc_flower(adapter))
6996                        dev_warn(&pdev->dev,
6997                                 "could not offload tc flower, continuing\n");
6998
6999                if (cxgb4_init_tc_mqprio(adapter))
7000                        dev_warn(&pdev->dev,
7001                                 "could not offload tc mqprio, continuing\n");
7002
7003                if (cxgb4_init_tc_matchall(adapter))
7004                        dev_warn(&pdev->dev,
7005                                 "could not offload tc matchall, continuing\n");
7006                if (cxgb4_init_ethtool_filters(adapter))
7007                        dev_warn(&pdev->dev,
7008                                 "could not initialize ethtool filters, continuing\n");
7009        }
7010
7011        /* See what interrupts we'll be using */
7012        if (msi > 1 && enable_msix(adapter) == 0)
7013                adapter->flags |= CXGB4_USING_MSIX;
7014        else if (msi > 0 && pci_enable_msi(pdev) == 0) {
7015                adapter->flags |= CXGB4_USING_MSI;
7016                if (msi > 1)
7017                        free_msix_info(adapter);
7018        }
7019
7020        /* Check PCI Express bandwidth capabilities. */
7021        pcie_print_link_status(pdev);
7022
7023        cxgb4_init_mps_ref_entries(adapter);
7024
7025        err = init_rss(adapter);
7026        if (err)
7027                goto out_free_dev;
7028
7029        err = setup_non_data_intr(adapter);
7030        if (err) {
7031                dev_err(adapter->pdev_dev,
7032                        "Non Data interrupt allocation failed, err: %d\n", err);
7033                goto out_free_dev;
7034        }
7035
7036        err = setup_fw_sge_queues(adapter);
7037        if (err) {
7038                dev_err(adapter->pdev_dev,
7039                        "FW sge queue allocation failed, err %d\n", err);
7040                goto out_free_dev;
7041        }
7042
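        /*
         * Editor's note: fw_attach_fail is jumped to from earlier in
         * init_one() (not shown here) when the driver cannot attach to
         * the firmware; we still fall through to net device registration
         * so the adapter remains visible for diagnostics and recovery.
         */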
fw_attach_fail:
        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);
                adapter->port[i]->dev_port = pi->lport;
                netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
                netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

                netif_carrier_off(adapter->port[i]);

                err = register_netdev(adapter->port[i]);
                if (err)
                        break;
                adapter->chan_map[pi->tx_chan] = i;
                print_port_info(adapter->port[i]);
        }
        if (i == 0) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }
        if (err) {
                dev_warn(&pdev->dev, "only %d net devices registered\n", i);
                err = 0;
        }

        if (cxgb4_debugfs_root) {
                adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
                                                           cxgb4_debugfs_root);
                setup_debugfs(adapter);
        }

        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
        pdev->needs_freset = 1;
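        /*
         * Editor's note: needs_freset asks the EEH core to recover this
         * slot with a fundamental (PERST#-style) reset rather than a hot
         * reset when an error is detected.
         */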

        if (is_uld(adapter))
                cxgb4_uld_enable(adapter);

        if (!is_t4(adapter->params.chip))
                cxgb4_ptp_init(adapter);

        if (IS_REACHABLE(CONFIG_THERMAL) &&
            !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
                cxgb4_thermal_init(adapter);

        print_adapter_info(adapter);
        return 0;

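        /*
         * Error unwinding: each label below releases the resources that
         * were acquired after the point it is jumped to from, so teardown
         * runs in reverse order of setup.
         */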
 out_free_dev:
        t4_free_sge_resources(adapter);
        free_some_resources(adapter);
        if (adapter->flags & CXGB4_USING_MSIX)
                free_msix_info(adapter);
        if (adapter->num_uld || adapter->num_ofld_uld)
                t4_uld_mem_free(adapter);
 out_unmap_bar:
        if (!is_t4(adapter->params.chip))
                iounmap(adapter->bar2);
 out_free_adapter:
        if (adapter->workq)
                destroy_workqueue(adapter->workq);

        kfree(adapter->mbox_log);
        kfree(adapter);
 out_unmap_bar0:
        iounmap(regs);
 out_disable_device:
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
 out_release_regions:
        pci_release_regions(pdev);
        return err;
}

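/*
 * remove_one - undo everything init_one() set up for this PCI function.
 * Editor's note: only PF4 (the physical function on which the driver
 * does full initialization) carries the net devices, ULDs and other
 * per-adapter state torn down below; the other PFs merely disable any
 * VFs they had provisioned.
 */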
static void remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);
        struct hash_mac_addr *entry, *tmp;

        if (!adapter) {
                pci_release_regions(pdev);
                return;
        }

        /* If we allocated filters, free up state associated with any
         * valid filters ...
         */
        clear_all_filters(adapter);

        adapter->flags |= CXGB4_SHUTTING_DOWN;

        if (adapter->pf == 4) {
                int i;

                /* Tear down per-adapter Work Queue first since it can contain
                 * references to our adapter data structure.
                 */
                destroy_workqueue(adapter->workq);

                detach_ulds(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                unregister_netdev(adapter->port[i]);

                t4_uld_clean_up(adapter);

                adap_free_hma_mem(adapter);

                disable_interrupts(adapter);

                cxgb4_free_mps_ref_entries(adapter);

                debugfs_remove_recursive(adapter->debugfs_root);

                if (!is_t4(adapter->params.chip))
                        cxgb4_ptp_stop(adapter);
                if (IS_REACHABLE(CONFIG_THERMAL))
                        cxgb4_thermal_remove(adapter);

                if (adapter->flags & CXGB4_FULL_INIT_DONE)
                        cxgb_down(adapter);

                if (adapter->flags & CXGB4_USING_MSIX)
                        free_msix_info(adapter);
                if (adapter->num_uld || adapter->num_ofld_uld)
                        t4_uld_mem_free(adapter);
                free_some_resources(adapter);
                list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
                                         list) {
                        list_del(&entry->list);
                        kfree(entry);
                }

#if IS_ENABLED(CONFIG_IPV6)
                t4_cleanup_clip_tbl(adapter);
#endif
                if (!is_t4(adapter->params.chip))
                        iounmap(adapter->bar2);
        }
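        /*
         * Editor's note: for PFs other than PF4, passing num_vfs == 0 to
         * cxgb4_iov_configure() disables any VFs this function had
         * enabled.
         */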
#ifdef CONFIG_PCI_IOV
        else {
                cxgb4_iov_configure(adapter->pdev, 0);
        }
#endif
        iounmap(adapter->regs);
        pci_disable_pcie_error_reporting(pdev);
        if ((adapter->flags & CXGB4_DEV_ENABLED)) {
                pci_disable_device(pdev);
                adapter->flags &= ~CXGB4_DEV_ENABLED;
        }
        pci_release_regions(pdev);
        kfree(adapter->mbox_log);
        synchronize_rcu();
        kfree(adapter);
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.  This is essentially a stripped-down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shut down any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        /* As with remove_one() above (see extended comment), we only want to
         * do cleanup on PCI devices which went all the way through init_one()
         * ...
         */
        if (!adapter) {
                pci_release_regions(pdev);
                return;
        }

        adapter->flags |= CXGB4_SHUTTING_DOWN;

        if (adapter->pf == 4) {
                int i;

                for_each_port(adapter, i)
                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                cxgb_close(adapter->port[i]);

                rtnl_lock();
                cxgb4_mqprio_stop_offload(adapter);
                rtnl_unlock();

                if (is_uld(adapter)) {
                        detach_ulds(adapter);
                        t4_uld_clean_up(adapter);
                }

                disable_interrupts(adapter);
                disable_msi(adapter);

                t4_sge_stop(adapter);
                if (adapter->flags & CXGB4_FW_OK)
                        t4_fw_bye(adapter, adapter->mbox);
        }
}

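/*
 * PCI driver glue: probe/remove/shutdown map onto init_one(),
 * remove_one() and shutdown_one() above, and EEH error recovery is
 * handled through the cxgb4_eeh handlers.
 */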
static struct pci_driver cxgb4_driver = {
        .name     = KBUILD_MODNAME,
        .id_table = cxgb4_pci_tbl,
        .probe    = init_one,
        .remove   = remove_one,
        .shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
        .sriov_configure = cxgb4_iov_configure,
#endif
        .err_handler = &cxgb4_eeh,
};

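/*
 * Module init: create the shared debugfs root first, register the PCI
 * driver, then hook the module-global IPv6 address notifier (used to
 * keep the CLIP table in sync); any failure unwinds in reverse order.
 */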
static int __init cxgb4_init_module(void)
{
        int ret;

        cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);

        ret = pci_register_driver(&cxgb4_driver);
        if (ret < 0)
                goto err_pci;

#if IS_ENABLED(CONFIG_IPV6)
        if (!inet6addr_registered) {
                ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
                if (ret)
                        pci_unregister_driver(&cxgb4_driver);
                else
                        inet6addr_registered = true;
        }
#endif

        if (ret == 0)
                return ret;

err_pci:
        debugfs_remove(cxgb4_debugfs_root);

        return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (inet6addr_registered) {
                unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
                inet6addr_registered = false;
        }
#endif
        pci_unregister_driver(&cxgb4_driver);
        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);