linux/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
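 *
 * For example, loading the module with "msi=0" forces legacy INTx
 * interrupts even on platforms that support MSI-X.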
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

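/*
 * Helpers used around link faults: enable_tx_fifo_drain() sets F_ENDROPPKT
 * so the MAC discards egress packets, letting the TX FIFO drain, and
 * cycles the RX/TX enables; disable_tx_fifo_drain() restores normal
 * operation once the link is healthy again.
 */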
static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

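/**
 *      t3_os_link_fault - handle a link fault notification
 *      @adap: the adapter
 *      @port_id: the port index affected by the fault
 *      @state: non-zero if the fault has cleared
 *
 *      Updates the carrier state for a link fault event.  When the fault
 *      clears, pending local faults are acknowledged and TX is re-enabled;
 *      while the fault persists, the TX FIFO is drained.
 */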
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the link change
 *      @port_id: the port index whose PHY module has changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

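/*
 * Request the per-queue-set data interrupts; MSI-X vector 0 stays reserved
 * for asynchronous events.  On failure, release any vectors acquired so far.
 */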
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

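/*
 * Poll the first response queue's offload-packet count until it has advanced
 * by @n beyond @init_cnt, giving up after roughly 100ms (10 x 10ms attempts).
 */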
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

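/*
 * Write benign entries to every SMT, L2T, and routing-table location so the
 * TP's parity-protected memories start from a known-good state, then wait
 * for the firmware to acknowledge all the writes.
 */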
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = __skb_put_zero(skb, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

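/* Ring the SGE doorbell for every egress context of each active queue set. */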
static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

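/*
 * Helpers for the per-device sysfs attributes below; both take the RTNL
 * around the attribute's format/set callback.
 */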
static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0644, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static const struct attribute_group cxgb3_attr_group = {
        .attrs = cxgb3_attrs,
};

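/*
 * Show a TX traffic scheduler's rate limit.  Reads the scheduler's
 * bytes-per-tick and clock-divider fields through the TP TM PIO registers
 * and converts them to Kbps using the core clock; a zero divider means the
 * scheduler is disabled.
 */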
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, 0644, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static const struct attribute_group offload_attr_group = {
        .attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

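/*
 * Program the SMT entry for port @idx with the port's LAN and iSCSI MAC
 * addresses via a CPL_SMT_WRITE_REQ on the offload queue.
 */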
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = __skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

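/*
 * Send a firmware management work request binding TX queue @qidx to packet
 * scheduler @sched, with minimum/maximum parameters @lo and @hi and port
 * binding @port.
 */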
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."                    \
        __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."                \
        __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

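/*
 * Load a PHY EDC firmware image, verify that its 32-bit words sum to
 * 0xffffffff (the image carries a trailing checksum word), and store it as
 * 16-bit words in phy->phy_cache for the PHY code to program.
 */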
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check size, taking the trailing checksum word into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4 ; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

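/*
 * Update hardware VLAN tag extraction for a port.  Rev 0 chips have a
 * single control shared by all ports, so extraction stays enabled as long
 * as any port still requests NETIF_F_HW_VLAN_CTAG_RX.
 */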
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else {
                err = request_irq(adap->pdev->irq,
                                  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
                                  adap->name, adap);
                if (err)
                        goto irq_err;
        }

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
}

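/*
 * (Re)arm the periodic adapter check task, using the link-poll period when
 * one is configured and the statistics update period otherwise.
 */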
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

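/*
 * Bring up the offload side of the adapter the first time a port is opened
 * for offload: enable offload mode, activate the offload module, program
 * MTUs and the SMT, and notify registered ULP clients.
 */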
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &dev->stats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
        if (fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
1643}
1644
1645static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1646{
1647        if (stringset == ETH_SS_STATS)
1648                memcpy(data, stats_strings, sizeof(stats_strings));
1649}
1650
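    /*
     * Sum one software SGE counter (port_stats[idx]) across all the
     * queue sets owned by a port.
     */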
1651static unsigned long collect_sge_port_stats(struct adapter *adapter,
1652                                            struct port_info *p, int idx)
1653{
1654        int i;
1655        unsigned long tot = 0;
1656
1657        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1658                tot += adapter->sge.qs[i].port_stats[idx];
1659        return tot;
1660}
1661
1662static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1663                      u64 *data)
1664{
1665        struct port_info *pi = netdev_priv(dev);
1666        struct adapter *adapter = pi->adapter;
1667        const struct mac_stats *s;
1668
1669        spin_lock(&adapter->stats_lock);
1670        s = t3_mac_update_stats(&pi->mac);
1671        spin_unlock(&adapter->stats_lock);
1672
1673        *data++ = s->tx_octets;
1674        *data++ = s->tx_frames;
1675        *data++ = s->tx_mcast_frames;
1676        *data++ = s->tx_bcast_frames;
1677        *data++ = s->tx_pause;
1678        *data++ = s->tx_underrun;
1679        *data++ = s->tx_fifo_urun;
1680
1681        *data++ = s->tx_frames_64;
1682        *data++ = s->tx_frames_65_127;
1683        *data++ = s->tx_frames_128_255;
1684        *data++ = s->tx_frames_256_511;
1685        *data++ = s->tx_frames_512_1023;
1686        *data++ = s->tx_frames_1024_1518;
1687        *data++ = s->tx_frames_1519_max;
1688
1689        *data++ = s->rx_octets;
1690        *data++ = s->rx_frames;
1691        *data++ = s->rx_mcast_frames;
1692        *data++ = s->rx_bcast_frames;
1693        *data++ = s->rx_pause;
1694        *data++ = s->rx_fcs_errs;
1695        *data++ = s->rx_symbol_errs;
1696        *data++ = s->rx_short;
1697        *data++ = s->rx_jabber;
1698        *data++ = s->rx_too_long;
1699        *data++ = s->rx_fifo_ovfl;
1700
1701        *data++ = s->rx_frames_64;
1702        *data++ = s->rx_frames_65_127;
1703        *data++ = s->rx_frames_128_255;
1704        *data++ = s->rx_frames_256_511;
1705        *data++ = s->rx_frames_512_1023;
1706        *data++ = s->rx_frames_1024_1518;
1707        *data++ = s->rx_frames_1519_max;
1708
1709        *data++ = pi->phy.fifo_errors;
1710
1711        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1712        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1713        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1714        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1715        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1716        *data++ = 0;
1717        *data++ = 0;
1718        *data++ = 0;
1719        *data++ = s->rx_cong_drops;
1720
1721        *data++ = s->num_toggled;
1722        *data++ = s->num_resets;
1723
1724        *data++ = s->link_faults;
1725}
1726
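    /*
     * Dump the registers in [start, end] into the snapshot buffer,
     * placing each register at the buffer offset equal to its address
     * so the register map stays position-stable.
     */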
1727static inline void reg_block_dump(struct adapter *ap, void *buf,
1728                                  unsigned int start, unsigned int end)
1729{
1730        u32 *p = buf + start;
1731
1732        for (; start <= end; start += sizeof(u32))
1733                *p++ = t3_read_reg(ap, start);
1734}
1735
1736static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1737                     void *buf)
1738{
1739        struct port_info *pi = netdev_priv(dev);
1740        struct adapter *ap = pi->adapter;
1741
1742        /*
1743         * Version scheme:
1744         * bits 0..9: chip version
1745         * bits 10..15: chip revision
1746         * bit 31: set for PCIe cards
1747         */
1748        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1749
1750        /*
1751         * We skip the MAC statistics registers because they are clear-on-read.
1752         * Also reading multi-register stats would need to synchronize with the
1753         * periodic mac stats accumulation.  Hard to justify the complexity.
1754         */
1755        memset(buf, 0, T3_REGMAP_SIZE);
1756        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1757        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1758        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1759        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1760        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1761        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1762                       XGM_REG(A_XGM_SERDES_STAT3, 1));
1763        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1764                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1765}
1766
1767static int restart_autoneg(struct net_device *dev)
1768{
1769        struct port_info *p = netdev_priv(dev);
1770
1771        if (!netif_running(dev))
1772                return -EAGAIN;
1773        if (p->link_config.autoneg != AUTONEG_ENABLE)
1774                return -EINVAL;
1775        p->phy.ops->autoneg_restart(&p->phy);
1776        return 0;
1777}
1778
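    /*
     * ethtool LED identify callback.  Returning 1 from ETHTOOL_ID_ACTIVE
     * asks ethtool to call back once a second; GPIO0, which drives the
     * identify LED, is then toggled on and off.
     */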
1779static int set_phys_id(struct net_device *dev,
1780                       enum ethtool_phys_id_state state)
1781{
1782        struct port_info *pi = netdev_priv(dev);
1783        struct adapter *adapter = pi->adapter;
1784
1785        switch (state) {
1786        case ETHTOOL_ID_ACTIVE:
1787                return 1;       /* cycle on/off once per second */
1788
1789        case ETHTOOL_ID_OFF:
1790                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1791                break;
1792
1793        case ETHTOOL_ID_ON:
1794        case ETHTOOL_ID_INACTIVE:
1795                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1796                         F_GPIO0_OUT_VAL);
1797        }
1798
1799        return 0;
1800}
1801
1802static int get_link_ksettings(struct net_device *dev,
1803                              struct ethtool_link_ksettings *cmd)
1804{
1805        struct port_info *p = netdev_priv(dev);
1806        u32 supported;
1807
1808        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1809                                                p->link_config.supported);
1810        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1811                                                p->link_config.advertising);
1812
1813        if (netif_carrier_ok(dev)) {
1814                cmd->base.speed = p->link_config.speed;
1815                cmd->base.duplex = p->link_config.duplex;
1816        } else {
1817                cmd->base.speed = SPEED_UNKNOWN;
1818                cmd->base.duplex = DUPLEX_UNKNOWN;
1819        }
1820
1821        ethtool_convert_link_mode_to_legacy_u32(&supported,
1822                                                cmd->link_modes.supported);
1823
1824        cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1825        cmd->base.phy_address = p->phy.mdio.prtad;
1826        cmd->base.autoneg = p->link_config.autoneg;
1827        return 0;
1828}
1829
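    /*
     * Translate a speed/duplex pair into the corresponding SUPPORTED_*
     * capability bit; unsupported combinations (e.g. 10G half duplex)
     * map to 0.
     */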
1830static int speed_duplex_to_caps(int speed, int duplex)
1831{
1832        int cap = 0;
1833
1834        switch (speed) {
1835        case SPEED_10:
1836                if (duplex == DUPLEX_FULL)
1837                        cap = SUPPORTED_10baseT_Full;
1838                else
1839                        cap = SUPPORTED_10baseT_Half;
1840                break;
1841        case SPEED_100:
1842                if (duplex == DUPLEX_FULL)
1843                        cap = SUPPORTED_100baseT_Full;
1844                else
1845                        cap = SUPPORTED_100baseT_Half;
1846                break;
1847        case SPEED_1000:
1848                if (duplex == DUPLEX_FULL)
1849                        cap = SUPPORTED_1000baseT_Full;
1850                else
1851                        cap = SUPPORTED_1000baseT_Half;
1852                break;
1853        case SPEED_10000:
1854                if (duplex == DUPLEX_FULL)
1855                        cap = SUPPORTED_10000baseT_Full;
1856        }
1857        return cap;
1858}
1859
1860#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1861                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1862                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1863                      ADVERTISED_10000baseT_Full)
1864
1865static int set_link_ksettings(struct net_device *dev,
1866                              const struct ethtool_link_ksettings *cmd)
1867{
1868        struct port_info *p = netdev_priv(dev);
1869        struct link_config *lc = &p->link_config;
1870        u32 advertising;
1871
1872        ethtool_convert_link_mode_to_legacy_u32(&advertising,
1873                                                cmd->link_modes.advertising);
1874
1875        if (!(lc->supported & SUPPORTED_Autoneg)) {
1876                /*
1877                 * PHY offers a single speed/duplex.  See if that's what's
1878                 * being requested.
1879                 */
1880                if (cmd->base.autoneg == AUTONEG_DISABLE) {
1881                        u32 speed = cmd->base.speed;
1882                        int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1883                        if (lc->supported & cap)
1884                                return 0;
1885                }
1886                return -EINVAL;
1887        }
1888
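        /*
         * Note that forcing 1Gb/s is rejected below: 1000BASE-T
         * requires autonegotiation for master/slave resolution.
         */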
1889        if (cmd->base.autoneg == AUTONEG_DISABLE) {
1890                u32 speed = cmd->base.speed;
1891                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1892
1893                if (!(lc->supported & cap) || (speed == SPEED_1000))
1894                        return -EINVAL;
1895                lc->requested_speed = speed;
1896                lc->requested_duplex = cmd->base.duplex;
1897                lc->advertising = 0;
1898        } else {
1899                advertising &= ADVERTISED_MASK;
1900                advertising &= lc->supported;
1901                if (!advertising)
1902                        return -EINVAL;
1903                lc->requested_speed = SPEED_INVALID;
1904                lc->requested_duplex = DUPLEX_INVALID;
1905                lc->advertising = advertising | ADVERTISED_Autoneg;
1906        }
1907        lc->autoneg = cmd->base.autoneg;
1908        if (netif_running(dev))
1909                t3_link_start(&p->phy, &p->mac, lc);
1910        return 0;
1911}
1912
1913static void get_pauseparam(struct net_device *dev,
1914                           struct ethtool_pauseparam *epause)
1915{
1916        struct port_info *p = netdev_priv(dev);
1917
1918        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1919        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1920        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1921}
1922
1923static int set_pauseparam(struct net_device *dev,
1924                          struct ethtool_pauseparam *epause)
1925{
1926        struct port_info *p = netdev_priv(dev);
1927        struct link_config *lc = &p->link_config;
1928
1929        if (epause->autoneg == AUTONEG_DISABLE)
1930                lc->requested_fc = 0;
1931        else if (lc->supported & SUPPORTED_Autoneg)
1932                lc->requested_fc = PAUSE_AUTONEG;
1933        else
1934                return -EINVAL;
1935
1936        if (epause->rx_pause)
1937                lc->requested_fc |= PAUSE_RX;
1938        if (epause->tx_pause)
1939                lc->requested_fc |= PAUSE_TX;
1940        if (lc->autoneg == AUTONEG_ENABLE) {
1941                if (netif_running(dev))
1942                        t3_link_start(&p->phy, &p->mac, lc);
1943        } else {
1944                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1945                if (netif_running(dev))
1946                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1947        }
1948        return 0;
1949}
1950
1951static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1952{
1953        struct port_info *pi = netdev_priv(dev);
1954        struct adapter *adapter = pi->adapter;
1955        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1956
1957        e->rx_max_pending = MAX_RX_BUFFERS;
1958        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1959        e->tx_max_pending = MAX_TXQ_ENTRIES;
1960
1961        e->rx_pending = q->fl_size;
1962        e->rx_mini_pending = q->rspq_size;
1963        e->rx_jumbo_pending = q->jumbo_size;
1964        e->tx_pending = q->txq_size[0];
1965}
1966
1967static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1968{
1969        struct port_info *pi = netdev_priv(dev);
1970        struct adapter *adapter = pi->adapter;
1971        struct qset_params *q;
1972        int i;
1973
1974        if (e->rx_pending > MAX_RX_BUFFERS ||
1975            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1976            e->tx_pending > MAX_TXQ_ENTRIES ||
1977            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1978            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1979            e->rx_pending < MIN_FL_ENTRIES ||
1980            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1981            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1982                return -EINVAL;
1983
1984        if (adapter->flags & FULL_INIT_DONE)
1985                return -EBUSY;
1986
1987        q = &adapter->params.sge.qset[pi->first_qset];
1988        for (i = 0; i < pi->nqsets; ++i, ++q) {
1989                q->rspq_size = e->rx_mini_pending;
1990                q->fl_size = e->rx_pending;
1991                q->jumbo_size = e->rx_jumbo_pending;
1992                q->txq_size[0] = e->tx_pending;
1993                q->txq_size[1] = e->tx_pending;
1994                q->txq_size[2] = e->tx_pending;
1995        }
1996        return 0;
1997}
1998
1999static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2000{
2001        struct port_info *pi = netdev_priv(dev);
2002        struct adapter *adapter = pi->adapter;
2003        struct qset_params *qsp;
2004        struct sge_qset *qs;
2005        int i;
2006
2007        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2008                return -EINVAL;
2009
2010        for (i = 0; i < pi->nqsets; i++) {
2011                qsp = &adapter->params.sge.qset[i];
2012                qs = &adapter->sge.qs[i];
2013                qsp->coalesce_usecs = c->rx_coalesce_usecs;
2014                t3_update_qset_coalesce(qs, qsp);
2015        }
2016
2017        return 0;
2018}
2019
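    /*
     * All queue sets are normally programmed with the same coalescing
     * value, so the first qset's setting is reported for the device.
     */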
2020static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2021{
2022        struct port_info *pi = netdev_priv(dev);
2023        struct adapter *adapter = pi->adapter;
2024        struct qset_params *q = adapter->params.sge.qset;
2025
2026        c->rx_coalesce_usecs = q->coalesce_usecs;
2027        return 0;
2028}
2029
2030static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2031                      u8 * data)
2032{
2033        struct port_info *pi = netdev_priv(dev);
2034        struct adapter *adapter = pi->adapter;
2035        int i, err = 0;
2036
2037        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2038        if (!buf)
2039                return -ENOMEM;
2040
2041        e->magic = EEPROM_MAGIC;
2042        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2043                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2044
2045        if (!err)
2046                memcpy(data, buf + e->offset, e->len);
2047        kfree(buf);
2048        return err;
2049}
2050
2051static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2052                      u8 * data)
2053{
2054        struct port_info *pi = netdev_priv(dev);
2055        struct adapter *adapter = pi->adapter;
2056        u32 aligned_offset, aligned_len;
2057        __le32 *p;
2058        u8 *buf;
2059        int err;
2060
2061        if (eeprom->magic != EEPROM_MAGIC)
2062                return -EINVAL;
2063
2064        aligned_offset = eeprom->offset & ~3;
2065        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2066
2067        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2068                buf = kmalloc(aligned_len, GFP_KERNEL);
2069                if (!buf)
2070                        return -ENOMEM;
2071                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2072                if (!err && aligned_len > 4)
2073                        err = t3_seeprom_read(adapter,
2074                                              aligned_offset + aligned_len - 4,
2075                                              (__le32 *)&buf[aligned_len - 4]);
2076                if (err)
2077                        goto out;
2078                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2079        } else {
2080                buf = data;
            }
2081
2082        err = t3_seeprom_wp(adapter, 0);
2083        if (err)
2084                goto out;
2085
2086        for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2087                err = t3_seeprom_write(adapter, aligned_offset, *p);
2088                aligned_offset += 4;
2089        }
2090
2091        if (!err)
2092                err = t3_seeprom_wp(adapter, 1);
2093out:
2094        if (buf != data)
2095                kfree(buf);
2096        return err;
2097}
2098
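    /* The adapter has no Wake-on-LAN support; report empty capabilities. */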
2099static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2100{
2101        wol->supported = 0;
2102        wol->wolopts = 0;
2103        memset(&wol->sopass, 0, sizeof(wol->sopass));
2104}
2105
2106static const struct ethtool_ops cxgb_ethtool_ops = {
2107        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
2108        .get_drvinfo = get_drvinfo,
2109        .get_msglevel = get_msglevel,
2110        .set_msglevel = set_msglevel,
2111        .get_ringparam = get_sge_param,
2112        .set_ringparam = set_sge_param,
2113        .get_coalesce = get_coalesce,
2114        .set_coalesce = set_coalesce,
2115        .get_eeprom_len = get_eeprom_len,
2116        .get_eeprom = get_eeprom,
2117        .set_eeprom = set_eeprom,
2118        .get_pauseparam = get_pauseparam,
2119        .set_pauseparam = set_pauseparam,
2120        .get_link = ethtool_op_get_link,
2121        .get_strings = get_strings,
2122        .set_phys_id = set_phys_id,
2123        .nway_reset = restart_autoneg,
2124        .get_sset_count = get_sset_count,
2125        .get_ethtool_stats = get_stats,
2126        .get_regs_len = get_regs_len,
2127        .get_regs = get_regs,
2128        .get_wol = get_wol,
2129        .get_link_ksettings = get_link_ksettings,
2130        .set_link_ksettings = set_link_ksettings,
2131};
2132
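    /*
     * Range check for ioctl arguments where a negative value means
     * "leave this parameter unchanged": negatives always pass, anything
     * else must lie within [lo, hi].
     */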
2133static int in_range(int val, int lo, int hi)
2134{
2135        return val < 0 || (val <= hi && val >= lo);
2136}
2137
2138static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2139{
2140        struct port_info *pi = netdev_priv(dev);
2141        struct adapter *adapter = pi->adapter;
2142        u32 cmd;
2143        int ret;
2144
2145        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2146                return -EFAULT;
2147
2148        switch (cmd) {
2149        case CHELSIO_SET_QSET_PARAMS:{
2150                int i;
2151                struct qset_params *q;
2152                struct ch_qset_params t;
2153                int q1 = pi->first_qset;
2154                int nqsets = pi->nqsets;
2155
2156                if (!capable(CAP_NET_ADMIN))
2157                        return -EPERM;
2158                if (copy_from_user(&t, useraddr, sizeof(t)))
2159                        return -EFAULT;
2160                if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2161                        return -EINVAL;
2162                if (t.qset_idx >= SGE_QSETS)
2163                        return -EINVAL;
2164                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2165                    !in_range(t.cong_thres, 0, 255) ||
2166                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2167                              MAX_TXQ_ENTRIES) ||
2168                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2169                              MAX_TXQ_ENTRIES) ||
2170                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2171                              MAX_CTRL_TXQ_ENTRIES) ||
2172                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2173                              MAX_RX_BUFFERS) ||
2174                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2175                              MAX_RX_JUMBO_BUFFERS) ||
2176                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2177                              MAX_RSPQ_ENTRIES))
2178                        return -EINVAL;
2179
2180                if ((adapter->flags & FULL_INIT_DONE) &&
2181                        (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2182                        t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2183                        t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2184                        t.polling >= 0 || t.cong_thres >= 0))
2185                        return -EBUSY;
2186
2187                /* Allow setting of any available qset when offload enabled */
2188                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2189                        q1 = 0;
2190                        for_each_port(adapter, i) {
2191                                pi = adap2pinfo(adapter, i);
2192                                nqsets += pi->first_qset + pi->nqsets;
2193                        }
2194                }
2195
2196                if (t.qset_idx < q1)
2197                        return -EINVAL;
2198                if (t.qset_idx > q1 + nqsets - 1)
2199                        return -EINVAL;
2200
2201                q = &adapter->params.sge.qset[t.qset_idx];
2202
2203                if (t.rspq_size >= 0)
2204                        q->rspq_size = t.rspq_size;
2205                if (t.fl_size[0] >= 0)
2206                        q->fl_size = t.fl_size[0];
2207                if (t.fl_size[1] >= 0)
2208                        q->jumbo_size = t.fl_size[1];
2209                if (t.txq_size[0] >= 0)
2210                        q->txq_size[0] = t.txq_size[0];
2211                if (t.txq_size[1] >= 0)
2212                        q->txq_size[1] = t.txq_size[1];
2213                if (t.txq_size[2] >= 0)
2214                        q->txq_size[2] = t.txq_size[2];
2215                if (t.cong_thres >= 0)
2216                        q->cong_thres = t.cong_thres;
2217                if (t.intr_lat >= 0) {
2218                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
2220
2221                        q->coalesce_usecs = t.intr_lat;
2222                        t3_update_qset_coalesce(qs, q);
2223                }
2224                if (t.polling >= 0) {
2225                        if (adapter->flags & USING_MSIX)
2226                                q->polling = t.polling;
2227                        else {
2228                                /* No polling with INTx for T3A */
2229                                if (adapter->params.rev == 0 &&
2230                                        !(adapter->flags & USING_MSI))
2231                                        t.polling = 0;
2232
2233                                for (i = 0; i < SGE_QSETS; i++) {
2234                                        q = &adapter->params.sge.qset[i];
2236                                        q->polling = t.polling;
2237                                }
2238                        }
2239                }
2240
2241                if (t.lro >= 0) {
2242                        if (t.lro)
2243                                dev->wanted_features |= NETIF_F_GRO;
2244                        else
2245                                dev->wanted_features &= ~NETIF_F_GRO;
2246                        netdev_update_features(dev);
2247                }
2248
2249                break;
2250        }
2251        case CHELSIO_GET_QSET_PARAMS:{
2252                struct qset_params *q;
2253                struct ch_qset_params t;
2254                int q1 = pi->first_qset;
2255                int nqsets = pi->nqsets;
2256                int i;
2257
2258                if (copy_from_user(&t, useraddr, sizeof(t)))
2259                        return -EFAULT;
2260
2261                if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2262                        return -EINVAL;
2263
2264                /* Display qsets for all ports when offload enabled */
2265                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2266                        q1 = 0;
2267                        for_each_port(adapter, i) {
2268                                pi = adap2pinfo(adapter, i);
2269                                nqsets = pi->first_qset + pi->nqsets;
2270                        }
2271                }
2272
2273                if (t.qset_idx >= nqsets)
2274                        return -EINVAL;
2275                t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2276
2277                q = &adapter->params.sge.qset[q1 + t.qset_idx];
2278                t.rspq_size = q->rspq_size;
2279                t.txq_size[0] = q->txq_size[0];
2280                t.txq_size[1] = q->txq_size[1];
2281                t.txq_size[2] = q->txq_size[2];
2282                t.fl_size[0] = q->fl_size;
2283                t.fl_size[1] = q->jumbo_size;
2284                t.polling = q->polling;
2285                t.lro = !!(dev->features & NETIF_F_GRO);
2286                t.intr_lat = q->coalesce_usecs;
2287                t.cong_thres = q->cong_thres;
2288                t.qnum = q1;
2289
2290                if (adapter->flags & USING_MSIX)
2291                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2292                else
2293                        t.vector = adapter->pdev->irq;
2294
2295                if (copy_to_user(useraddr, &t, sizeof(t)))
2296                        return -EFAULT;
2297                break;
2298        }
2299        case CHELSIO_SET_QSET_NUM:{
2300                struct ch_reg edata;
2301                unsigned int i, first_qset = 0, other_qsets = 0;
2302
2303                if (!capable(CAP_NET_ADMIN))
2304                        return -EPERM;
2305                if (adapter->flags & FULL_INIT_DONE)
2306                        return -EBUSY;
2307                if (copy_from_user(&edata, useraddr, sizeof(edata)))
2308                        return -EFAULT;
2309                if (edata.cmd != CHELSIO_SET_QSET_NUM)
2310                        return -EINVAL;
2311                if (edata.val < 1 ||
2312                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2313                        return -EINVAL;
2314
2315                for_each_port(adapter, i)
2316                        if (adapter->port[i] && adapter->port[i] != dev)
2317                                other_qsets += adap2pinfo(adapter, i)->nqsets;
2318
2319                if (edata.val + other_qsets > SGE_QSETS)
2320                        return -EINVAL;
2321
2322                pi->nqsets = edata.val;
2323
2324                for_each_port(adapter, i)
2325                        if (adapter->port[i]) {
2326                                pi = adap2pinfo(adapter, i);
2327                                pi->first_qset = first_qset;
2328                                first_qset += pi->nqsets;
2329                        }
2330                break;
2331        }
2332        case CHELSIO_GET_QSET_NUM:{
2333                struct ch_reg edata;
2334
2335                memset(&edata, 0, sizeof(struct ch_reg));
2336
2337                edata.cmd = CHELSIO_GET_QSET_NUM;
2338                edata.val = pi->nqsets;
2339                if (copy_to_user(useraddr, &edata, sizeof(edata)))
2340                        return -EFAULT;
2341                break;
2342        }
2343        case CHELSIO_LOAD_FW:{
2344                u8 *fw_data;
2345                struct ch_mem_range t;
2346
2347                if (!capable(CAP_SYS_RAWIO))
2348                        return -EPERM;
2349                if (copy_from_user(&t, useraddr, sizeof(t)))
2350                        return -EFAULT;
2351                if (t.cmd != CHELSIO_LOAD_FW)
2352                        return -EINVAL;
2353                /* Check t.len sanity ? */
2354                fw_data = memdup_user(useraddr + sizeof(t), t.len);
2355                if (IS_ERR(fw_data))
2356                        return PTR_ERR(fw_data);
2357
2358                ret = t3_load_fw(adapter, fw_data, t.len);
2359                kfree(fw_data);
2360                if (ret)
2361                        return ret;
2362                break;
2363        }
2364        case CHELSIO_SETMTUTAB:{
2365                struct ch_mtus m;
2366                int i;
2367
2368                if (!is_offload(adapter))
2369                        return -EOPNOTSUPP;
2370                if (!capable(CAP_NET_ADMIN))
2371                        return -EPERM;
2372                if (offload_running(adapter))
2373                        return -EBUSY;
2374                if (copy_from_user(&m, useraddr, sizeof(m)))
2375                        return -EFAULT;
2376                if (m.cmd != CHELSIO_SETMTUTAB)
2377                        return -EINVAL;
2378                if (m.nmtus != NMTUS)
2379                        return -EINVAL;
2380                if (m.mtus[0] < 81)     /* accommodate SACK */
2381                        return -EINVAL;
2382
2383                /* MTUs must be in ascending order */
2384                for (i = 1; i < NMTUS; ++i)
2385                        if (m.mtus[i] < m.mtus[i - 1])
2386                                return -EINVAL;
2387
2388                memcpy(adapter->params.mtus, m.mtus,
2389                        sizeof(adapter->params.mtus));
2390                break;
2391        }
2392        case CHELSIO_GET_PM:{
2393                struct tp_params *p = &adapter->params.tp;
2394                struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2395
2396                if (!is_offload(adapter))
2397                        return -EOPNOTSUPP;
2398                m.tx_pg_sz = p->tx_pg_size;
2399                m.tx_num_pg = p->tx_num_pgs;
2400                m.rx_pg_sz = p->rx_pg_size;
2401                m.rx_num_pg = p->rx_num_pgs;
2402                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2403                if (copy_to_user(useraddr, &m, sizeof(m)))
2404                        return -EFAULT;
2405                break;
2406        }
2407        case CHELSIO_SET_PM:{
2408                struct ch_pm m;
2409                struct tp_params *p = &adapter->params.tp;
2410
2411                if (!is_offload(adapter))
2412                        return -EOPNOTSUPP;
2413                if (!capable(CAP_NET_ADMIN))
2414                        return -EPERM;
2415                if (adapter->flags & FULL_INIT_DONE)
2416                        return -EBUSY;
2417                if (copy_from_user(&m, useraddr, sizeof(m)))
2418                        return -EFAULT;
2419                if (m.cmd != CHELSIO_SET_PM)
2420                        return -EINVAL;
2421                if (!is_power_of_2(m.rx_pg_sz) ||
2422                        !is_power_of_2(m.tx_pg_sz))
2423                        return -EINVAL; /* not power of 2 */
2424                if (!(m.rx_pg_sz & 0x14000))
2425                        return -EINVAL; /* not 16KB or 64KB */
2426                if (!(m.tx_pg_sz & 0x1554000))
2427                        return -EINVAL; /* not a power of 4, 16KB..16MB */
2428                if (m.tx_num_pg == -1)
2429                        m.tx_num_pg = p->tx_num_pgs;
2430                if (m.rx_num_pg == -1)
2431                        m.rx_num_pg = p->rx_num_pgs;
2432                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2433                        return -EINVAL;
2434                if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2435                        m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2436                        return -EINVAL;
2437                p->rx_pg_size = m.rx_pg_sz;
2438                p->tx_pg_size = m.tx_pg_sz;
2439                p->rx_num_pgs = m.rx_num_pg;
2440                p->tx_num_pgs = m.tx_num_pg;
2441                break;
2442        }
2443        case CHELSIO_GET_MEM:{
2444                struct ch_mem_range t;
2445                struct mc7 *mem;
2446                u64 buf[32];
2447
2448                if (!is_offload(adapter))
2449                        return -EOPNOTSUPP;
2450                if (!capable(CAP_NET_ADMIN))
2451                        return -EPERM;
2452                if (!(adapter->flags & FULL_INIT_DONE))
2453                        return -EIO;    /* need the memory controllers */
2454                if (copy_from_user(&t, useraddr, sizeof(t)))
2455                        return -EFAULT;
2456                if (t.cmd != CHELSIO_GET_MEM)
2457                        return -EINVAL;
2458                if ((t.addr & 7) || (t.len & 7))
2459                        return -EINVAL;
2460                if (t.mem_id == MEM_CM)
2461                        mem = &adapter->cm;
2462                else if (t.mem_id == MEM_PMRX)
2463                        mem = &adapter->pmrx;
2464                else if (t.mem_id == MEM_PMTX)
2465                        mem = &adapter->pmtx;
2466                else
2467                        return -EINVAL;
2468
2469                /*
2470                 * Version scheme:
2471                 * bits 0..9: chip version
2472                 * bits 10..15: chip revision
2473                 */
2474                t.version = 3 | (adapter->params.rev << 10);
2475                if (copy_to_user(useraddr, &t, sizeof(t)))
2476                        return -EFAULT;
2477
2478                /*
2479                 * Read 256 bytes at a time as len can be large and we don't
2480                 * want to use huge intermediate buffers.
2481                 */
2482                useraddr += sizeof(t);  /* advance to start of buffer */
2483                while (t.len) {
2484                        unsigned int chunk =
2485                                min_t(unsigned int, t.len, sizeof(buf));
2486
2487                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2488                                             buf);
2490                        if (ret)
2491                                return ret;
2492                        if (copy_to_user(useraddr, buf, chunk))
2493                                return -EFAULT;
2494                        useraddr += chunk;
2495                        t.addr += chunk;
2496                        t.len -= chunk;
2497                }
2498                break;
2499        }
2500        case CHELSIO_SET_TRACE_FILTER:{
2501                struct ch_trace t;
2502                const struct trace_params *tp;
2503
2504                if (!capable(CAP_NET_ADMIN))
2505                        return -EPERM;
2506                if (!offload_running(adapter))
2507                        return -EAGAIN;
2508                if (copy_from_user(&t, useraddr, sizeof(t)))
2509                        return -EFAULT;
2510                if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2511                        return -EINVAL;
2512
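                /*
                 * The ch_trace fields from sip onward are laid out to
                 * match struct trace_params, so the user-supplied filter
                 * can be handed to the hardware setup routine by a cast.
                 */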
2513                tp = (const struct trace_params *)&t.sip;
2514                if (t.config_tx)
2515                        t3_config_trace_filter(adapter, tp, 0,
2516                                                t.invert_match,
2517                                                t.trace_tx);
2518                if (t.config_rx)
2519                        t3_config_trace_filter(adapter, tp, 1,
2520                                                t.invert_match,
2521                                                t.trace_rx);
2522                break;
2523        }
2524        default:
2525                return -EOPNOTSUPP;
2526        }
2527        return 0;
2528}
2529
2530static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2531{
2532        struct mii_ioctl_data *data = if_mii(req);
2533        struct port_info *pi = netdev_priv(dev);
2534        struct adapter *adapter = pi->adapter;
2535
2536        switch (cmd) {
2537        case SIOCGMIIREG:
2538        case SIOCSMIIREG:
2539                /* Convert phy_id from older PRTAD/DEVAD format */
2540                if (is_10G(adapter) &&
2541                    !mdio_phy_id_is_c45(data->phy_id) &&
2542                    (data->phy_id & 0x1f00) &&
2543                    !(data->phy_id & 0xe0e0))
2544                        data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2545                                                       data->phy_id & 0x1f);
2546                fallthrough;
2547        case SIOCGMIIPHY:
2548                return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2549        case SIOCCHIOCTL:
2550                return cxgb_extension_ioctl(dev, req->ifr_data);
2551        default:
2552                return -EOPNOTSUPP;
2553        }
2554}
2555
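    /*
     * Propagate an MTU change to the MAC and refresh the per-port MTU
     * table; rev 0 (T3A) parts with offload running also need the TP
     * MTU table reloaded.
     */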
2556static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2557{
2558        struct port_info *pi = netdev_priv(dev);
2559        struct adapter *adapter = pi->adapter;
2560        int ret;
2561
2562        ret = t3_mac_set_mtu(&pi->mac, new_mtu);
2563        if (ret)
                    return ret;
2564        dev->mtu = new_mtu;
2565        init_port_mtus(adapter);
2566        if (adapter->params.rev == 0 && offload_running(adapter))
2567                t3_load_mtus(adapter, adapter->params.mtus,
2568                             adapter->params.a_wnd, adapter->params.b_wnd,
2569                             adapter->port[0]->mtu);
2570        return 0;
2571}
2572
2573static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2574{
2575        struct port_info *pi = netdev_priv(dev);
2576        struct adapter *adapter = pi->adapter;
2577        struct sockaddr *addr = p;
2578
2579        if (!is_valid_ether_addr(addr->sa_data))
2580                return -EADDRNOTAVAIL;
2581
2582        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2583        t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2584        if (offload_running(adapter))
2585                write_smt_entry(adapter, pi->port_id);
2586        return 0;
2587}
2588
2589static netdev_features_t cxgb_fix_features(struct net_device *dev,
2590        netdev_features_t features)
2591{
2592        /*
2593         * There is no support for separate rx/tx VLAN acceleration
2594         * enable/disable, so keep the tx flag in the same state as rx.
2595         */
2596        if (features & NETIF_F_HW_VLAN_CTAG_RX)
2597                features |= NETIF_F_HW_VLAN_CTAG_TX;
2598        else
2599                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2600
2601        return features;
2602}
2603
2604static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2605{
2606        netdev_features_t changed = dev->features ^ features;
2607
2608        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2609                cxgb_vlan_mode(dev, features);
2610
2611        return 0;
2612}
2613
2614#ifdef CONFIG_NET_POLL_CONTROLLER
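    /*
     * Poll handler for netconsole and friends: invoke the interrupt
     * handler the device would normally use, once per qset with MSI-X
     * and once for the whole adapter otherwise.
     */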
2615static void cxgb_netpoll(struct net_device *dev)
2616{
2617        struct port_info *pi = netdev_priv(dev);
2618        struct adapter *adapter = pi->adapter;
2619        int qidx;
2620
2621        for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2622                struct sge_qset *qs = &adapter->sge.qs[qidx];
2623                void *source;
2624
2625                if (adapter->flags & USING_MSIX)
2626                        source = qs;
2627                else
2628                        source = adapter;
2629
2630                t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2631        }
2632}
2633#endif
2634
2635/*
2636 * Periodic accumulation of MAC statistics.
2637 */
2638static void mac_stats_update(struct adapter *adapter)
2639{
2640        int i;
2641
2642        for_each_port(adapter, i) {
2643                struct net_device *dev = adapter->port[i];
2644                struct port_info *p = netdev_priv(dev);
2645
2646                if (netif_running(dev)) {
2647                        spin_lock(&adapter->stats_lock);
2648                        t3_mac_update_stats(&p->mac);
2649                        spin_unlock(&adapter->stats_lock);
2650                }
2651        }
2652}
2653
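    /*
     * Poll the link state of PHYs that lack a usable interrupt and
     * service any link faults previously flagged by the interrupt
     * handler.
     */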
2654static void check_link_status(struct adapter *adapter)
2655{
2656        int i;
2657
2658        for_each_port(adapter, i) {
2659                struct net_device *dev = adapter->port[i];
2660                struct port_info *p = netdev_priv(dev);
2661                int link_fault;
2662
2663                spin_lock_irq(&adapter->work_lock);
2664                link_fault = p->link_fault;
2665                spin_unlock_irq(&adapter->work_lock);
2666
2667                if (link_fault) {
2668                        t3_link_fault(adapter, i);
2669                        continue;
2670                }
2671
2672                if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2673                        t3_xgm_intr_disable(adapter, i);
2674                        t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2675
2676                        t3_link_changed(adapter, i);
2677                        t3_xgm_intr_enable(adapter, i);
2678                }
2679        }
2680}
2681
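    /*
     * Periodic T3B2 MAC watchdog.  A return of 1 from the watchdog
     * means TX enable was toggled; a return of 2 means the MAC was
     * reset and must be reprogrammed from scratch.
     */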
2682static void check_t3b2_mac(struct adapter *adapter)
2683{
2684        int i;
2685
2686        if (!rtnl_trylock())    /* synchronize with ifdown */
2687                return;
2688
2689        for_each_port(adapter, i) {
2690                struct net_device *dev = adapter->port[i];
2691                struct port_info *p = netdev_priv(dev);
2692                int status;
2693
2694                if (!netif_running(dev))
2695                        continue;
2696
2697                status = 0;
2698                if (netif_carrier_ok(dev))      /* running was checked above */
2699                        status = t3b2_mac_watchdog_task(&p->mac);
2700                if (status == 1)
2701                        p->mac.stats.num_toggled++;
2702                else if (status == 2) {
2703                        struct cmac *mac = &p->mac;
2704
2705                        t3_mac_set_mtu(mac, dev->mtu);
2706                        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2707                        cxgb_set_rxmode(dev);
2708                        t3_link_start(&p->phy, mac, &p->link_config);
2709                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2710                        t3_port_intr_enable(adapter, p->port_id);
2711                        p->mac.stats.num_resets++;
2712                }
2713        }
2714        rtnl_unlock();
2715}
2716
2718static void t3_adap_check_task(struct work_struct *work)
2719{
2720        struct adapter *adapter = container_of(work, struct adapter,
2721                                               adap_check_task.work);
2722        const struct adapter_params *p = &adapter->params;
2723        int port;
2724        unsigned int v, status, reset;
2725
2726        adapter->check_task_cnt++;
2727
2728        check_link_status(adapter);
2729
2730        /* Accumulate MAC stats if needed */
2731        if (!p->linkpoll_period ||
2732            (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2733            p->stats_update_period) {
2734                mac_stats_update(adapter);
2735                adapter->check_task_cnt = 0;
2736        }
2737
2738        if (p->rev == T3_REV_B2)
2739                check_t3b2_mac(adapter);
2740
2741        /*
2742         * Scan the XGMACs for conditions that we want to monitor by
2743         * periodic polling rather than via interrupts, because they
2744         * would otherwise flood the system with interrupts and all we
2745         * really need to know is that they are occurring.  For each
2746         * condition we count its detections and then clear it for the
2747         * next polling pass.
2748         */
2749        for_each_port(adapter, port) {
2750                struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2751                u32 cause;
2752
2753                cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2754                reset = 0;
2755                if (cause & F_RXFIFO_OVERFLOW) {
2756                        mac->stats.rx_fifo_ovfl++;
2757                        reset |= F_RXFIFO_OVERFLOW;
2758                }
2759
2760                t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2761        }
2762
2763        /*
2764         * We do the same as above for FL_EMPTY interrupts.
2765         */
2766        status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2767        reset = 0;
2768
2769        if (status & F_FLEMPTY) {
2770                struct sge_qset *qs = &adapter->sge.qs[0];
2771                int i = 0;
2772
2773                reset |= F_FLEMPTY;
2774
2775                v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2776                    0xffff;
2777
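                /*
                 * The FL empty-status bits alternate between the two
                 * free lists of consecutive qsets (qs0.fl0, qs0.fl1,
                 * qs1.fl0, ...), so move to the next qset after every
                 * second bit.
                 */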
2778                while (v) {
2779                        qs->fl[i].empty += (v & 1);
2780                        if (i)
2781                                qs++;
2782                        i ^= 1;
2783                        v >>= 1;
2784                }
2785        }
2786
2787        t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2788
2789        /* Schedule the next check update if any port is active. */
2790        spin_lock_irq(&adapter->work_lock);
2791        if (adapter->open_device_map & PORT_MASK)
2792                schedule_chk_task(adapter);
2793        spin_unlock_irq(&adapter->work_lock);
2794}
2795
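    /*
     * Doorbell congestion handling: relay SGE doorbell full/empty/drop
     * events to the offload drivers; after a drop the driver's own
     * qset doorbells are rung again following a short random delay.
     */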
2796static void db_full_task(struct work_struct *work)
2797{
2798        struct adapter *adapter = container_of(work, struct adapter,
2799                                               db_full_task);
2800
2801        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2802}
2803
2804static void db_empty_task(struct work_struct *work)
2805{
2806        struct adapter *adapter = container_of(work, struct adapter,
2807                                               db_empty_task);
2808
2809        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2810}
2811
2812static void db_drop_task(struct work_struct *work)
2813{
2814        struct adapter *adapter = container_of(work, struct adapter,
2815                                               db_drop_task);
2816        unsigned long delay = 1000;
2817        unsigned short r;
2818
2819        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2820
2821        /*
2822         * Sleep a while before ringing the driver qset dbs.
2823         * The delay is between 1000-2023 usecs.
2824         */
2825        get_random_bytes(&r, 2);
2826        delay += r & 1023;
2827        set_current_state(TASK_UNINTERRUPTIBLE);
2828        schedule_timeout(usecs_to_jiffies(delay));
2829        ring_dbs(adapter);
2830}
2831
2832/*
2833 * Processes external (PHY) interrupts in process context.
2834 */
2835static void ext_intr_task(struct work_struct *work)
2836{
2837        struct adapter *adapter = container_of(work, struct adapter,
2838                                               ext_intr_handler_task);
2839        int i;
2840
2841        /* Disable link fault interrupts */
2842        for_each_port(adapter, i) {
2843                struct net_device *dev = adapter->port[i];
2844                struct port_info *p = netdev_priv(dev);
2845
2846                t3_xgm_intr_disable(adapter, i);
2847                t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2848        }
2849
2850        t3_phy_intr_handler(adapter);
2851
2852        /* Re-enable link fault interrupts */
2853        for_each_port(adapter, i)
2854                t3_xgm_intr_enable(adapter, i);
2855
2856        /* Now reenable external interrupts */
2857        spin_lock_irq(&adapter->work_lock);
2858        if (adapter->slow_intr_mask) {
2859                adapter->slow_intr_mask |= F_T3DBG;
2860                t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2861                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2862                             adapter->slow_intr_mask);
2863        }
2864        spin_unlock_irq(&adapter->work_lock);
2865}
2866
2867/*
2868 * Interrupt-context handler for external (PHY) interrupts.
2869 */
2870void t3_os_ext_intr_handler(struct adapter *adapter)
2871{
2872        /*
2873         * Schedule a task to handle external interrupts as they may be slow
2874         * and we use a mutex to protect MDIO registers.  We disable PHY
2875         * interrupts in the meantime and let the task reenable them when
2876         * it's done.
2877         */
2878        spin_lock(&adapter->work_lock);
2879        if (adapter->slow_intr_mask) {
2880                adapter->slow_intr_mask &= ~F_T3DBG;
2881                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2882                             adapter->slow_intr_mask);
2883                queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2884        }
2885        spin_unlock(&adapter->work_lock);
2886}
2887
2888void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2889{
2890        struct net_device *netdev = adapter->port[port_id];
2891        struct port_info *pi = netdev_priv(netdev);
2892
2893        spin_lock(&adapter->work_lock);
2894        pi->link_fault = 1;
2895        spin_unlock(&adapter->work_lock);
2896}
2897
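    /*
     * Quiesce the adapter after a fatal or PCI error: notify offload
     * users, close all running ports, stop the SGE timers and, if
     * requested, reset the chip before disabling the PCI device.
     */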
2898static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2899{
2900        int i, ret = 0;
2901
2902        if (is_offload(adapter) &&
2903            test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2904                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2905                offload_close(&adapter->tdev);
2906        }
2907
2908        /* Stop all ports */
2909        for_each_port(adapter, i) {
2910                struct net_device *netdev = adapter->port[i];
2911
2912                if (netif_running(netdev))
2913                        __cxgb_close(netdev, on_wq);
2914        }
2915
2916        /* Stop SGE timers */
2917        t3_stop_sge_timers(adapter);
2918
2919        adapter->flags &= ~FULL_INIT_DONE;
2920
2921        if (reset)
2922                ret = t3_reset_adapter(adapter);
2923
2924        pci_disable_device(adapter->pdev);
2925
2926        return ret;
2927}
2928
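    /*
     * Bring the PCI function back up after a reset: restore config
     * space, drop the stale SGE resources and prepare the chip so its
     * initialization can be replayed.
     */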
2929static int t3_reenable_adapter(struct adapter *adapter)
2930{
2931        if (pci_enable_device(adapter->pdev)) {
2932                dev_err(&adapter->pdev->dev,
2933                        "Cannot re-enable PCI device after reset.\n");
2934                goto err;
2935        }
2936        pci_set_master(adapter->pdev);
2937        pci_restore_state(adapter->pdev);
2938        pci_save_state(adapter->pdev);
2939
2940        /* Free sge resources */
2941        t3_free_sge_resources(adapter);
2942
2943        if (t3_replay_prep_adapter(adapter))
2944                goto err;
2945
2946        return 0;
2947err:
2948        return -1;
2949}
2950
2951static void t3_resume_ports(struct adapter *adapter)
2952{
2953        int i;
2954
2955        /* Restart the ports */
2956        for_each_port(adapter, i) {
2957                struct net_device *netdev = adapter->port[i];
2958
2959                if (netif_running(netdev)) {
2960                        if (cxgb_open(netdev)) {
2961                                dev_err(&adapter->pdev->dev,
2962                                        "can't bring device back up after reset\n");
2964                                continue;
2965                        }
2966                }
2967        }
2968
2969        if (is_offload(adapter) && !ofld_disable)
2970                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2971}
2972
2973/*
2974 * Process a fatal error:
2975 * bring the ports down, reset the chip, bring the ports back up.
2976 */
2977static void fatal_error_task(struct work_struct *work)
2978{
2979        struct adapter *adapter = container_of(work, struct adapter,
2980                                               fatal_error_handler_task);
2981        int err = 0;
2982
2983        rtnl_lock();
2984        err = t3_adapter_error(adapter, 1, 1);
2985        if (!err)
2986                err = t3_reenable_adapter(adapter);
2987        if (!err)
2988                t3_resume_ports(adapter);
2989
2990        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2991        rtnl_unlock();
2992}
2993
2994void t3_fatal_err(struct adapter *adapter)
2995{
2996        unsigned int fw_status[4];
2997
2998        if (adapter->flags & FULL_INIT_DONE) {
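                    /*
                     * Stop DMA and shut off both MACs so no further
                     * traffic reaches the wedged hardware, then defer the
                     * actual reset to fatal_error_task().
                     */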
2999                t3_sge_stop_dma(adapter);
3000                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
3001                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
3002                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
3003                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
3004
3005                spin_lock(&adapter->work_lock);
3006                t3_intr_disable(adapter);
3007                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3008                spin_unlock(&adapter->work_lock);
3009        }
3010        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
3011        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3012                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3013                         fw_status[0], fw_status[1],
3014                         fw_status[2], fw_status[3]);
3015}
3016
3017/**
3018 * t3_io_error_detected - called when PCI error is detected
3019 * @pdev: Pointer to PCI device
3020 * @state: The current PCI connection state
3021 *
3022 * This function is called after a PCI bus error affecting
3023 * this device has been detected.
3024 */
3025static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3026                                             pci_channel_state_t state)
3027{
3028        struct adapter *adapter = pci_get_drvdata(pdev);
3029
3030        if (state == pci_channel_io_perm_failure)
3031                return PCI_ERS_RESULT_DISCONNECT;
3032
3033        t3_adapter_error(adapter, 0, 0);
3034
3035        /* Request a slot reset. */
3036        return PCI_ERS_RESULT_NEED_RESET;
3037}
3038
3039/**
3040 * t3_io_slot_reset - called after the PCI bus has been reset.
3041 * @pdev: Pointer to PCI device
3042 *
3043 * Restart the card from scratch, as if from a cold boot.
3044 */
3045static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3046{
3047        struct adapter *adapter = pci_get_drvdata(pdev);
3048
3049        if (!t3_reenable_adapter(adapter))
3050                return PCI_ERS_RESULT_RECOVERED;
3051
3052        return PCI_ERS_RESULT_DISCONNECT;
3053}
3054
3055/**
3056 * t3_io_resume - called when traffic can start flowing again.
3057 * @pdev: Pointer to PCI device
3058 *
3059 * This callback is called when the error recovery driver tells us that
3060 * it's OK to resume normal operation.
3061 */
3062static void t3_io_resume(struct pci_dev *pdev)
3063{
3064        struct adapter *adapter = pci_get_drvdata(pdev);
3065
3066        CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3067                 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3068
3069        rtnl_lock();
3070        t3_resume_ports(adapter);
3071        rtnl_unlock();
3072}
3073
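    /*
     * AER recovery sequence: the PCI core calls .error_detected first
     * (quiesce and request a reset), then .slot_reset once the link has
     * been reset (re-enable the device), and finally .resume (restart
     * the ports).
     */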
3074static const struct pci_error_handlers t3_err_handler = {
3075        .error_detected = t3_io_error_detected,
3076        .slot_reset = t3_io_slot_reset,
3077        .resume = t3_io_resume,
3078};
3079
3080/*
3081 * Set the number of qsets per port based on the number of CPUs and the
3082 * number of ports, without exceeding the number of qsets available in
3083 * HW, assuming the HW provides enough qsets per port.
3084 */
3085static void set_nqsets(struct adapter *adap)
3086{
3087        int i, j = 0;
3088        int num_cpus = netif_get_num_default_rss_queues();
3089        int hwports = adap->params.nports;
3090        int nqsets = adap->msix_nvectors - 1;
3091
3092        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3093                if (hwports == 2 &&
3094                    (hwports * nqsets > SGE_QSETS ||
3095                     num_cpus >= nqsets / hwports))
3096                        nqsets /= hwports;
3097                if (nqsets > num_cpus)
3098                        nqsets = num_cpus;
3099                if (nqsets < 1 || hwports == 4)
3100                        nqsets = 1;
3101        } else {
3102                nqsets = 1;
3103        }
3104
3105        for_each_port(adap, i) {
3106                struct port_info *pi = adap2pinfo(adap, i);
3107
3108                pi->first_qset = j;
3109                pi->nqsets = nqsets;
3110                j = pi->first_qset + nqsets;
3111
3112                dev_info(&adap->pdev->dev,
3113                         "Port %d using %d queue sets.\n", i, nqsets);
3114        }
3115}
3116
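    /*
     * Ask for one MSI-X vector per queue set plus one for asynchronous
     * events, accepting as few as one vector per port plus one.
     */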
3117static int cxgb_enable_msix(struct adapter *adap)
3118{
3119        struct msix_entry entries[SGE_QSETS + 1];
3120        int vectors;
3121        int i;
3122
3123        vectors = ARRAY_SIZE(entries);
3124        for (i = 0; i < vectors; ++i)
3125                entries[i].entry = i;
3126
3127        vectors = pci_enable_msix_range(adap->pdev, entries,
3128                                        adap->params.nports + 1, vectors);
3129        if (vectors < 0)
3130                return vectors;
3131
3132        for (i = 0; i < vectors; ++i)
3133                adap->msix_info[i].vec = entries[i].vector;
3134        adap->msix_nvectors = vectors;
3135
3136        return 0;
3137}
3138
3139static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3140{
3141        static const char *pci_variant[] = {
3142                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3143        };
3144
3145        int i;
3146        char buf[80];
3147
3148        if (is_pcie(adap))
3149                snprintf(buf, sizeof(buf), "%s x%d",
3150                         pci_variant[adap->params.pci.variant],
3151                         adap->params.pci.width);
3152        else
3153                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3154                         pci_variant[adap->params.pci.variant],
3155                         adap->params.pci.speed, adap->params.pci.width);
3156
3157        for_each_port(adap, i) {
3158                struct net_device *dev = adap->port[i];
3159                const struct port_info *pi = netdev_priv(dev);
3160
3161                if (!test_bit(i, &adap->registered_device_map))
3162                        continue;
3163                netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3164                            ai->desc, pi->phy.desc,
3165                            is_offload(adap) ? "R" : "", adap->params.rev, buf,
3166                            (adap->flags & USING_MSIX) ? " MSI-X" :
3167                            (adap->flags & USING_MSI) ? " MSI" : "");
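                    /*
                     * adap->name aliases the name of the first
                     * successfully registered netdev (see init_one()), so
                     * this summary is printed only once per adapter.
                     */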
3168                if (adap->name == dev->name && adap->params.vpd.mclk)
3169                        pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3170                               adap->name, t3_mc7_size(&adap->cm) >> 20,
3171                               t3_mc7_size(&adap->pmtx) >> 20,
3172                               t3_mc7_size(&adap->pmrx) >> 20,
3173                               adap->params.vpd.sn);
3174        }
3175}
3176
3177static const struct net_device_ops cxgb_netdev_ops = {
3178        .ndo_open               = cxgb_open,
3179        .ndo_stop               = cxgb_close,
3180        .ndo_start_xmit         = t3_eth_xmit,
3181        .ndo_get_stats          = cxgb_get_stats,
3182        .ndo_validate_addr      = eth_validate_addr,
3183        .ndo_set_rx_mode        = cxgb_set_rxmode,
3184        .ndo_do_ioctl           = cxgb_ioctl,
3185        .ndo_change_mtu         = cxgb_change_mtu,
3186        .ndo_set_mac_address    = cxgb_set_mac_addr,
3187        .ndo_fix_features       = cxgb_fix_features,
3188        .ndo_set_features       = cxgb_set_features,
3189#ifdef CONFIG_NET_POLL_CONTROLLER
3190        .ndo_poll_controller    = cxgb_netpoll,
3191#endif
3192};
3193
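    /*
     * Derive the port's iSCSI MAC address from its Ethernet address by
     * setting the high bit of the fourth byte, keeping the two addresses
     * distinct.
     */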
3194static void cxgb3_init_iscsi_mac(struct net_device *dev)
3195{
3196        struct port_info *pi = netdev_priv(dev);
3197
3198        memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3199        pi->iscsic.mac_addr[3] |= 0x80;
3200}
3201
3202#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3203#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3204                        NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3205static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3206{
3207        int i, err, pci_using_dac = 0;
3208        resource_size_t mmio_start, mmio_len;
3209        const struct adapter_info *ai;
3210        struct adapter *adapter = NULL;
3211        struct port_info *pi;
3212
3213        if (!cxgb3_wq) {
3214                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3215                if (!cxgb3_wq) {
3216                        pr_err("cannot initialize work queue\n");
3217                        return -ENOMEM;
3218                }
3219        }
3220
3221        err = pci_enable_device(pdev);
3222        if (err) {
3223                dev_err(&pdev->dev, "cannot enable PCI device\n");
3224                goto out;
3225        }
3226
3227        err = pci_request_regions(pdev, DRV_NAME);
3228        if (err) {
3229                /* Just info, some other driver may have claimed the device. */
3230                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3231                goto out_disable_device;
3232        }
3233
3234        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3235                pci_using_dac = 1;
3236                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3237                if (err) {
3238                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3239                               "coherent allocations\n");
3240                        goto out_release_regions;
3241                }
3242        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3243                dev_err(&pdev->dev, "no usable DMA configuration\n");
3244                goto out_release_regions;
3245        }
3246
3247        pci_set_master(pdev);
3248        pci_save_state(pdev);
3249
3250        mmio_start = pci_resource_start(pdev, 0);
3251        mmio_len = pci_resource_len(pdev, 0);
3252        ai = t3_get_adapter_info(ent->driver_data);
3253
3254        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3255        if (!adapter) {
3256                err = -ENOMEM;
3257                goto out_release_regions;
3258        }
3259
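        /*
         * Pre-allocate a backup skb, sized for a CPL_SET_TCB_FIELD
         * message, that the offload path can fall back on when a regular
         * allocation must not fail.
         */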
3260        adapter->nofail_skb =
3261                alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3262        if (!adapter->nofail_skb) {
3263                dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3264                err = -ENOMEM;
3265                goto out_free_adapter;
3266        }
3267
3268        adapter->regs = ioremap(mmio_start, mmio_len);
3269        if (!adapter->regs) {
3270                dev_err(&pdev->dev, "cannot map device registers\n");
3271                err = -ENOMEM;
3272                goto out_free_adapter_nofail;
3273        }
3274
3275        adapter->pdev = pdev;
3276        adapter->name = pci_name(pdev);
3277        adapter->msg_enable = dflt_msg_enable;
3278        adapter->mmio_len = mmio_len;
3279
3280        mutex_init(&adapter->mdio_lock);
3281        spin_lock_init(&adapter->work_lock);
3282        spin_lock_init(&adapter->stats_lock);
3283
3284        INIT_LIST_HEAD(&adapter->adapter_list);
3285        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3286        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3287
3288        INIT_WORK(&adapter->db_full_task, db_full_task);
3289        INIT_WORK(&adapter->db_empty_task, db_empty_task);
3290        INIT_WORK(&adapter->db_drop_task, db_drop_task);
3291
3292        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3293
3294        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3295                struct net_device *netdev;
3296
3297                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3298                if (!netdev) {
3299                        err = -ENOMEM;
3300                        goto out_free_dev;
3301                }
3302
3303                SET_NETDEV_DEV(netdev, &pdev->dev);
3304
3305                adapter->port[i] = netdev;
3306                pi = netdev_priv(netdev);
3307                pi->adapter = adapter;
3308                pi->port_id = i;
3309                netif_carrier_off(netdev);
3310                netdev->irq = pdev->irq;
3311                netdev->mem_start = mmio_start;
3312                netdev->mem_end = mmio_start + mmio_len - 1;
3313                netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3314                        NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3315                netdev->features |= netdev->hw_features |
3316                                    NETIF_F_HW_VLAN_CTAG_TX;
3317                netdev->vlan_features |= netdev->features & VLAN_FEAT;
3318                if (pci_using_dac)
3319                        netdev->features |= NETIF_F_HIGHDMA;
3320
3321                netdev->netdev_ops = &cxgb_netdev_ops;
3322                netdev->ethtool_ops = &cxgb_ethtool_ops;
3323                netdev->min_mtu = 81;
3324                netdev->max_mtu = ETH_MAX_MTU;
3325                netdev->dev_port = pi->port_id;
3326        }
3327
3328        pci_set_drvdata(pdev, adapter);
3329        if (t3_prep_adapter(adapter, ai, 1) < 0) {
3330                err = -ENODEV;
3331                goto out_free_dev;
3332        }
3333
3334        /*
3335         * The card is now ready to go.  If any errors occur during device
3336         * registration, we do not fail the whole card but rather proceed only
3337         * with the ports we manage to register successfully.  However we must
3338         * register at least one net device.
3339         */
3340        for_each_port(adapter, i) {
3341                err = register_netdev(adapter->port[i]);
3342                if (err)
3343                        dev_warn(&pdev->dev,
3344                                 "cannot register net device %s, skipping\n",
3345                                 adapter->port[i]->name);
3346                else {
3347                        /*
3348                         * Change the name we use for messages to the name of
3349                         * the first successfully registered interface.
3350                         */
3351                        if (!adapter->registered_device_map)
3352                                adapter->name = adapter->port[i]->name;
3353
3354                        __set_bit(i, &adapter->registered_device_map);
3355                }
3356        }
3357        if (!adapter->registered_device_map) {
3358                dev_err(&pdev->dev, "could not register any net devices\n");
                    err = -ENODEV;
3359                goto out_free_dev;
3360        }
3361
3362        for_each_port(adapter, i)
3363                cxgb3_init_iscsi_mac(adapter->port[i]);
3364
3365        /* Driver's ready. Reflect it on LEDs */
3366        t3_led_ready(adapter);
3367
3368        if (is_offload(adapter)) {
3369                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3370                cxgb3_adapter_ofld(adapter);
3371        }
3372
3373        /* See what interrupts we'll be using */
3374        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3375                adapter->flags |= USING_MSIX;
3376        else if (msi > 0 && pci_enable_msi(pdev) == 0)
3377                adapter->flags |= USING_MSI;
3378
3379        set_nqsets(adapter);
3380
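        /*
         * The adapter-wide sysfs attributes hang off the first port's
         * netdev; this assumes port 0 was registered successfully.
         */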
3381        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3382                                 &cxgb3_attr_group);
3383        if (err) {
3384                dev_err(&pdev->dev, "cannot create sysfs group\n");
3385                goto out_close_led;
3386        }
3387
3388        print_port_info(adapter, ai);
3389        return 0;
3390
3391out_close_led:
3392        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
3393
3394out_free_dev:
3395        iounmap(adapter->regs);
3396        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3397                if (adapter->port[i])
3398                        free_netdev(adapter->port[i]);
3399
3400out_free_adapter_nofail:
3401        kfree_skb(adapter->nofail_skb);
3402
3403out_free_adapter:
3404        kfree(adapter);
3405
3406out_release_regions:
3407        pci_release_regions(pdev);
3408out_disable_device:
3409        pci_disable_device(pdev);
3410out:
3411        return err;
3412}
3413
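    /*
     * PCI remove: tear down in roughly the reverse order of init_one(),
     * stopping the SGE and offload first so no work is in flight when
     * the ports are unregistered and resources are freed.
     */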
3414static void remove_one(struct pci_dev *pdev)
3415{
3416        struct adapter *adapter = pci_get_drvdata(pdev);
3417
3418        if (adapter) {
3419                int i;
3420
3421                t3_sge_stop(adapter);
3422                sysfs_remove_group(&adapter->port[0]->dev.kobj,
3423                                   &cxgb3_attr_group);
3424
3425                if (is_offload(adapter)) {
3426                        cxgb3_adapter_unofld(adapter);
3427                        if (test_bit(OFFLOAD_DEVMAP_BIT,
3428                                     &adapter->open_device_map))
3429                                offload_close(&adapter->tdev);
3430                }
3431
3432                for_each_port(adapter, i)
3433                        if (test_bit(i, &adapter->registered_device_map))
3434                                unregister_netdev(adapter->port[i]);
3435
3436                t3_stop_sge_timers(adapter);
3437                t3_free_sge_resources(adapter);
3438                cxgb_disable_msi(adapter);
3439
3440                for_each_port(adapter, i)
3441                        if (adapter->port[i])
3442                                free_netdev(adapter->port[i]);
3443
3444                iounmap(adapter->regs);
3445                kfree_skb(adapter->nofail_skb);
3446                kfree(adapter);
3447                pci_release_regions(pdev);
3448                pci_disable_device(pdev);
3449        }
3450}
3451
3452static struct pci_driver driver = {
3453        .name = DRV_NAME,
3454        .id_table = cxgb3_pci_tbl,
3455        .probe = init_one,
3456        .remove = remove_one,
3457        .err_handler = &t3_err_handler,
3458};
3459
3460static int __init cxgb3_init_module(void)
3461{
3462        int ret;
3463
3464        cxgb3_offload_init();
3465
3466        ret = pci_register_driver(&driver);
3467        return ret;
3468}
3469
3470static void __exit cxgb3_cleanup_module(void)
3471{
3472        pci_unregister_driver(&driver);
3473        if (cxgb3_wq)
3474                destroy_workqueue(cxgb3_wq);
3475}
3476
3477module_init(cxgb3_init_module);
3478module_exit(cxgb3_cleanup_module);
3479