linux/drivers/net/ethernet/broadcom/bnxt/bnxt.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_ptp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)
#define BNXT_DEF_MSG_ENABLE     (NETIF_MSG_DRV | NETIF_MSG_HW | \
                                 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        BCM5745x_NPAR,
        BCM57508,
        BCM57504,
        BCM57502,
        BCM57508_NPAR,
        BCM57504_NPAR,
        BCM57502_NPAR,
        BCM58802,
        BCM58804,
        BCM58808,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
        NETXTREME_S_VF,
        NETXTREME_C_VF_HV,
        NETXTREME_E_VF_HV,
        NETXTREME_E_P5_VF,
        NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
        [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
        [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
        [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
        [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
        [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
        [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
        [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
        [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
        [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
        [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
        [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
        [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
        { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
        { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
        { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
        { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
        { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
        { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
        { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_FUNC_VF_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
        ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
        ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
        ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
        ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
                idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
                idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
                idx == NETXTREME_E_P5_VF_HV);
}

#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)                                             \
        writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)                                          \
        writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)                                         \
        writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
        writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

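/* Doorbell helpers.  P5 chips use 64-bit doorbells written with writeq()
 * and the per-ring db_key64; older chips use the 32-bit completion ring
 * doorbell formats defined above.  The bnxt_db_* helpers below select the
 * right format based on BNXT_FLAG_CHIP_P5.
 */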
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_P5(db, idx);
        else
                BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                BNXT_DB_NQ_ARM_P5(db, idx);
        else
                BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
        if (bp->flags & BNXT_FLAG_CHIP_P5)
                writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
                       db->doorbell);
        else
                BNXT_DB_CQ(db, idx);
}

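/* TX length hint table, indexed by the packet length in 512-byte units
 * (bnxt_start_xmit() shifts the length right by 9 before the lookup).
 */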
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
        struct metadata_dst *md_dst = skb_metadata_dst(skb);

        if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
                return 0;

        return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
                             u16 prod)
{
        bnxt_db_write(bp, &txr->tx_db, prod);
        txr->kick_pending = 0;
}

static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
                                          struct bnxt_tx_ring_info *txr,
                                          struct netdev_queue *txq)
{
        netif_tx_stop_queue(txq);

        /* netif_tx_stop_queue() must be done before checking
         * tx index in bnxt_tx_avail() below, because in
         * bnxt_tx_int(), we update tx index before checking for
         * netif_tx_queue_stopped().
         */
        smp_mb();
        if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
                netif_tx_wake_queue(txq);
                return false;
        }

        return true;
}

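/* Main transmit routine.  Small packets may be written inline through the
 * doorbell ("push" mode) when the ring is empty; otherwise the skb head
 * and fragments are DMA mapped and described by a long TX BD, an extended
 * BD and one BD per fragment.
 */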
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;
        __le32 lflags = 0;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                atomic_long_inc(&dev->tx_dropped);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                /* We must have raced with NAPI cleanup */
                if (net_ratelimit() && txr->kick_pending)
                        netif_warn(bp, tx_err, dev,
                                   "bnxt: ring busy w/ flush pending!\n");
                if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
                        return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = bnxt_xmit_get_cfa_action(skb);
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 8021Q, 8021AD vlan offloads
                 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

                if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
                    atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
                        if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
                                            &ptp->tx_hdr_off)) {
                                if (vlan_tag_flags)
                                        ptp->tx_hdr_off += VLAN_HLEN;
                                lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        } else {
                                atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }
        }

        if (unlikely(skb->no_fcs))
                lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

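        /* TX push: if the ring is empty, the packet is short enough and no
         * special lflags are needed, copy the BDs and the payload directly
         * into the push buffer and write it out through the doorbell
         * instead of having the NIC fetch them by DMA.
         */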
        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
            !lflags) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void __iomem *db = txr->tx_db.doorbell;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
                if (push_len > 16) {
                        __iowrite64_copy(db, tx_push_buf, 16);
                        __iowrite32_copy(db + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(db, tx_push_buf, push_len);
                }

                goto tx_done;
        }

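/* Normal (non-push) TX path: pad short packets, DMA map the linear data
 * and each fragment, and build the long TX BD chain.
 */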
normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad))
                        /* SKB already freed. */
                        goto tx_kick_pending;
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                goto tx_free;

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = lflags;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags |=
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
                dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
                                     skb->len);
                i = 0;
                goto tx_dma_error;
        }
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action =
                        cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        skb_tx_timestamp(skb);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!netdev_xmit_more() || netif_xmit_stopped(txq))
                bnxt_txr_db_kick(bp, txr, prod);
        else
                txr->kick_pending = 1;

tx_done:

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (netdev_xmit_more() && !tx_buf->is_push)
                        bnxt_txr_db_kick(bp, txr, prod);

                bnxt_txr_netif_try_stop_queue(bp, txr, txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        if (BNXT_TX_PTP_IS_SET(lflags))
                atomic_inc(&bp->ptp_cfg->tx_avail);

        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

tx_free:
        dev_kfree_skb_any(skb);
tx_kick_pending:
        if (txr->kick_pending)
                bnxt_txr_db_kick(bp, txr, txr->tx_prod);
        txr->tx_buf_ring[txr->tx_prod].skb = NULL;
        atomic_long_inc(&dev->tx_dropped);
        return NETDEV_TX_OK;
}

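/* TX completion: unmap and free up to nr_pkts completed packets, defer
 * freeing of skbs waiting for a PTP TX timestamp on P5 chips, then advance
 * tx_cons and wake the queue if it was stopped and enough descriptors are
 * free again.
 */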
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                bool compl_deferred = false;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        if (bp->flags & BNXT_FLAG_CHIP_P5) {
                                if (!bnxt_get_tx_ts_p5(bp, skb))
                                        compl_deferred = true;
                                else
                                        atomic_inc(&bp->ptp_cfg->tx_avail);
                        }
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                if (!compl_deferred)
                        dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
            READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
                netif_tx_wake_queue(txq);
}

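/* Allocate an RX page from the ring's page_pool and DMA map it, returning
 * the page and its (offset-adjusted) DMA address via *mapping.
 */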
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         struct bnxt_rx_ring_info *rxr,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = page_pool_dev_alloc_pages(rxr->page_pool);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page =
                        __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

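/* Allocate a buffer for the RX aggregation ring.  When the system page
 * size is larger than BNXT_RX_PAGE_SIZE, one page is carved into multiple
 * BNXT_RX_PAGE_SIZE chunks with get_page() taken for each extra chunk.
 */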
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
                                       struct bnxt_cp_ring_info *cpr,
                                       u16 cp_cons, u16 curr)
{
        struct rx_agg_cmp *agg;

        cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
        return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
                                              struct bnxt_rx_ring_info *rxr,
                                              u16 agg_id, u16 curr)
{
        struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

        return &tpa_info->agg_arr[curr];
}

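/* Return a run of aggregation buffers (taken from the completion ring, or
 * from the P5 TPA aggregation array when tpa is set) to the aggregation
 * ring so they can be reused, e.g. after an allocation failure.
 */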
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
                                   u16 start, u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct bnxt *bp = bnapi->bp;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, start + i);
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

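/* Build an skb for a page-mode RX buffer: copy the headers into the skb's
 * linear area and leave the remaining data in the page as a fragment.
 */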
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        skb_frag_t *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);
        page_pool_release_page(rxr->page_pool, page);

        if (unlikely(!payload))
                payload = eth_get_headlen(bp->dev, data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

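/* Attach the aggregation buffers of a jumbo or TPA packet to the skb as
 * page fragments, replenishing the aggregation ring along the way.
 */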
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
                                     struct bnxt_cp_ring_info *cpr,
                                     struct sk_buff *skb, u16 idx,
                                     u32 agg_bufs, bool tpa)
{
        struct bnxt_napi *bnapi = cpr->bnapi;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        bool p5_tpa = false;
        u32 i;

        if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
                p5_tpa = true;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                if (p5_tpa)
                        agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
                else
                        agg = bnxt_get_agg(bp, cpr, idx, i);
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

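/* Copy-break path: copy a small received packet into a freshly allocated
 * skb so that the original RX buffer can stay mapped and be reused.
 */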
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                           u32 *raw_cons, void *cmp)
{
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        return 0;

                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
        if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
                return;

        if (BNXT_PF(bp))
                queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
        else
                schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
        if (BNXT_PF(bp))
                queue_work(bnxt_pf_wq, &bp->sp_task);
        else
                schedule_work(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                if (bp->flags & BNXT_FLAG_CHIP_P5)
                        set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                else
                        set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
                bnxt_queue_sp_work(bp);
        }
        rxr->rx_next_cons = 0xffff;
}

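/* P5 TPA bookkeeping: map a hardware aggregation ID to a free driver-side
 * index (tracked in a bitmap and a lookup table) and release it again when
 * the aggregation completes.
 */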
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
        u16 idx = agg_id & MAX_TPA_P5_MASK;

        if (test_bit(idx, map->agg_idx_bmap))
                idx = find_first_zero_bit(map->agg_idx_bmap,
                                          BNXT_AGG_IDX_BMAP_SIZE);
        __set_bit(idx, map->agg_idx_bmap);
        map->agg_id_tbl[agg_id] = idx;
        return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        __clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
        struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

        return map->agg_id_tbl[agg_id];
}

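/* Handle a TPA_START completion: record the aggregation's metadata (hash,
 * GSO type, cfa_code, header info) in tpa_info, swap the starting RX
 * buffer into tpa_info and give the RX ring tpa_info's old buffer instead.
 */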
1284static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1285                           struct rx_tpa_start_cmp *tpa_start,
1286                           struct rx_tpa_start_cmp_ext *tpa_start1)
1287{
1288        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1289        struct bnxt_tpa_info *tpa_info;
1290        u16 cons, prod, agg_id;
1291        struct rx_bd *prod_bd;
1292        dma_addr_t mapping;
1293
1294        if (bp->flags & BNXT_FLAG_CHIP_P5) {
1295                agg_id = TPA_START_AGG_ID_P5(tpa_start);
1296                agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1297        } else {
1298                agg_id = TPA_START_AGG_ID(tpa_start);
1299        }
1300        cons = tpa_start->rx_tpa_start_cmp_opaque;
1301        prod = rxr->rx_prod;
1302        cons_rx_buf = &rxr->rx_buf_ring[cons];
1303        prod_rx_buf = &rxr->rx_buf_ring[prod];
1304        tpa_info = &rxr->rx_tpa[agg_id];
1305
1306        if (unlikely(cons != rxr->rx_next_cons ||
1307                     TPA_START_ERROR(tpa_start))) {
1308                netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1309                            cons, rxr->rx_next_cons,
1310                            TPA_START_ERROR_CODE(tpa_start1));
1311                bnxt_sched_reset(bp, rxr);
1312                return;
1313        }
1314        /* Store cfa_code in tpa_info to use in tpa_end
1315         * completion processing.
1316         */
1317        tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1318        prod_rx_buf->data = tpa_info->data;
1319        prod_rx_buf->data_ptr = tpa_info->data_ptr;
1320
1321        mapping = tpa_info->mapping;
1322        prod_rx_buf->mapping = mapping;
1323
1324        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1325
1326        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1327
1328        tpa_info->data = cons_rx_buf->data;
1329        tpa_info->data_ptr = cons_rx_buf->data_ptr;
1330        cons_rx_buf->data = NULL;
1331        tpa_info->mapping = cons_rx_buf->mapping;
1332
1333        tpa_info->len =
1334                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1335                                RX_TPA_START_CMP_LEN_SHIFT;
1336        if (likely(TPA_START_HASH_VALID(tpa_start))) {
1337                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1338
1339                tpa_info->hash_type = PKT_HASH_TYPE_L4;
1340                tpa_info->gso_type = SKB_GSO_TCPV4;
1341                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1342                if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1343                        tpa_info->gso_type = SKB_GSO_TCPV6;
1344                tpa_info->rss_hash =
1345                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1346        } else {
1347                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1348                tpa_info->gso_type = 0;
1349                netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1350        }
1351        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1352        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1353        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1354        tpa_info->agg_count = 0;
1355
1356        rxr->rx_prod = NEXT_RX(prod);
1357        cons = NEXT_RX(cons);
1358        rxr->rx_next_cons = NEXT_RX(cons);
1359        cons_rx_buf = &rxr->rx_buf_ring[cons];
1360
1361        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1362        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1363        cons_rx_buf->data = NULL;
1364}
1365
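/* Recycle the aggregation buffers of a TPA packet that is being dropped
 * so the RX aggregation ring stays populated.
 */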
1366static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1367{
1368        if (agg_bufs)
1369                bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1370}
1371
1372#ifdef CONFIG_INET
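/* Mark the UDP tunnel GSO type of a GRO'ed tunnel packet based on
 * whether the outer UDP header carries a checksum.
 */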
1373static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1374{
1375        struct udphdr *uh = NULL;
1376
1377        if (ip_proto == htons(ETH_P_IP)) {
1378                struct iphdr *iph = (struct iphdr *)skb->data;
1379
1380                if (iph->protocol == IPPROTO_UDP)
1381                        uh = (struct udphdr *)(iph + 1);
1382        } else {
1383                struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1384
1385                if (iph->nexthdr == IPPROTO_UDP)
1386                        uh = (struct udphdr *)(iph + 1);
1387        }
1388        if (uh) {
1389                if (uh->check)
1390                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1391                else
1392                        skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1393        }
1394}
1395#endif
1396
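/* Prepare a 5731x TPA packet for GRO: set the network and transport
 * header offsets from the hardware-provided hdr_info (adjusting for the
 * extra 4 bytes of internal loopback packets), seed the TCP pseudo-header
 * checksum, and flag UDP tunnel encapsulation.
 */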
1397static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1398                                           int payload_off, int tcp_ts,
1399                                           struct sk_buff *skb)
1400{
1401#ifdef CONFIG_INET
1402        struct tcphdr *th;
1403        int len, nw_off;
1404        u16 outer_ip_off, inner_ip_off, inner_mac_off;
1405        u32 hdr_info = tpa_info->hdr_info;
1406        bool loopback = false;
1407
1408        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1409        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1410        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1411
1412        /* If the packet is an internal loopback packet, the offsets will
1413         * have an extra 4 bytes.
1414         */
1415        if (inner_mac_off == 4) {
1416                loopback = true;
1417        } else if (inner_mac_off > 4) {
1418                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1419                                            ETH_HLEN - 2));
1420
1421                /* We only support inner IPv4/IPv6.  If we don't see the
1422                 * correct protocol ID, it must be a loopback packet where
1423                 * the offsets are off by 4.
1424                 */
1425                if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1426                        loopback = true;
1427        }
1428        if (loopback) {
1429                /* internal loopback packet, subtract 4 from all offsets */
1430                inner_ip_off -= 4;
1431                inner_mac_off -= 4;
1432                outer_ip_off -= 4;
1433        }
1434
1435        nw_off = inner_ip_off - ETH_HLEN;
1436        skb_set_network_header(skb, nw_off);
1437        if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1438                struct ipv6hdr *iph = ipv6_hdr(skb);
1439
1440                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1441                len = skb->len - skb_transport_offset(skb);
1442                th = tcp_hdr(skb);
1443                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1444        } else {
1445                struct iphdr *iph = ip_hdr(skb);
1446
1447                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1448                len = skb->len - skb_transport_offset(skb);
1449                th = tcp_hdr(skb);
1450                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1451        }
1452
1453        if (inner_mac_off) { /* tunnel */
1454                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1455                                            ETH_HLEN - 2));
1456
1457                bnxt_gro_tunnel(skb, proto);
1458        }
1459#endif
1460        return skb;
1461}
1462
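/* Prepare a P5 (5750x) TPA packet for GRO: set the network and transport
 * header offsets from hdr_info and flag UDP tunnel encapsulation.  Unlike
 * the 5731x path, no TCP pseudo-header checksum fixup is done here.
 */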
1463static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1464                                           int payload_off, int tcp_ts,
1465                                           struct sk_buff *skb)
1466{
1467#ifdef CONFIG_INET
1468        u16 outer_ip_off, inner_ip_off, inner_mac_off;
1469        u32 hdr_info = tpa_info->hdr_info;
1470        int iphdr_len, nw_off;
1471
1472        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1473        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1474        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1475
1476        nw_off = inner_ip_off - ETH_HLEN;
1477        skb_set_network_header(skb, nw_off);
1478        iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1479                     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1480        skb_set_transport_header(skb, nw_off + iphdr_len);
1481
1482        if (inner_mac_off) { /* tunnel */
1483                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1484                                            ETH_HLEN - 2));
1485
1486                bnxt_gro_tunnel(skb, proto);
1487        }
1488#endif
1489        return skb;
1490}
1491
1492#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1493#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1494
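/* Prepare a 5730x TPA packet for GRO: derive the header offsets from the
 * payload offset reported in the completion (accounting for TCP timestamp
 * options) and seed the TCP pseudo-header checksum.
 */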
1495static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1496                                           int payload_off, int tcp_ts,
1497                                           struct sk_buff *skb)
1498{
1499#ifdef CONFIG_INET
1500        struct tcphdr *th;
1501        int len, nw_off, tcp_opt_len = 0;
1502
1503        if (tcp_ts)
1504                tcp_opt_len = 12;
1505
1506        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1507                struct iphdr *iph;
1508
1509                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1510                         ETH_HLEN;
1511                skb_set_network_header(skb, nw_off);
1512                iph = ip_hdr(skb);
1513                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1514                len = skb->len - skb_transport_offset(skb);
1515                th = tcp_hdr(skb);
1516                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1517        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1518                struct ipv6hdr *iph;
1519
1520                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1521                         ETH_HLEN;
1522                skb_set_network_header(skb, nw_off);
1523                iph = ipv6_hdr(skb);
1524                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1525                len = skb->len - skb_transport_offset(skb);
1526                th = tcp_hdr(skb);
1527                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1528        } else {
1529                dev_kfree_skb_any(skb);
1530                return NULL;
1531        }
1532
1533        if (nw_off) /* tunnel */
1534                bnxt_gro_tunnel(skb, skb->protocol);
1535#endif
1536        return skb;
1537}
1538
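/* Finish GRO processing of an aggregated TPA packet: fill in the segment
 * count and gso_size/gso_type, call the chip-specific bp->gro_func() to
 * fix up the headers, then hand the skb to tcp_gro_complete().
 * Single-segment packets are passed through unchanged.
 */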
1539static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1540                                           struct bnxt_tpa_info *tpa_info,
1541                                           struct rx_tpa_end_cmp *tpa_end,
1542                                           struct rx_tpa_end_cmp_ext *tpa_end1,
1543                                           struct sk_buff *skb)
1544{
1545#ifdef CONFIG_INET
1546        int payload_off;
1547        u16 segs;
1548
1549        segs = TPA_END_TPA_SEGS(tpa_end);
1550        if (segs == 1)
1551                return skb;
1552
1553        NAPI_GRO_CB(skb)->count = segs;
1554        skb_shinfo(skb)->gso_size =
1555                le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1556        skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1557        if (bp->flags & BNXT_FLAG_CHIP_P5)
1558                payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1559        else
1560                payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1561        skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1562        if (likely(skb))
1563                tcp_gro_complete(skb);
1564#endif
1565        return skb;
1566}
1567
1568/* Given the cfa_code of a received packet determine which
1569 * netdev (vf-rep or PF) the packet is destined to.
1570 */
1571static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1572{
1573        struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1574
1575        /* if vf-rep dev is NULL, the packet must belong to the PF */
1576        return dev ? dev : bp->dev;
1577}
1578
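/* Handle a TPA_END completion: look up the TPA state for this agg_id,
 * build an skb from the TPA buffer (copying small packets, otherwise
 * swapping in a freshly allocated buffer), attach any aggregation pages,
 * apply VLAN/RSS/checksum metadata, and optionally run GRO.  Returns
 * ERR_PTR(-EBUSY) if the completion ring does not yet hold all the
 * aggregation entries.
 */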
1579static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1580                                           struct bnxt_cp_ring_info *cpr,
1581                                           u32 *raw_cons,
1582                                           struct rx_tpa_end_cmp *tpa_end,
1583                                           struct rx_tpa_end_cmp_ext *tpa_end1,
1584                                           u8 *event)
1585{
1586        struct bnxt_napi *bnapi = cpr->bnapi;
1587        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1588        u8 *data_ptr, agg_bufs;
1589        unsigned int len;
1590        struct bnxt_tpa_info *tpa_info;
1591        dma_addr_t mapping;
1592        struct sk_buff *skb;
1593        u16 idx = 0, agg_id;
1594        void *data;
1595        bool gro;
1596
1597        if (unlikely(bnapi->in_reset)) {
1598                int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1599
1600                if (rc < 0)
1601                        return ERR_PTR(-EBUSY);
1602                return NULL;
1603        }
1604
1605        if (bp->flags & BNXT_FLAG_CHIP_P5) {
1606                agg_id = TPA_END_AGG_ID_P5(tpa_end);
1607                agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1608                agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1609                tpa_info = &rxr->rx_tpa[agg_id];
1610                if (unlikely(agg_bufs != tpa_info->agg_count)) {
1611                        netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1612                                    agg_bufs, tpa_info->agg_count);
1613                        agg_bufs = tpa_info->agg_count;
1614                }
1615                tpa_info->agg_count = 0;
1616                *event |= BNXT_AGG_EVENT;
1617                bnxt_free_agg_idx(rxr, agg_id);
1618                idx = agg_id;
1619                gro = !!(bp->flags & BNXT_FLAG_GRO);
1620        } else {
1621                agg_id = TPA_END_AGG_ID(tpa_end);
1622                agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1623                tpa_info = &rxr->rx_tpa[agg_id];
1624                idx = RING_CMP(*raw_cons);
1625                if (agg_bufs) {
1626                        if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1627                                return ERR_PTR(-EBUSY);
1628
1629                        *event |= BNXT_AGG_EVENT;
1630                        idx = NEXT_CMP(idx);
1631                }
1632                gro = !!TPA_END_GRO(tpa_end);
1633        }
1634        data = tpa_info->data;
1635        data_ptr = tpa_info->data_ptr;
1636        prefetch(data_ptr);
1637        len = tpa_info->len;
1638        mapping = tpa_info->mapping;
1639
1640        if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1641                bnxt_abort_tpa(cpr, idx, agg_bufs);
1642                if (agg_bufs > MAX_SKB_FRAGS)
1643                        netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1644                                    agg_bufs, (int)MAX_SKB_FRAGS);
1645                return NULL;
1646        }
1647
1648        if (len <= bp->rx_copy_thresh) {
1649                skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1650                if (!skb) {
1651                        bnxt_abort_tpa(cpr, idx, agg_bufs);
1652                        return NULL;
1653                }
1654        } else {
1655                u8 *new_data;
1656                dma_addr_t new_mapping;
1657
1658                new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1659                if (!new_data) {
1660                        bnxt_abort_tpa(cpr, idx, agg_bufs);
1661                        return NULL;
1662                }
1663
1664                tpa_info->data = new_data;
1665                tpa_info->data_ptr = new_data + bp->rx_offset;
1666                tpa_info->mapping = new_mapping;
1667
1668                skb = build_skb(data, 0);
1669                dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1670                                       bp->rx_buf_use_size, bp->rx_dir,
1671                                       DMA_ATTR_WEAK_ORDERING);
1672
1673                if (!skb) {
1674                        kfree(data);
1675                        bnxt_abort_tpa(cpr, idx, agg_bufs);
1676                        return NULL;
1677                }
1678                skb_reserve(skb, bp->rx_offset);
1679                skb_put(skb, len);
1680        }
1681
1682        if (agg_bufs) {
1683                skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1684                if (!skb) {
1685                        /* Page reuse already handled by bnxt_rx_pages(). */
1686                        return NULL;
1687                }
1688        }
1689
1690        skb->protocol =
1691                eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1692
1693        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1694                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1695
1696        if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1697            (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1698                __be16 vlan_proto = htons(tpa_info->metadata >>
1699                                          RX_CMP_FLAGS2_METADATA_TPID_SFT);
1700                u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1701
1702                if (eth_type_vlan(vlan_proto)) {
1703                        __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1704                } else {
1705                        dev_kfree_skb(skb);
1706                        return NULL;
1707                }
1708        }
1709
1710        skb_checksum_none_assert(skb);
1711        if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1712                skb->ip_summed = CHECKSUM_UNNECESSARY;
1713                skb->csum_level =
1714                        (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1715        }
1716
1717        if (gro)
1718                skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1719
1720        return skb;
1721}
1722
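/* Queue an RX aggregation completion in the agg_arr[] of the TPA entry
 * it belongs to; the entries are consumed when the corresponding TPA_END
 * completion is processed.
 */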
1723static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1724                         struct rx_agg_cmp *rx_agg)
1725{
1726        u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1727        struct bnxt_tpa_info *tpa_info;
1728
1729        agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1730        tpa_info = &rxr->rx_tpa[agg_id];
1731        BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1732        tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1733}
1734
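/* Pass a completed skb up the stack, diverting it to the vf-rep handler
 * if it belongs to a VF representor netdev.
 */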
1735static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1736                             struct sk_buff *skb)
1737{
1738        if (skb->dev != bp->dev) {
1739                /* this packet belongs to a vf-rep */
1740                bnxt_vf_rep_rx(bp, skb);
1741                return;
1742        }
1743        skb_record_rx_queue(skb, bnapi->index);
1744        napi_gro_receive(&bnapi->napi, skb);
1745}
1746
1747/* returns the following:
1748 * 1       - 1 packet successfully received
1749 * 0       - successful TPA_START, packet not completed yet
1750 * -EBUSY  - completion ring does not have all the agg buffers yet
1751 * -ENOMEM - packet aborted due to out of memory
1752 * -EIO    - packet aborted due to hw error indicated in BD
1753 */
1754static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1755                       u32 *raw_cons, u8 *event)
1756{
1757        struct bnxt_napi *bnapi = cpr->bnapi;
1758        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1759        struct net_device *dev = bp->dev;
1760        struct rx_cmp *rxcmp;
1761        struct rx_cmp_ext *rxcmp1;
1762        u32 tmp_raw_cons = *raw_cons;
1763        u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1764        struct bnxt_sw_rx_bd *rx_buf;
1765        unsigned int len;
1766        u8 *data_ptr, agg_bufs, cmp_type;
1767        dma_addr_t dma_addr;
1768        struct sk_buff *skb;
1769        u32 flags, misc;
1770        void *data;
1771        int rc = 0;
1772
1773        rxcmp = (struct rx_cmp *)
1774                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1775
1776        cmp_type = RX_CMP_TYPE(rxcmp);
1777
1778        if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1779                bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1780                goto next_rx_no_prod_no_len;
1781        }
1782
1783        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1784        cp_cons = RING_CMP(tmp_raw_cons);
1785        rxcmp1 = (struct rx_cmp_ext *)
1786                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1787
1788        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1789                return -EBUSY;
1790
1791        /* The valid test of the entry must be done first before
1792         * reading any further.
1793         */
1794        dma_rmb();
1795        prod = rxr->rx_prod;
1796
1797        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1798                bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1799                               (struct rx_tpa_start_cmp_ext *)rxcmp1);
1800
1801                *event |= BNXT_RX_EVENT;
1802                goto next_rx_no_prod_no_len;
1803
1804        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1805                skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1806                                   (struct rx_tpa_end_cmp *)rxcmp,
1807                                   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1808
1809                if (IS_ERR(skb))
1810                        return -EBUSY;
1811
1812                rc = -ENOMEM;
1813                if (likely(skb)) {
1814                        bnxt_deliver_skb(bp, bnapi, skb);
1815                        rc = 1;
1816                }
1817                *event |= BNXT_RX_EVENT;
1818                goto next_rx_no_prod_no_len;
1819        }
1820
1821        cons = rxcmp->rx_cmp_opaque;
1822        if (unlikely(cons != rxr->rx_next_cons)) {
1823                int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1824
1825                /* 0xffff is forced error, don't print it */
1826                if (rxr->rx_next_cons != 0xffff)
1827                        netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1828                                    cons, rxr->rx_next_cons);
1829                bnxt_sched_reset(bp, rxr);
1830                if (rc1)
1831                        return rc1;
1832                goto next_rx_no_prod_no_len;
1833        }
1834        rx_buf = &rxr->rx_buf_ring[cons];
1835        data = rx_buf->data;
1836        data_ptr = rx_buf->data_ptr;
1837        prefetch(data_ptr);
1838
1839        misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1840        agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1841
1842        if (agg_bufs) {
1843                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1844                        return -EBUSY;
1845
1846                cp_cons = NEXT_CMP(cp_cons);
1847                *event |= BNXT_AGG_EVENT;
1848        }
1849        *event |= BNXT_RX_EVENT;
1850
1851        rx_buf->data = NULL;
1852        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1853                u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1854
1855                bnxt_reuse_rx_data(rxr, cons, data);
1856                if (agg_bufs)
1857                        bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1858                                               false);
1859
1860                rc = -EIO;
1861                if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1862                        bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1863                        if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1864                            !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1865                                netdev_warn_once(bp->dev, "RX buffer error %x\n",
1866                                                 rx_err);
1867                                bnxt_sched_reset(bp, rxr);
1868                        }
1869                }
1870                goto next_rx_no_len;
1871        }
1872
1873        flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1874        len = flags >> RX_CMP_LEN_SHIFT;
1875        dma_addr = rx_buf->mapping;
1876
1877        if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1878                rc = 1;
1879                goto next_rx;
1880        }
1881
1882        if (len <= bp->rx_copy_thresh) {
1883                skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1884                bnxt_reuse_rx_data(rxr, cons, data);
1885                if (!skb) {
1886                        if (agg_bufs)
1887                                bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1888                                                       agg_bufs, false);
1889                        rc = -ENOMEM;
1890                        goto next_rx;
1891                }
1892        } else {
1893                u32 payload;
1894
1895                if (rx_buf->data_ptr == data_ptr)
1896                        payload = misc & RX_CMP_PAYLOAD_OFFSET;
1897                else
1898                        payload = 0;
1899                skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1900                                      payload | len);
1901                if (!skb) {
1902                        rc = -ENOMEM;
1903                        goto next_rx;
1904                }
1905        }
1906
1907        if (agg_bufs) {
1908                skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1909                if (!skb) {
1910                        rc = -ENOMEM;
1911                        goto next_rx;
1912                }
1913        }
1914
1915        if (RX_CMP_HASH_VALID(rxcmp)) {
1916                u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1917                enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1918
1919                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1920                if (hash_type != 1 && hash_type != 3)
1921                        type = PKT_HASH_TYPE_L3;
1922                skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1923        }
1924
1925        cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1926        skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1927
1928        if ((rxcmp1->rx_cmp_flags2 &
1929             cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1930            (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1931                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1932                u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1933                __be16 vlan_proto = htons(meta_data >>
1934                                          RX_CMP_FLAGS2_METADATA_TPID_SFT);
1935
1936                if (eth_type_vlan(vlan_proto)) {
1937                        __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1938                } else {
1939                        dev_kfree_skb(skb);
1940                        goto next_rx;
1941                }
1942        }
1943
1944        skb_checksum_none_assert(skb);
1945        if (RX_CMP_L4_CS_OK(rxcmp1)) {
1946                if (dev->features & NETIF_F_RXCSUM) {
1947                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1948                        skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1949                }
1950        } else {
1951                if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1952                        if (dev->features & NETIF_F_RXCSUM)
1953                                bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1954                }
1955        }
1956
1957        if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
1958                     RX_CMP_FLAGS_ITYPE_PTP_W_TS)) {
1959                if (bp->flags & BNXT_FLAG_CHIP_P5) {
1960                        u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
1961                        u64 ns, ts;
1962
1963                        if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
1964                                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
1965
1966                                spin_lock_bh(&ptp->ptp_lock);
1967                                ns = timecounter_cyc2time(&ptp->tc, ts);
1968                                spin_unlock_bh(&ptp->ptp_lock);
1969                                memset(skb_hwtstamps(skb), 0,
1970                                       sizeof(*skb_hwtstamps(skb)));
1971                                skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
1972                        }
1973                }
1974        }
1975        bnxt_deliver_skb(bp, bnapi, skb);
1976        rc = 1;
1977
1978next_rx:
1979        cpr->rx_packets += 1;
1980        cpr->rx_bytes += len;
1981
1982next_rx_no_len:
1983        rxr->rx_prod = NEXT_RX(prod);
1984        rxr->rx_next_cons = NEXT_RX(cons);
1985
1986next_rx_no_prod_no_len:
1987        *raw_cons = tmp_raw_cons;
1988
1989        return rc;
1990}
1991
1992/* In netpoll mode, if we are using a combined completion ring, we need to
1993 * discard the rx packets and recycle the buffers.
1994 */
1995static int bnxt_force_rx_discard(struct bnxt *bp,
1996                                 struct bnxt_cp_ring_info *cpr,
1997                                 u32 *raw_cons, u8 *event)
1998{
1999        u32 tmp_raw_cons = *raw_cons;
2000        struct rx_cmp_ext *rxcmp1;
2001        struct rx_cmp *rxcmp;
2002        u16 cp_cons;
2003        u8 cmp_type;
2004
2005        cp_cons = RING_CMP(tmp_raw_cons);
2006        rxcmp = (struct rx_cmp *)
2007                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2008
2009        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2010        cp_cons = RING_CMP(tmp_raw_cons);
2011        rxcmp1 = (struct rx_cmp_ext *)
2012                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2013
2014        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2015                return -EBUSY;
2016
2017        /* The valid test of the entry must be done first before
2018         * reading any further.
2019         */
2020        dma_rmb();
2021        cmp_type = RX_CMP_TYPE(rxcmp);
2022        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2023                rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2024                        cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2025        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2026                struct rx_tpa_end_cmp_ext *tpa_end1;
2027
2028                tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2029                tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2030                        cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2031        }
2032        return bnxt_rx_pkt(bp, cpr, raw_cons, event);
2033}
2034
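/* Read one of the firmware health registers.  The register descriptor
 * encodes where the value lives: PCI config space, a GRC window remapped
 * into BAR0, or directly in BAR0/BAR1.
 */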
2035u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2036{
2037        struct bnxt_fw_health *fw_health = bp->fw_health;
2038        u32 reg = fw_health->regs[reg_idx];
2039        u32 reg_type, reg_off, val = 0;
2040
2041        reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2042        reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2043        switch (reg_type) {
2044        case BNXT_FW_HEALTH_REG_TYPE_CFG:
2045                pci_read_config_dword(bp->pdev, reg_off, &val);
2046                break;
2047        case BNXT_FW_HEALTH_REG_TYPE_GRC:
2048                reg_off = fw_health->mapped_regs[reg_idx];
2049                fallthrough;
2050        case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2051                val = readl(bp->bar0 + reg_off);
2052                break;
2053        case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2054                val = readl(bp->bar1 + reg_off);
2055                break;
2056        }
2057        if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2058                val &= fw_health->fw_reset_inprog_reg_mask;
2059        return val;
2060}
2061
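/* Map a firmware aggregation ring ID back to the ring group index that
 * owns it, or return INVALID_HW_RING_ID if no RX ring matches.
 */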
2062static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2063{
2064        int i;
2065
2066        for (i = 0; i < bp->rx_nr_rings; i++) {
2067                u16 grp_idx = bp->rx_ring[i].bnapi->index;
2068                struct bnxt_ring_grp_info *grp_info;
2069
2070                grp_info = &bp->grp_info[grp_idx];
2071                if (grp_info->agg_fw_ring_id == ring_id)
2072                        return grp_idx;
2073        }
2074        return INVALID_HW_RING_ID;
2075}
2076
2077#define BNXT_GET_EVENT_PORT(data)       \
2078        ((data) &                       \
2079         ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2080
2081#define BNXT_EVENT_RING_TYPE(data2)     \
2082        ((data2) &                      \
2083         ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2084
2085#define BNXT_EVENT_RING_TYPE_RX(data2)  \
2086        (BNXT_EVENT_RING_TYPE(data2) == \
2087         ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2088
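/* Decode a firmware async event completion: record the relevant state
 * and set sp_event bits so the deferred slow-path work can act on the
 * event; events that need no deferred work exit early.
 */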
2089static int bnxt_async_event_process(struct bnxt *bp,
2090                                    struct hwrm_async_event_cmpl *cmpl)
2091{
2092        u16 event_id = le16_to_cpu(cmpl->event_id);
2093        u32 data1 = le32_to_cpu(cmpl->event_data1);
2094        u32 data2 = le32_to_cpu(cmpl->event_data2);
2095
2096        /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2097        switch (event_id) {
2098        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2099                struct bnxt_link_info *link_info = &bp->link_info;
2100
2101                if (BNXT_VF(bp))
2102                        goto async_event_process_exit;
2103
2104                /* print unsupported speed warning in forced speed mode only */
2105                if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2106                    (data1 & 0x20000)) {
2107                        u16 fw_speed = link_info->force_link_speed;
2108                        u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2109
2110                        if (speed != SPEED_UNKNOWN)
2111                                netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2112                                            speed);
2113                }
2114                set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2115        }
2116                fallthrough;
2117        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2118        case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2119                set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2120                fallthrough;
2121        case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2122                set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2123                break;
2124        case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2125                set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2126                break;
2127        case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2128                u16 port_id = BNXT_GET_EVENT_PORT(data1);
2129
2130                if (BNXT_VF(bp))
2131                        break;
2132
2133                if (bp->pf.port_id != port_id)
2134                        break;
2135
2136                set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2137                break;
2138        }
2139        case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2140                if (BNXT_PF(bp))
2141                        goto async_event_process_exit;
2142                set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2143                break;
2144        case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2145                char *fatal_str = "non-fatal";
2146
2147                if (!bp->fw_health)
2148                        goto async_event_process_exit;
2149
2150                bp->fw_reset_timestamp = jiffies;
2151                bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2152                if (!bp->fw_reset_min_dsecs)
2153                        bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2154                bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2155                if (!bp->fw_reset_max_dsecs)
2156                        bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2157                if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2158                        fatal_str = "fatal";
2159                        set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2160                }
2161                netif_warn(bp, hw, bp->dev,
2162                           "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2163                           fatal_str, data1, data2,
2164                           bp->fw_reset_min_dsecs * 100,
2165                           bp->fw_reset_max_dsecs * 100);
2166                set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2167                break;
2168        }
2169        case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2170                struct bnxt_fw_health *fw_health = bp->fw_health;
2171
2172                if (!fw_health)
2173                        goto async_event_process_exit;
2174
2175                fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2176                fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2177                if (!fw_health->enabled) {
2178                        netif_info(bp, drv, bp->dev,
2179                                   "Error recovery info: error recovery[0]\n");
2180                        break;
2181                }
2182                fw_health->tmr_multiplier =
2183                        DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2184                                     bp->current_interval * 10);
2185                fw_health->tmr_counter = fw_health->tmr_multiplier;
2186                fw_health->last_fw_heartbeat =
2187                        bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2188                fw_health->last_fw_reset_cnt =
2189                        bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2190                netif_info(bp, drv, bp->dev,
2191                           "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2192                           fw_health->master, fw_health->last_fw_reset_cnt,
2193                           bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2194                goto async_event_process_exit;
2195        }
2196        case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2197                netif_notice(bp, hw, bp->dev,
2198                             "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2199                             data1, data2);
2200                goto async_event_process_exit;
2201        case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2202                struct bnxt_rx_ring_info *rxr;
2203                u16 grp_idx;
2204
2205                if (bp->flags & BNXT_FLAG_CHIP_P5)
2206                        goto async_event_process_exit;
2207
2208                netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2209                            BNXT_EVENT_RING_TYPE(data2), data1);
2210                if (!BNXT_EVENT_RING_TYPE_RX(data2))
2211                        goto async_event_process_exit;
2212
2213                grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2214                if (grp_idx == INVALID_HW_RING_ID) {
2215                        netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2216                                    data1);
2217                        goto async_event_process_exit;
2218                }
2219                rxr = bp->bnapi[grp_idx]->rx_ring;
2220                bnxt_sched_reset(bp, rxr);
2221                goto async_event_process_exit;
2222        }
2223        case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2224                struct bnxt_fw_health *fw_health = bp->fw_health;
2225
2226                netif_notice(bp, hw, bp->dev,
2227                             "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2228                             data1, data2);
2229                if (fw_health) {
2230                        fw_health->echo_req_data1 = data1;
2231                        fw_health->echo_req_data2 = data2;
2232                        set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2233                        break;
2234                }
2235                goto async_event_process_exit;
2236        }
2237        default:
2238                goto async_event_process_exit;
2239        }
2240        bnxt_queue_sp_work(bp);
2241async_event_process_exit:
2242        bnxt_ulp_async_events(bp, cmpl);
2243        return 0;
2244}
2245
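/* Handle HWRM completions seen on the completion ring: match DONE
 * completions against the pending sequence ID, queue forwarded VF
 * requests for the PF to service, and dispatch async events.
 */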
2246static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2247{
2248        u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2249        struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2250        struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2251                                (struct hwrm_fwd_req_cmpl *)txcmp;
2252
2253        switch (cmpl_type) {
2254        case CMPL_BASE_TYPE_HWRM_DONE:
2255                seq_id = le16_to_cpu(h_cmpl->sequence_id);
2256                if (seq_id == bp->hwrm_intr_seq_id)
2257                        bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2258                else
2259                        netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2260                break;
2261
2262        case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2263                vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2264
2265                if ((vf_id < bp->pf.first_vf_id) ||
2266                    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2267                        netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2268                                   vf_id);
2269                        return -EINVAL;
2270                }
2271
2272                set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2273                set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2274                bnxt_queue_sp_work(bp);
2275                break;
2276
2277        case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2278                bnxt_async_event_process(bp,
2279                                         (struct hwrm_async_event_cmpl *)txcmp);
2280                break;
2281
2282        default:
2283                break;
2284        }
2285
2286        return 0;
2287}
2288
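/* MSI-X interrupt handler: count the event, prefetch the next completion
 * entry, and schedule NAPI for this ring.
 */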
2289static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2290{
2291        struct bnxt_napi *bnapi = dev_instance;
2292        struct bnxt *bp = bnapi->bp;
2293        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2294        u32 cons = RING_CMP(cpr->cp_raw_cons);
2295
2296        cpr->event_ctr++;
2297        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2298        napi_schedule(&bnapi->napi);
2299        return IRQ_HANDLED;
2300}
2301
2302static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2303{
2304        u32 raw_cons = cpr->cp_raw_cons;
2305        u16 cons = RING_CMP(raw_cons);
2306        struct tx_cmp *txcmp;
2307
2308        txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2309
2310        return TX_CMP_VALID(txcmp, raw_cons);
2311}
2312
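/* Legacy INTx interrupt handler: when no work is pending, check the
 * legacy status register to filter out interrupts that are not ours,
 * then disable the ring IRQ and schedule NAPI unless interrupts are
 * masked via intr_sem.
 */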
2313static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2314{
2315        struct bnxt_napi *bnapi = dev_instance;
2316        struct bnxt *bp = bnapi->bp;
2317        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2318        u32 cons = RING_CMP(cpr->cp_raw_cons);
2319        u32 int_status;
2320
2321        prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2322
2323        if (!bnxt_has_work(bp, cpr)) {
2324                int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2325                /* return if erroneous interrupt */
2326                if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2327                        return IRQ_NONE;
2328        }
2329
2330        /* disable ring IRQ */
2331        BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2332
2333        /* Return here if interrupt is shared and is disabled. */
2334        if (unlikely(atomic_read(&bp->intr_sem) != 0))
2335                return IRQ_HANDLED;
2336
2337        napi_schedule(&bnapi->napi);
2338        return IRQ_HANDLED;
2339}
2340
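/* Core completion ring processing: walk the ring, counting TX completions
 * and handing RX/TPA completions to bnxt_rx_pkt() (or the discard path
 * when the budget is zero), until the budget is spent or the ring runs
 * dry.  TX buffer reclaim and RX doorbell updates are deferred to
 * __bnxt_poll_work_done().
 */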
2341static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2342                            int budget)
2343{
2344        struct bnxt_napi *bnapi = cpr->bnapi;
2345        u32 raw_cons = cpr->cp_raw_cons;
2346        u32 cons;
2347        int tx_pkts = 0;
2348        int rx_pkts = 0;
2349        u8 event = 0;
2350        struct tx_cmp *txcmp;
2351
2352        cpr->has_more_work = 0;
2353        cpr->had_work_done = 1;
2354        while (1) {
2355                int rc;
2356
2357                cons = RING_CMP(raw_cons);
2358                txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2359
2360                if (!TX_CMP_VALID(txcmp, raw_cons))
2361                        break;
2362
2363                /* The valid test of the entry must be done first before
2364                 * reading any further.
2365                 */
2366                dma_rmb();
2367                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2368                        tx_pkts++;
2369                        /* return full budget so NAPI will complete. */
2370                        if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2371                                rx_pkts = budget;
2372                                raw_cons = NEXT_RAW_CMP(raw_cons);
2373                                if (budget)
2374                                        cpr->has_more_work = 1;
2375                                break;
2376                        }
2377                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2378                        if (likely(budget))
2379                                rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2380                        else
2381                                rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2382                                                           &event);
2383                        if (likely(rc >= 0))
2384                                rx_pkts += rc;
2385                        /* Increment rx_pkts when rc is -ENOMEM to count towards
2386                         * the NAPI budget.  Otherwise, we may potentially loop
2387                         * here forever if we consistently cannot allocate
2388                         * buffers.
2389                         */
2390                        else if (rc == -ENOMEM && budget)
2391                                rx_pkts++;
2392                        else if (rc == -EBUSY)  /* partial completion */
2393                                break;
2394                } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2395                                     CMPL_BASE_TYPE_HWRM_DONE) ||
2396                                    (TX_CMP_TYPE(txcmp) ==
2397                                     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2398                                    (TX_CMP_TYPE(txcmp) ==
2399                                     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2400                        bnxt_hwrm_handler(bp, txcmp);
2401                }
2402                raw_cons = NEXT_RAW_CMP(raw_cons);
2403
2404                if (rx_pkts && rx_pkts == budget) {
2405                        cpr->has_more_work = 1;
2406                        break;
2407                }
2408        }
2409
2410        if (event & BNXT_REDIRECT_EVENT)
2411                xdp_do_flush_map();
2412
2413        if (event & BNXT_TX_EVENT) {
2414                struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2415                u16 prod = txr->tx_prod;
2416
2417                /* Sync BD data before updating doorbell */
2418                wmb();
2419
2420                bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2421        }
2422
2423        cpr->cp_raw_cons = raw_cons;
2424        bnapi->tx_pkts += tx_pkts;
2425        bnapi->events |= event;
2426        return rx_pkts;
2427}
2428
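/* Finish deferred work after completion ring processing: reclaim
 * completed TX buffers and ring the RX/aggregation doorbells.
 */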
2429static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2430{
2431        if (bnapi->tx_pkts) {
2432                bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2433                bnapi->tx_pkts = 0;
2434        }
2435
2436        if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2437                struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2438
2439                if (bnapi->events & BNXT_AGG_EVENT)
2440                        bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2441                bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2442        }
2443        bnapi->events = 0;
2444}
2445
2446static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2447                          int budget)
2448{
2449        struct bnxt_napi *bnapi = cpr->bnapi;
2450        int rx_pkts;
2451
2452        rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2453
2454        /* ACK completion ring before freeing tx ring and producing new
2455         * buffers in rx/agg rings to prevent overflowing the completion
2456         * ring.
2457         */
2458        bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2459
2460        __bnxt_poll_work_done(bp, bnapi);
2461        return rx_pkts;
2462}
2463
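/* NAPI poll routine for the Nitro A0 special completion ring: RX
 * completions are only discarded here (a CRC error is forced so the
 * buffers get recycled), and HWRM completions are dispatched.
 */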
2464static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2465{
2466        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2467        struct bnxt *bp = bnapi->bp;
2468        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2469        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2470        struct tx_cmp *txcmp;
2471        struct rx_cmp_ext *rxcmp1;
2472        u32 cp_cons, tmp_raw_cons;
2473        u32 raw_cons = cpr->cp_raw_cons;
2474        u32 rx_pkts = 0;
2475        u8 event = 0;
2476
2477        while (1) {
2478                int rc;
2479
2480                cp_cons = RING_CMP(raw_cons);
2481                txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2482
2483                if (!TX_CMP_VALID(txcmp, raw_cons))
2484                        break;
2485
2486                /* The valid test of the entry must be done first before
2487                 * reading any further.
2488                 */
2489                dma_rmb();
2490                if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2491                        tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2492                        cp_cons = RING_CMP(tmp_raw_cons);
2493                        rxcmp1 = (struct rx_cmp_ext *)
2494                          &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2495
2496                        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2497                                break;
2498
2499                        /* force an error to recycle the buffer */
2500                        rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2501                                cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2502
2503                        rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2504                        if (likely(rc == -EIO) && budget)
2505                                rx_pkts++;
2506                        else if (rc == -EBUSY)  /* partial completion */
2507                                break;
2508                } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2509                                    CMPL_BASE_TYPE_HWRM_DONE)) {
2510                        bnxt_hwrm_handler(bp, txcmp);
2511                } else {
2512                        netdev_err(bp->dev,
2513                                   "Invalid completion received on special ring\n");
2514                }
2515                raw_cons = NEXT_RAW_CMP(raw_cons);
2516
2517                if (rx_pkts == budget)
2518                        break;
2519        }
2520
2521        cpr->cp_raw_cons = raw_cons;
2522        BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2523        bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2524
2525        if (event & BNXT_AGG_EVENT)
2526                bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2527
2528        if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2529                napi_complete_done(napi, rx_pkts);
2530                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2531        }
2532        return rx_pkts;
2533}
2534
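/* NAPI poll routine for non-P5 chips: process completions until the
 * budget is exhausted or no work remains, then feed the DIM algorithm
 * if dynamic interrupt moderation is enabled.
 */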
2535static int bnxt_poll(struct napi_struct *napi, int budget)
2536{
2537        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2538        struct bnxt *bp = bnapi->bp;
2539        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2540        int work_done = 0;
2541
2542        if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2543                napi_complete(napi);
2544                return 0;
2545        }
2546        while (1) {
2547                work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2548
2549                if (work_done >= budget) {
2550                        if (!budget)
2551                                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2552                        break;
2553                }
2554
2555                if (!bnxt_has_work(bp, cpr)) {
2556                        if (napi_complete_done(napi, work_done))
2557                                BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2558                        break;
2559                }
2560        }
2561        if (bp->flags & BNXT_FLAG_DIM) {
2562                struct dim_sample dim_sample = {};
2563
2564                dim_update_sample(cpr->event_ctr,
2565                                  cpr->rx_packets,
2566                                  cpr->rx_bytes,
2567                                  &dim_sample);
2568                net_dim(&cpr->dim, dim_sample);
2569        }
2570        return work_done;
2571}
2572
2573static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2574{
2575        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2576        int i, work_done = 0;
2577
2578        for (i = 0; i < 2; i++) {
2579                struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2580
2581                if (cpr2) {
2582                        work_done += __bnxt_poll_work(bp, cpr2,
2583                                                      budget - work_done);
2584                        cpr->has_more_work |= cpr2->has_more_work;
2585                }
2586        }
2587        return work_done;
2588}
2589
2590static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2591                                 u64 dbr_type)
2592{
2593        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2594        int i;
2595
2596        for (i = 0; i < 2; i++) {
2597                struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2598                struct bnxt_db_info *db;
2599
2600                if (cpr2 && cpr2->had_work_done) {
2601                        db = &cpr2->cp_db;
2602                        writeq(db->db_key64 | dbr_type |
2603                               RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2604                        cpr2->had_work_done = 0;
2605                }
2606        }
2607        __bnxt_poll_work_done(bp, bnapi);
2608}
2609
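/* NAPI poll routine for P5 chips: drain the notification queue (NQ),
 * polling the completion rings referenced by CQ_NOTIFICATION entries,
 * and re-arm the NQ/CQ doorbells when the budget allows completion.
 */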
2610static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2611{
2612        struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2613        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2614        u32 raw_cons = cpr->cp_raw_cons;
2615        struct bnxt *bp = bnapi->bp;
2616        struct nqe_cn *nqcmp;
2617        int work_done = 0;
2618        u32 cons;
2619
2620        if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2621                napi_complete(napi);
2622                return 0;
2623        }
2624        if (cpr->has_more_work) {
2625                cpr->has_more_work = 0;
2626                work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2627        }
2628        while (1) {
2629                cons = RING_CMP(raw_cons);
2630                nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2631
2632                if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2633                        if (cpr->has_more_work)
2634                                break;
2635
2636                        __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2637                        cpr->cp_raw_cons = raw_cons;
2638                        if (napi_complete_done(napi, work_done))
2639                                BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2640                                                  cpr->cp_raw_cons);
2641                        return work_done;
2642                }
2643
2644                /* The valid test of the entry must be done first before
2645                 * reading any further.
2646                 */
2647                dma_rmb();
2648
2649                if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2650                        u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2651                        struct bnxt_cp_ring_info *cpr2;
2652
2653                        cpr2 = cpr->cp_ring_arr[idx];
2654                        work_done += __bnxt_poll_work(bp, cpr2,
2655                                                      budget - work_done);
2656                        cpr->has_more_work |= cpr2->has_more_work;
2657                } else {
2658                        bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2659                }
2660                raw_cons = NEXT_RAW_CMP(raw_cons);
2661        }
2662        __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2663        if (raw_cons != cpr->cp_raw_cons) {
2664                cpr->cp_raw_cons = raw_cons;
2665                BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2666        }
2667        return work_done;
2668}
2669
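/* Unmap and free all pending TX buffers (including XDP_REDIRECT frames
 * and push packets) and reset the backing netdev TX queues.
 */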
2670static void bnxt_free_tx_skbs(struct bnxt *bp)
2671{
2672        int i, max_idx;
2673        struct pci_dev *pdev = bp->pdev;
2674
2675        if (!bp->tx_ring)
2676                return;
2677
2678        max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2679        for (i = 0; i < bp->tx_nr_rings; i++) {
2680                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2681                int j;
2682
2683                for (j = 0; j < max_idx;) {
2684                        struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2685                        struct sk_buff *skb;
2686                        int k, last;
2687
2688                        if (i < bp->tx_nr_rings_xdp &&
2689                            tx_buf->action == XDP_REDIRECT) {
2690                                dma_unmap_single(&pdev->dev,
2691                                        dma_unmap_addr(tx_buf, mapping),
2692                                        dma_unmap_len(tx_buf, len),
2693                                        PCI_DMA_TODEVICE);
2694                                xdp_return_frame(tx_buf->xdpf);
2695                                tx_buf->action = 0;
2696                                tx_buf->xdpf = NULL;
2697                                j++;
2698                                continue;
2699                        }
2700
2701                        skb = tx_buf->skb;
2702                        if (!skb) {
2703                                j++;
2704                                continue;
2705                        }
2706
2707                        tx_buf->skb = NULL;
2708
2709                        if (tx_buf->is_push) {
2710                                dev_kfree_skb(skb);
2711                                j += 2;
2712                                continue;
2713                        }
2714
2715                        dma_unmap_single(&pdev->dev,
2716                                         dma_unmap_addr(tx_buf, mapping),
2717                                         skb_headlen(skb),
2718                                         PCI_DMA_TODEVICE);
2719
2720                        last = tx_buf->nr_frags;
2721                        j += 2;
2722                        for (k = 0; k < last; k++, j++) {
2723                                int ring_idx = j & bp->tx_ring_mask;
2724                                skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2725
2726                                tx_buf = &txr->tx_buf_ring[ring_idx];
2727                                dma_unmap_page(
2728                                        &pdev->dev,
2729                                        dma_unmap_addr(tx_buf, mapping),
2730                                        skb_frag_size(frag), PCI_DMA_TODEVICE);
2731                        }
2732                        dev_kfree_skb(skb);
2733                }
2734                netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2735        }
2736}
2737
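/* Free every receive buffer of one RX ring: TPA buffers, regular RX buffers
 * (page pool pages in page mode, kmalloc'ed data otherwise), aggregation
 * pages and the cached rx_page, then clear the TPA aggregation index map.
 */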
2738static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2739{
2740        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2741        struct pci_dev *pdev = bp->pdev;
2742        struct bnxt_tpa_idx_map *map;
2743        int i, max_idx, max_agg_idx;
2744
2745        max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2746        max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2747        if (!rxr->rx_tpa)
2748                goto skip_rx_tpa_free;
2749
2750        for (i = 0; i < bp->max_tpa; i++) {
2751                struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2752                u8 *data = tpa_info->data;
2753
2754                if (!data)
2755                        continue;
2756
2757                dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2758                                       bp->rx_buf_use_size, bp->rx_dir,
2759                                       DMA_ATTR_WEAK_ORDERING);
2760
2761                tpa_info->data = NULL;
2762
2763                kfree(data);
2764        }
2765
2766skip_rx_tpa_free:
2767        for (i = 0; i < max_idx; i++) {
2768                struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2769                dma_addr_t mapping = rx_buf->mapping;
2770                void *data = rx_buf->data;
2771
2772                if (!data)
2773                        continue;
2774
2775                rx_buf->data = NULL;
2776                if (BNXT_RX_PAGE_MODE(bp)) {
2777                        mapping -= bp->rx_dma_offset;
2778                        dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2779                                             bp->rx_dir,
2780                                             DMA_ATTR_WEAK_ORDERING);
2781                        page_pool_recycle_direct(rxr->page_pool, data);
2782                } else {
2783                        dma_unmap_single_attrs(&pdev->dev, mapping,
2784                                               bp->rx_buf_use_size, bp->rx_dir,
2785                                               DMA_ATTR_WEAK_ORDERING);
2786                        kfree(data);
2787                }
2788        }
2789        for (i = 0; i < max_agg_idx; i++) {
2790                struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2791                struct page *page = rx_agg_buf->page;
2792
2793                if (!page)
2794                        continue;
2795
2796                dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2797                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2798                                     DMA_ATTR_WEAK_ORDERING);
2799
2800                rx_agg_buf->page = NULL;
2801                __clear_bit(i, rxr->rx_agg_bmap);
2802
2803                __free_page(page);
2804        }
2805        if (rxr->rx_page) {
2806                __free_page(rxr->rx_page);
2807                rxr->rx_page = NULL;
2808        }
2809        map = rxr->rx_tpa_idx_map;
2810        if (map)
2811                memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2812}
2813
2814static void bnxt_free_rx_skbs(struct bnxt *bp)
2815{
2816        int i;
2817
2818        if (!bp->rx_ring)
2819                return;
2820
2821        for (i = 0; i < bp->rx_nr_rings; i++)
2822                bnxt_free_one_rx_ring_skbs(bp, i);
2823}
2824
2825static void bnxt_free_skbs(struct bnxt *bp)
2826{
2827        bnxt_free_tx_skbs(bp);
2828        bnxt_free_rx_skbs(bp);
2829}
2830
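/* Initialize a block of context memory: fill it entirely with init_val when
 * no per-entry offset is given, otherwise write init_val at that offset of
 * each mem_init->size sized entry.
 */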
2831static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2832{
2833        u8 init_val = mem_init->init_val;
2834        u16 offset = mem_init->offset;
2835        u8 *p2 = p;
2836        int i;
2837
2838        if (!init_val)
2839                return;
2840        if (offset == BNXT_MEM_INVALID_OFFSET) {
2841                memset(p, init_val, len);
2842                return;
2843        }
2844        for (i = 0; i < len; i += mem_init->size)
2845                *(p2 + i + offset) = init_val;
2846}
2847
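/* Free the coherent DMA pages of a ring, its page table (if one was
 * allocated) and the associated software ring buffer.
 */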
2848static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2849{
2850        struct pci_dev *pdev = bp->pdev;
2851        int i;
2852
2853        for (i = 0; i < rmem->nr_pages; i++) {
2854                if (!rmem->pg_arr[i])
2855                        continue;
2856
2857                dma_free_coherent(&pdev->dev, rmem->page_size,
2858                                  rmem->pg_arr[i], rmem->dma_arr[i]);
2859
2860                rmem->pg_arr[i] = NULL;
2861        }
2862        if (rmem->pg_tbl) {
2863                size_t pg_tbl_size = rmem->nr_pages * 8;
2864
2865                if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2866                        pg_tbl_size = rmem->page_size;
2867                dma_free_coherent(&pdev->dev, pg_tbl_size,
2868                                  rmem->pg_tbl, rmem->pg_tbl_map);
2869                rmem->pg_tbl = NULL;
2870        }
2871        if (rmem->vmem_size && *rmem->vmem) {
2872                vfree(*rmem->vmem);
2873                *rmem->vmem = NULL;
2874        }
2875}
2876
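/* Allocate the coherent DMA pages of a ring.  Multi-page or multi-level
 * rings also get a page table whose entries carry the page addresses plus
 * the PTU PTE valid/next-to-last/last bits where required.  A software ring
 * buffer is vzalloc()ed when vmem_size is set.
 */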
2877static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2878{
2879        struct pci_dev *pdev = bp->pdev;
2880        u64 valid_bit = 0;
2881        int i;
2882
2883        if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2884                valid_bit = PTU_PTE_VALID;
2885        if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2886                size_t pg_tbl_size = rmem->nr_pages * 8;
2887
2888                if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2889                        pg_tbl_size = rmem->page_size;
2890                rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2891                                                  &rmem->pg_tbl_map,
2892                                                  GFP_KERNEL);
2893                if (!rmem->pg_tbl)
2894                        return -ENOMEM;
2895        }
2896
2897        for (i = 0; i < rmem->nr_pages; i++) {
2898                u64 extra_bits = valid_bit;
2899
2900                rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2901                                                     rmem->page_size,
2902                                                     &rmem->dma_arr[i],
2903                                                     GFP_KERNEL);
2904                if (!rmem->pg_arr[i])
2905                        return -ENOMEM;
2906
2907                if (rmem->mem_init)
2908                        bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2909                                          rmem->page_size);
2910                if (rmem->nr_pages > 1 || rmem->depth > 0) {
2911                        if (i == rmem->nr_pages - 2 &&
2912                            (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2913                                extra_bits |= PTU_PTE_NEXT_TO_LAST;
2914                        else if (i == rmem->nr_pages - 1 &&
2915                                 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2916                                extra_bits |= PTU_PTE_LAST;
2917                        rmem->pg_tbl[i] =
2918                                cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2919                }
2920        }
2921
2922        if (rmem->vmem_size) {
2923                *rmem->vmem = vzalloc(rmem->vmem_size);
2924                if (!(*rmem->vmem))
2925                        return -ENOMEM;
2926        }
2927        return 0;
2928}
2929
2930static void bnxt_free_tpa_info(struct bnxt *bp)
2931{
2932        int i;
2933
2934        for (i = 0; i < bp->rx_nr_rings; i++) {
2935                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2936
2937                kfree(rxr->rx_tpa_idx_map);
2938                rxr->rx_tpa_idx_map = NULL;
2939                if (rxr->rx_tpa) {
2940                        kfree(rxr->rx_tpa[0].agg_arr);
2941                        rxr->rx_tpa[0].agg_arr = NULL;
2942                }
2943                kfree(rxr->rx_tpa);
2944                rxr->rx_tpa = NULL;
2945        }
2946}
2947
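/* Allocate the per-ring TPA tracking structures.  On P5 chips a single
 * aggregation completion array is allocated per ring and carved up among the
 * TPA slots, along with an aggregation index map.
 */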
2948static int bnxt_alloc_tpa_info(struct bnxt *bp)
2949{
2950        int i, j, total_aggs = 0;
2951
2952        bp->max_tpa = MAX_TPA;
2953        if (bp->flags & BNXT_FLAG_CHIP_P5) {
2954                if (!bp->max_tpa_v2)
2955                        return 0;
2956                bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2957                total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2958        }
2959
2960        for (i = 0; i < bp->rx_nr_rings; i++) {
2961                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2962                struct rx_agg_cmp *agg;
2963
2964                rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2965                                      GFP_KERNEL);
2966                if (!rxr->rx_tpa)
2967                        return -ENOMEM;
2968
2969                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2970                        continue;
2971                agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2972                rxr->rx_tpa[0].agg_arr = agg;
2973                if (!agg)
2974                        return -ENOMEM;
2975                for (j = 1; j < bp->max_tpa; j++)
2976                        rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2977                rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2978                                              GFP_KERNEL);
2979                if (!rxr->rx_tpa_idx_map)
2980                        return -ENOMEM;
2981        }
2982        return 0;
2983}
2984
2985static void bnxt_free_rx_rings(struct bnxt *bp)
2986{
2987        int i;
2988
2989        if (!bp->rx_ring)
2990                return;
2991
2992        bnxt_free_tpa_info(bp);
2993        for (i = 0; i < bp->rx_nr_rings; i++) {
2994                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2995                struct bnxt_ring_struct *ring;
2996
2997                if (rxr->xdp_prog)
2998                        bpf_prog_put(rxr->xdp_prog);
2999
3000                if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3001                        xdp_rxq_info_unreg(&rxr->xdp_rxq);
3002
3003                page_pool_destroy(rxr->page_pool);
3004                rxr->page_pool = NULL;
3005
3006                kfree(rxr->rx_agg_bmap);
3007                rxr->rx_agg_bmap = NULL;
3008
3009                ring = &rxr->rx_ring_struct;
3010                bnxt_free_ring(bp, &ring->ring_mem);
3011
3012                ring = &rxr->rx_agg_ring_struct;
3013                bnxt_free_ring(bp, &ring->ring_mem);
3014        }
3015}
3016
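/* Create the page_pool that supplies RX buffer pages for this ring. */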
3017static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3018                                   struct bnxt_rx_ring_info *rxr)
3019{
3020        struct page_pool_params pp = { 0 };
3021
3022        pp.pool_size = bp->rx_ring_size;
3023        pp.nid = dev_to_node(&bp->pdev->dev);
3024        pp.dev = &bp->pdev->dev;
3025        pp.dma_dir = DMA_BIDIRECTIONAL;
3026
3027        rxr->page_pool = page_pool_create(&pp);
3028        if (IS_ERR(rxr->page_pool)) {
3029                int err = PTR_ERR(rxr->page_pool);
3030
3031                rxr->page_pool = NULL;
3032                return err;
3033        }
3034        return 0;
3035}
3036
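/* Allocate all RX ring resources: page pools, XDP RXQ registrations, RX and
 * aggregation descriptor memory, aggregation bitmaps and, when TPA is
 * enabled, the TPA info structures.
 */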
3037static int bnxt_alloc_rx_rings(struct bnxt *bp)
3038{
3039        int i, rc = 0, agg_rings = 0;
3040
3041        if (!bp->rx_ring)
3042                return -ENOMEM;
3043
3044        if (bp->flags & BNXT_FLAG_AGG_RINGS)
3045                agg_rings = 1;
3046
3047        for (i = 0; i < bp->rx_nr_rings; i++) {
3048                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3049                struct bnxt_ring_struct *ring;
3050
3051                ring = &rxr->rx_ring_struct;
3052
3053                rc = bnxt_alloc_rx_page_pool(bp, rxr);
3054                if (rc)
3055                        return rc;
3056
3057                rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3058                if (rc < 0)
3059                        return rc;
3060
3061                rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3062                                                MEM_TYPE_PAGE_POOL,
3063                                                rxr->page_pool);
3064                if (rc) {
3065                        xdp_rxq_info_unreg(&rxr->xdp_rxq);
3066                        return rc;
3067                }
3068
3069                rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3070                if (rc)
3071                        return rc;
3072
3073                ring->grp_idx = i;
3074                if (agg_rings) {
3075                        u16 mem_size;
3076
3077                        ring = &rxr->rx_agg_ring_struct;
3078                        rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3079                        if (rc)
3080                                return rc;
3081
3082                        ring->grp_idx = i;
3083                        rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3084                        mem_size = rxr->rx_agg_bmap_size / 8;
3085                        rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3086                        if (!rxr->rx_agg_bmap)
3087                                return -ENOMEM;
3088                }
3089        }
3090        if (bp->flags & BNXT_FLAG_TPA)
3091                rc = bnxt_alloc_tpa_info(bp);
3092        return rc;
3093}
3094
3095static void bnxt_free_tx_rings(struct bnxt *bp)
3096{
3097        int i;
3098        struct pci_dev *pdev = bp->pdev;
3099
3100        if (!bp->tx_ring)
3101                return;
3102
3103        for (i = 0; i < bp->tx_nr_rings; i++) {
3104                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3105                struct bnxt_ring_struct *ring;
3106
3107                if (txr->tx_push) {
3108                        dma_free_coherent(&pdev->dev, bp->tx_push_size,
3109                                          txr->tx_push, txr->tx_push_mapping);
3110                        txr->tx_push = NULL;
3111                }
3112
3113                ring = &txr->tx_ring_struct;
3114
3115                bnxt_free_ring(bp, &ring->ring_mem);
3116        }
3117}
3118
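/* Allocate TX descriptor ring memory and, when TX push is enabled, the
 * per-ring coherent buffer backing the push operation, then map each ring to
 * its hardware queue via the TC-to-queue-index table.
 */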
3119static int bnxt_alloc_tx_rings(struct bnxt *bp)
3120{
3121        int i, j, rc;
3122        struct pci_dev *pdev = bp->pdev;
3123
3124        bp->tx_push_size = 0;
3125        if (bp->tx_push_thresh) {
3126                int push_size;
3127
3128                push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3129                                        bp->tx_push_thresh);
3130
3131                if (push_size > 256) {
3132                        push_size = 0;
3133                        bp->tx_push_thresh = 0;
3134                }
3135
3136                bp->tx_push_size = push_size;
3137        }
3138
3139        for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3140                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3141                struct bnxt_ring_struct *ring;
3142                u8 qidx;
3143
3144                ring = &txr->tx_ring_struct;
3145
3146                rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3147                if (rc)
3148                        return rc;
3149
3150                ring->grp_idx = txr->bnapi->index;
3151                if (bp->tx_push_size) {
3152                        dma_addr_t mapping;
3153
3154                        /* One pre-allocated DMA buffer to back up the
3155                         * TX push operation.
3156                         */
3157                        txr->tx_push = dma_alloc_coherent(&pdev->dev,
3158                                                bp->tx_push_size,
3159                                                &txr->tx_push_mapping,
3160                                                GFP_KERNEL);
3161
3162                        if (!txr->tx_push)
3163                                return -ENOMEM;
3164
3165                        mapping = txr->tx_push_mapping +
3166                                sizeof(struct tx_push_bd);
3167                        txr->data_mapping = cpu_to_le64(mapping);
3168                }
3169                qidx = bp->tc_to_qidx[j];
3170                ring->queue_id = bp->q_info[qidx].queue_id;
3171                if (i < bp->tx_nr_rings_xdp)
3172                        continue;
3173                if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3174                        j++;
3175        }
3176        return 0;
3177}
3178
3179static void bnxt_free_cp_rings(struct bnxt *bp)
3180{
3181        int i;
3182
3183        if (!bp->bnapi)
3184                return;
3185
3186        for (i = 0; i < bp->cp_nr_rings; i++) {
3187                struct bnxt_napi *bnapi = bp->bnapi[i];
3188                struct bnxt_cp_ring_info *cpr;
3189                struct bnxt_ring_struct *ring;
3190                int j;
3191
3192                if (!bnapi)
3193                        continue;
3194
3195                cpr = &bnapi->cp_ring;
3196                ring = &cpr->cp_ring_struct;
3197
3198                bnxt_free_ring(bp, &ring->ring_mem);
3199
3200                for (j = 0; j < 2; j++) {
3201                        struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3202
3203                        if (cpr2) {
3204                                ring = &cpr2->cp_ring_struct;
3205                                bnxt_free_ring(bp, &ring->ring_mem);
3206                                kfree(cpr2);
3207                                cpr->cp_ring_arr[j] = NULL;
3208                        }
3209                }
3210        }
3211}
3212
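/* Allocate one additional completion ring, used on P5 chips as the RX or TX
 * completion queue behind a notification queue.
 */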
3213static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3214{
3215        struct bnxt_ring_mem_info *rmem;
3216        struct bnxt_ring_struct *ring;
3217        struct bnxt_cp_ring_info *cpr;
3218        int rc;
3219
3220        cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3221        if (!cpr)
3222                return NULL;
3223
3224        ring = &cpr->cp_ring_struct;
3225        rmem = &ring->ring_mem;
3226        rmem->nr_pages = bp->cp_nr_pages;
3227        rmem->page_size = HW_CMPD_RING_SIZE;
3228        rmem->pg_arr = (void **)cpr->cp_desc_ring;
3229        rmem->dma_arr = cpr->cp_desc_mapping;
3230        rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3231        rc = bnxt_alloc_ring(bp, rmem);
3232        if (rc) {
3233                bnxt_free_ring(bp, rmem);
3234                kfree(cpr);
3235                cpr = NULL;
3236        }
3237        return cpr;
3238}
3239
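/* Allocate the per-NAPI completion ring memory and assign MSI-X map indexes,
 * offsetting past vectors reserved for ULP where needed.  On P5 chips
 * separate RX and TX completion sub-rings are allocated as well.
 */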
3240static int bnxt_alloc_cp_rings(struct bnxt *bp)
3241{
3242        bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3243        int i, rc, ulp_base_vec, ulp_msix;
3244
3245        ulp_msix = bnxt_get_ulp_msix_num(bp);
3246        ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3247        for (i = 0; i < bp->cp_nr_rings; i++) {
3248                struct bnxt_napi *bnapi = bp->bnapi[i];
3249                struct bnxt_cp_ring_info *cpr;
3250                struct bnxt_ring_struct *ring;
3251
3252                if (!bnapi)
3253                        continue;
3254
3255                cpr = &bnapi->cp_ring;
3256                cpr->bnapi = bnapi;
3257                ring = &cpr->cp_ring_struct;
3258
3259                rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3260                if (rc)
3261                        return rc;
3262
3263                if (ulp_msix && i >= ulp_base_vec)
3264                        ring->map_idx = i + ulp_msix;
3265                else
3266                        ring->map_idx = i;
3267
3268                if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3269                        continue;
3270
3271                if (i < bp->rx_nr_rings) {
3272                        struct bnxt_cp_ring_info *cpr2 =
3273                                bnxt_alloc_cp_sub_ring(bp);
3274
3275                        cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3276                        if (!cpr2)
3277                                return -ENOMEM;
3278                        cpr2->bnapi = bnapi;
3279                }
3280                if ((sh && i < bp->tx_nr_rings) ||
3281                    (!sh && i >= bp->rx_nr_rings)) {
3282                        struct bnxt_cp_ring_info *cpr2 =
3283                                bnxt_alloc_cp_sub_ring(bp);
3284
3285                        cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3286                        if (!cpr2)
3287                                return -ENOMEM;
3288                        cpr2->bnapi = bnapi;
3289                }
3290        }
3291        return 0;
3292}
3293
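/* Fill in the ring_mem descriptors (page counts, page size, descriptor and
 * software ring pointers) for every completion, RX, RX aggregation and TX
 * ring.
 */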
3294static void bnxt_init_ring_struct(struct bnxt *bp)
3295{
3296        int i;
3297
3298        for (i = 0; i < bp->cp_nr_rings; i++) {
3299                struct bnxt_napi *bnapi = bp->bnapi[i];
3300                struct bnxt_ring_mem_info *rmem;
3301                struct bnxt_cp_ring_info *cpr;
3302                struct bnxt_rx_ring_info *rxr;
3303                struct bnxt_tx_ring_info *txr;
3304                struct bnxt_ring_struct *ring;
3305
3306                if (!bnapi)
3307                        continue;
3308
3309                cpr = &bnapi->cp_ring;
3310                ring = &cpr->cp_ring_struct;
3311                rmem = &ring->ring_mem;
3312                rmem->nr_pages = bp->cp_nr_pages;
3313                rmem->page_size = HW_CMPD_RING_SIZE;
3314                rmem->pg_arr = (void **)cpr->cp_desc_ring;
3315                rmem->dma_arr = cpr->cp_desc_mapping;
3316                rmem->vmem_size = 0;
3317
3318                rxr = bnapi->rx_ring;
3319                if (!rxr)
3320                        goto skip_rx;
3321
3322                ring = &rxr->rx_ring_struct;
3323                rmem = &ring->ring_mem;
3324                rmem->nr_pages = bp->rx_nr_pages;
3325                rmem->page_size = HW_RXBD_RING_SIZE;
3326                rmem->pg_arr = (void **)rxr->rx_desc_ring;
3327                rmem->dma_arr = rxr->rx_desc_mapping;
3328                rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3329                rmem->vmem = (void **)&rxr->rx_buf_ring;
3330
3331                ring = &rxr->rx_agg_ring_struct;
3332                rmem = &ring->ring_mem;
3333                rmem->nr_pages = bp->rx_agg_nr_pages;
3334                rmem->page_size = HW_RXBD_RING_SIZE;
3335                rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3336                rmem->dma_arr = rxr->rx_agg_desc_mapping;
3337                rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3338                rmem->vmem = (void **)&rxr->rx_agg_ring;
3339
3340skip_rx:
3341                txr = bnapi->tx_ring;
3342                if (!txr)
3343                        continue;
3344
3345                ring = &txr->tx_ring_struct;
3346                rmem = &ring->ring_mem;
3347                rmem->nr_pages = bp->tx_nr_pages;
3348                rmem->page_size = HW_RXBD_RING_SIZE;
3349                rmem->pg_arr = (void **)txr->tx_desc_ring;
3350                rmem->dma_arr = txr->tx_desc_mapping;
3351                rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3352                rmem->vmem = (void **)&txr->tx_buf_ring;
3353        }
3354}
3355
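/* Write the given type/flags and an opaque producer index into every RX (or
 * RX aggregation) buffer descriptor of a ring.
 */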
3356static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3357{
3358        int i;
3359        u32 prod;
3360        struct rx_bd **rx_buf_ring;
3361
3362        rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3363        for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3364                int j;
3365                struct rx_bd *rxbd;
3366
3367                rxbd = rx_buf_ring[i];
3368                if (!rxbd)
3369                        continue;
3370
3371                for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3372                        rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3373                        rxbd->rx_bd_opaque = prod;
3374                }
3375        }
3376}
3377
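/* Fill one RX ring with receive buffers, populate its aggregation ring with
 * pages when aggregation rings are used, and allocate the TPA buffers.
 */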
3378static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3379{
3380        struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3381        struct net_device *dev = bp->dev;
3382        u32 prod;
3383        int i;
3384
3385        prod = rxr->rx_prod;
3386        for (i = 0; i < bp->rx_ring_size; i++) {
3387                if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3388                        netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3389                                    ring_nr, i, bp->rx_ring_size);
3390                        break;
3391                }
3392                prod = NEXT_RX(prod);
3393        }
3394        rxr->rx_prod = prod;
3395
3396        if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3397                return 0;
3398
3399        prod = rxr->rx_agg_prod;
3400        for (i = 0; i < bp->rx_agg_ring_size; i++) {
3401                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3402                        netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3403                                    ring_nr, i, bp->rx_agg_ring_size);
3404                        break;
3405                }
3406                prod = NEXT_RX_AGG(prod);
3407        }
3408        rxr->rx_agg_prod = prod;
3409
3410        if (rxr->rx_tpa) {
3411                dma_addr_t mapping;
3412                u8 *data;
3413
3414                for (i = 0; i < bp->max_tpa; i++) {
3415                        data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3416                        if (!data)
3417                                return -ENOMEM;
3418
3419                        rxr->rx_tpa[i].data = data;
3420                        rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3421                        rxr->rx_tpa[i].mapping = mapping;
3422                }
3423        }
3424        return 0;
3425}
3426
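/* Initialize the buffer descriptors of one RX ring and its aggregation ring,
 * attach the XDP program in page mode, and fill the ring with buffers.
 */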
3427static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3428{
3429        struct bnxt_rx_ring_info *rxr;
3430        struct bnxt_ring_struct *ring;
3431        u32 type;
3432
3433        type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3434                RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3435
3436        if (NET_IP_ALIGN == 2)
3437                type |= RX_BD_FLAGS_SOP;
3438
3439        rxr = &bp->rx_ring[ring_nr];
3440        ring = &rxr->rx_ring_struct;
3441        bnxt_init_rxbd_pages(ring, type);
3442
3443        if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3444                bpf_prog_add(bp->xdp_prog, 1);
3445                rxr->xdp_prog = bp->xdp_prog;
3446        }
3447        ring->fw_ring_id = INVALID_HW_RING_ID;
3448
3449        ring = &rxr->rx_agg_ring_struct;
3450        ring->fw_ring_id = INVALID_HW_RING_ID;
3451
3452        if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3453                type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3454                        RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3455
3456                bnxt_init_rxbd_pages(ring, type);
3457        }
3458
3459        return bnxt_alloc_one_rx_ring(bp, ring_nr);
3460}
3461
3462static void bnxt_init_cp_rings(struct bnxt *bp)
3463{
3464        int i, j;
3465
3466        for (i = 0; i < bp->cp_nr_rings; i++) {
3467                struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3468                struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3469
3470                ring->fw_ring_id = INVALID_HW_RING_ID;
3471                cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3472                cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3473                for (j = 0; j < 2; j++) {
3474                        struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3475
3476                        if (!cpr2)
3477                                continue;
3478
3479                        ring = &cpr2->cp_ring_struct;
3480                        ring->fw_ring_id = INVALID_HW_RING_ID;
3481                        cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3482                        cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3483                }
3484        }
3485}
3486
3487static int bnxt_init_rx_rings(struct bnxt *bp)
3488{
3489        int i, rc = 0;
3490
3491        if (BNXT_RX_PAGE_MODE(bp)) {
3492                bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3493                bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3494        } else {
3495                bp->rx_offset = BNXT_RX_OFFSET;
3496                bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3497        }
3498
3499        for (i = 0; i < bp->rx_nr_rings; i++) {
3500                rc = bnxt_init_one_rx_ring(bp, i);
3501                if (rc)
3502                        break;
3503        }
3504
3505        return rc;
3506}
3507
3508static int bnxt_init_tx_rings(struct bnxt *bp)
3509{
3510        u16 i;
3511
3512        bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3513                                   MAX_SKB_FRAGS + 1);
3514
3515        for (i = 0; i < bp->tx_nr_rings; i++) {
3516                struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3517                struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3518
3519                ring->fw_ring_id = INVALID_HW_RING_ID;
3520        }
3521
3522        return 0;
3523}
3524
3525static void bnxt_free_ring_grps(struct bnxt *bp)
3526{
3527        kfree(bp->grp_info);
3528        bp->grp_info = NULL;
3529}
3530
3531static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3532{
3533        int i;
3534
3535        if (irq_re_init) {
3536                bp->grp_info = kcalloc(bp->cp_nr_rings,
3537                                       sizeof(struct bnxt_ring_grp_info),
3538                                       GFP_KERNEL);
3539                if (!bp->grp_info)
3540                        return -ENOMEM;
3541        }
3542        for (i = 0; i < bp->cp_nr_rings; i++) {
3543                if (irq_re_init)
3544                        bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3545                bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3546                bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3547                bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3548                bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3549        }
3550        return 0;
3551}
3552
3553static void bnxt_free_vnics(struct bnxt *bp)
3554{
3555        kfree(bp->vnic_info);
3556        bp->vnic_info = NULL;
3557        bp->nr_vnics = 0;
3558}
3559
3560static int bnxt_alloc_vnics(struct bnxt *bp)
3561{
3562        int num_vnics = 1;
3563
3564#ifdef CONFIG_RFS_ACCEL
3565        if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3566                num_vnics += bp->rx_nr_rings;
3567#endif
3568
3569        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3570                num_vnics++;
3571
3572        bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3573                                GFP_KERNEL);
3574        if (!bp->vnic_info)
3575                return -ENOMEM;
3576
3577        bp->nr_vnics = num_vnics;
3578        return 0;
3579}
3580
3581static void bnxt_init_vnics(struct bnxt *bp)
3582{
3583        int i;
3584
3585        for (i = 0; i < bp->nr_vnics; i++) {
3586                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3587                int j;
3588
3589                vnic->fw_vnic_id = INVALID_HW_RING_ID;
3590                for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3591                        vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3592
3593                vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3594
3595                if (bp->vnic_info[i].rss_hash_key) {
3596                        if (i == 0)
3597                                prandom_bytes(vnic->rss_hash_key,
3598                                              HW_HASH_KEY_SIZE);
3599                        else
3600                                memcpy(vnic->rss_hash_key,
3601                                       bp->vnic_info[0].rss_hash_key,
3602                                       HW_HASH_KEY_SIZE);
3603                }
3604        }
3605}
3606
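/* Return the number of descriptor pages to allocate for a ring of ring_size
 * entries; the result is always a power of two and holds at least
 * ring_size + 1 descriptors.
 */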
3607static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3608{
3609        int pages;
3610
3611        pages = ring_size / desc_per_pg;
3612
3613        if (!pages)
3614                return 1;
3615
3616        pages++;
3617
3618        while (pages & (pages - 1))
3619                pages++;
3620
3621        return pages;
3622}
3623
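/* Derive the TPA (LRO / hardware GRO) flags from the netdev features.  TPA
 * stays disabled when aggregation rings are not used.
 */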
3624void bnxt_set_tpa_flags(struct bnxt *bp)
3625{
3626        bp->flags &= ~BNXT_FLAG_TPA;
3627        if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3628                return;
3629        if (bp->dev->features & NETIF_F_LRO)
3630                bp->flags |= BNXT_FLAG_LRO;
3631        else if (bp->dev->features & NETIF_F_GRO_HW)
3632                bp->flags |= BNXT_FLAG_GRO;
3633}
3634
3635/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3636 * be set on entry.
3637 */
3638void bnxt_set_ring_params(struct bnxt *bp)
3639{
3640        u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3641        u32 agg_factor = 0, agg_ring_size = 0;
3642
3643        /* 8 for CRC and VLAN */
3644        rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3645
3646        rx_space = rx_size + NET_SKB_PAD +
3647                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3648
3649        bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3650        ring_size = bp->rx_ring_size;
3651        bp->rx_agg_ring_size = 0;
3652        bp->rx_agg_nr_pages = 0;
3653
3654        if (bp->flags & BNXT_FLAG_TPA)
3655                agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3656
3657        bp->flags &= ~BNXT_FLAG_JUMBO;
3658        if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3659                u32 jumbo_factor;
3660
3661                bp->flags |= BNXT_FLAG_JUMBO;
3662                jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3663                if (jumbo_factor > agg_factor)
3664                        agg_factor = jumbo_factor;
3665        }
3666        agg_ring_size = ring_size * agg_factor;
3667
3668        if (agg_ring_size) {
3669                bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3670                                                        RX_DESC_CNT);
3671                if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3672                        u32 tmp = agg_ring_size;
3673
3674                        bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3675                        agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3676                        netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3677                                    tmp, agg_ring_size);
3678                }
3679                bp->rx_agg_ring_size = agg_ring_size;
3680                bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3681                rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3682                rx_space = rx_size + NET_SKB_PAD +
3683                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3684        }
3685
3686        bp->rx_buf_use_size = rx_size;
3687        bp->rx_buf_size = rx_space;
3688
3689        bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3690        bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3691
3692        ring_size = bp->tx_ring_size;
3693        bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3694        bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3695
3696        max_rx_cmpl = bp->rx_ring_size;
3697        /* MAX TPA needs to be added because TPA_START completions are
3698         * immediately recycled, so the TPA completions are not bound by
3699         * the RX ring size.
3700         */
3701        if (bp->flags & BNXT_FLAG_TPA)
3702                max_rx_cmpl += bp->max_tpa;
3703        /* RX and TPA completions are 32-byte, all others are 16-byte */
3704        ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3705        bp->cp_ring_size = ring_size;
3706
3707        bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3708        if (bp->cp_nr_pages > MAX_CP_PAGES) {
3709                bp->cp_nr_pages = MAX_CP_PAGES;
3710                bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3711                netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3712                            ring_size, bp->cp_ring_size);
3713        }
3714        bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3715        bp->cp_ring_mask = bp->cp_bit - 1;
3716}
3717
3718/* Changing allocation mode of RX rings.
3719 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3720 */
3721int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3722{
3723        if (